1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x10000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.1"
60 #define DRV_MODULE_RELDATE "December 19, 2007"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
238 diff = bp->tx_prod - bnapi->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
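/* Editor's illustration (sketch, not driver code): the availability math
 * above, standalone.  Each 256-slot page has one unusable index, so a
 * completely full ring shows prod - cons == TX_DESC_CNT (256), which is
 * clamped to MAX_TX_DESC_CNT (255) descriptors in use.  Unsigned 16-bit
 * arithmetic makes the index wrap-around work out on its own.
 */
static inline u16 tx_avail_sketch(u16 prod, u16 cons, u16 ring_size)
{
	u16 diff = prod - cons;		/* wraps naturally in u16 */

	if (diff >= 256) {		/* TX_DESC_CNT */
		if (diff == 256)
			diff = 255;	/* MAX_TX_DESC_CNT */
	}
	return ring_size - diff;
}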
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
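/* Editor's note: bnx2_reg_rd_ind()/bnx2_reg_wr_ind() above are the classic
 * "register window" idiom -- one register selects the target address, a
 * second carries the data, and indirect_lock keeps the address/data pair
 * atomic against other indirect accesses.  A generic sketch of the same
 * pattern (the WINDOW_* register names are hypothetical):
 */
static u32 window_read_sketch(void __iomem *base, spinlock_t *lock, u32 addr)
{
	u32 val;

	spin_lock_bh(lock);
	writel(addr, base + WINDOW_ADDR_REG);	/* select target register */
	val = readl(base + WINDOW_DATA_REG);	/* read through the window */
	spin_unlock_bh(lock);
	return val;
}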
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
290 spin_unlock_bh(&bp->indirect_lock);
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
314 for (i = 0; i < 50; i++) {
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
371 for (i = 0; i < 50; i++) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
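/* Editor's sketch: both PHY accessors above build a clause-22 MDIO frame in
 * BNX2_EMAC_MDIO_COMM (PHY address in bits 25:21, register in 20:16, data
 * in 15:0), set START_BUSY, then poll until the controller clears it.  The
 * common polling shape, assuming the driver's budget of 50 short waits:
 */
static int mdio_wait_sketch(struct bnx2 *bp)
{
	int i;
	u32 val;

	for (i = 0; i < 50; i++) {
		udelay(10);		/* give the frame time to clock out */
		val = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val & BNX2_EMAC_MDIO_COMM_START_BUSY))
			return 0;	/* controller finished the frame */
	}
	return -EBUSY;			/* still busy after the budget */
}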
400 bnx2_disable_int(struct bnx2 *bp)
403 struct bnx2_napi *bnapi;
405 for (i = 0; i < bp->irq_nvecs; i++) {
406 bnapi = &bp->bnx2_napi[i];
407 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
408 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
410 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
414 bnx2_enable_int(struct bnx2 *bp)
417 struct bnx2_napi *bnapi;
419 for (i = 0; i < bp->irq_nvecs; i++) {
420 bnapi = &bp->bnx2_napi[i];
422 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
423 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
424 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
425 bnapi->last_status_idx);
427 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
428 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
429 bnapi->last_status_idx);
431 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
435 bnx2_disable_int_sync(struct bnx2 *bp)
439 atomic_inc(&bp->intr_sem);
440 bnx2_disable_int(bp);
441 for (i = 0; i < bp->irq_nvecs; i++)
442 synchronize_irq(bp->irq_tbl[i].vector);
446 bnx2_napi_disable(struct bnx2 *bp)
450 for (i = 0; i < bp->irq_nvecs; i++)
451 napi_disable(&bp->bnx2_napi[i].napi);
455 bnx2_napi_enable(struct bnx2 *bp)
459 for (i = 0; i < bp->irq_nvecs; i++)
460 napi_enable(&bp->bnx2_napi[i].napi);
464 bnx2_netif_stop(struct bnx2 *bp)
466 bnx2_disable_int_sync(bp);
467 if (netif_running(bp->dev)) {
468 bnx2_napi_disable(bp);
469 netif_tx_disable(bp->dev);
470 bp->dev->trans_start = jiffies; /* prevent tx timeout */
475 bnx2_netif_start(struct bnx2 *bp)
477 if (atomic_dec_and_test(&bp->intr_sem)) {
478 if (netif_running(bp->dev)) {
479 netif_wake_queue(bp->dev);
480 bnx2_napi_enable(bp);
487 bnx2_free_mem(struct bnx2 *bp)
491 for (i = 0; i < bp->ctx_pages; i++) {
492 if (bp->ctx_blk[i]) {
493 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
495 bp->ctx_blk_mapping[i]);
496 bp->ctx_blk[i] = NULL;
499 if (bp->status_blk) {
500 pci_free_consistent(bp->pdev, bp->status_stats_size,
501 bp->status_blk, bp->status_blk_mapping);
502 bp->status_blk = NULL;
503 bp->stats_blk = NULL;
505 if (bp->tx_desc_ring) {
506 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507 bp->tx_desc_ring, bp->tx_desc_mapping);
508 bp->tx_desc_ring = NULL;
510 kfree(bp->tx_buf_ring);
511 bp->tx_buf_ring = NULL;
512 for (i = 0; i < bp->rx_max_ring; i++) {
513 if (bp->rx_desc_ring[i])
514 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
516 bp->rx_desc_mapping[i]);
517 bp->rx_desc_ring[i] = NULL;
519 vfree(bp->rx_buf_ring);
520 bp->rx_buf_ring = NULL;
521 for (i = 0; i < bp->rx_max_pg_ring; i++) {
522 if (bp->rx_pg_desc_ring[i])
523 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524 bp->rx_pg_desc_ring[i],
525 bp->rx_pg_desc_mapping[i]);
526 bp->rx_pg_desc_ring[i] = NULL;
529 vfree(bp->rx_pg_ring);
530 bp->rx_pg_ring = NULL;
534 bnx2_alloc_mem(struct bnx2 *bp)
536 int i, status_blk_size;
538 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
539 if (bp->tx_buf_ring == NULL)
542 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
543 &bp->tx_desc_mapping);
544 if (bp->tx_desc_ring == NULL)
547 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
548 if (bp->rx_buf_ring == NULL)
551 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
553 for (i = 0; i < bp->rx_max_ring; i++) {
554 bp->rx_desc_ring[i] =
555 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
556 &bp->rx_desc_mapping[i]);
557 if (bp->rx_desc_ring[i] == NULL)
562 if (bp->rx_pg_ring_size) {
563 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
565 if (bp->rx_pg_ring == NULL)
568 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
572 for (i = 0; i < bp->rx_max_pg_ring; i++) {
573 bp->rx_pg_desc_ring[i] =
574 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
575 &bp->rx_pg_desc_mapping[i]);
576 if (bp->rx_pg_desc_ring[i] == NULL)
581 /* Combine status and statistics blocks into one allocation. */
582 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
583 if (bp->flags & MSIX_CAP_FLAG)
584 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
585 BNX2_SBLK_MSIX_ALIGN_SIZE);
586 bp->status_stats_size = status_blk_size +
587 sizeof(struct statistics_block);
589 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
590 &bp->status_blk_mapping);
591 if (bp->status_blk == NULL)
594 memset(bp->status_blk, 0, bp->status_stats_size);
596 bp->bnx2_napi[0].status_blk = bp->status_blk;
597 if (bp->flags & MSIX_CAP_FLAG) {
598 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
599 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
601 bnapi->status_blk_msix = (void *)
602 ((unsigned long) bp->status_blk +
603 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
604 bnapi->int_num = i << 24;
608 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
611 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
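/* Editor's illustration of the combined allocation above: the statistics
 * block lives at the cache-aligned end of the status area, so the CPU
 * pointer and the bus address are both derived with the same offset:
 *
 *	status_blk, status_blk_mapping --> +---------------------+
 *	                                   | status block(s)     |
 *	                                   | (status_blk_size)   |
 *	stats_blk, stats_blk_mapping ----> +---------------------+
 *	                                   | statistics block    |
 *	                                   +---------------------+
 */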
613 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
614 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
615 if (bp->ctx_pages == 0)
617 for (i = 0; i < bp->ctx_pages; i++) {
618 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
620 &bp->ctx_blk_mapping[i]);
621 if (bp->ctx_blk[i] == NULL)
633 bnx2_report_fw_link(struct bnx2 *bp)
635 u32 fw_link_status = 0;
637 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
643 switch (bp->line_speed) {
645 if (bp->duplex == DUPLEX_HALF)
646 fw_link_status = BNX2_LINK_STATUS_10HALF;
648 fw_link_status = BNX2_LINK_STATUS_10FULL;
651 if (bp->duplex == DUPLEX_HALF)
652 fw_link_status = BNX2_LINK_STATUS_100HALF;
654 fw_link_status = BNX2_LINK_STATUS_100FULL;
657 if (bp->duplex == DUPLEX_HALF)
658 fw_link_status = BNX2_LINK_STATUS_1000HALF;
660 fw_link_status = BNX2_LINK_STATUS_1000FULL;
663 if (bp->duplex == DUPLEX_HALF)
664 fw_link_status = BNX2_LINK_STATUS_2500HALF;
666 fw_link_status = BNX2_LINK_STATUS_2500FULL;
670 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
673 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
675 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
676 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
678 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
679 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
680 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
682 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
686 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
688 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
692 bnx2_xceiver_str(struct bnx2 *bp)
694 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
700 bnx2_report_link(struct bnx2 *bp)
703 netif_carrier_on(bp->dev);
704 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
705 bnx2_xceiver_str(bp));
707 printk("%d Mbps ", bp->line_speed);
709 if (bp->duplex == DUPLEX_FULL)
710 printk("full duplex");
712 printk("half duplex");
715 if (bp->flow_ctrl & FLOW_CTRL_RX) {
716 printk(", receive ");
717 if (bp->flow_ctrl & FLOW_CTRL_TX)
718 printk("& transmit ");
721 printk(", transmit ");
723 printk("flow control ON");
728 netif_carrier_off(bp->dev);
729 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
730 bnx2_xceiver_str(bp));
733 bnx2_report_fw_link(bp);
737 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
739 u32 local_adv, remote_adv;
742 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
743 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
745 if (bp->duplex == DUPLEX_FULL) {
746 bp->flow_ctrl = bp->req_flow_ctrl;
751 if (bp->duplex != DUPLEX_FULL) {
755 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
756 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
759 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
760 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
761 bp->flow_ctrl |= FLOW_CTRL_TX;
762 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
763 bp->flow_ctrl |= FLOW_CTRL_RX;
767 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
768 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
770 if (bp->phy_flags & PHY_SERDES_FLAG) {
771 u32 new_local_adv = 0;
772 u32 new_remote_adv = 0;
774 if (local_adv & ADVERTISE_1000XPAUSE)
775 new_local_adv |= ADVERTISE_PAUSE_CAP;
776 if (local_adv & ADVERTISE_1000XPSE_ASYM)
777 new_local_adv |= ADVERTISE_PAUSE_ASYM;
778 if (remote_adv & ADVERTISE_1000XPAUSE)
779 new_remote_adv |= ADVERTISE_PAUSE_CAP;
780 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
781 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
783 local_adv = new_local_adv;
784 remote_adv = new_remote_adv;
787 /* See Table 28B-3 of 802.3ab-1999 spec. */
788 if (local_adv & ADVERTISE_PAUSE_CAP) {
789 if(local_adv & ADVERTISE_PAUSE_ASYM) {
790 if (remote_adv & ADVERTISE_PAUSE_CAP) {
791 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
793 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
794 bp->flow_ctrl = FLOW_CTRL_RX;
798 if (remote_adv & ADVERTISE_PAUSE_CAP) {
799 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
803 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
804 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
805 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
807 bp->flow_ctrl = FLOW_CTRL_TX;
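/* Editor's summary of the 802.3 Table 28B-3 resolution coded above
 * (Cap = PAUSE_CAP, Asym = PAUSE_ASYM advertisement bits):
 *
 *	local Cap/Asym	remote Cap/Asym		resulting flow_ctrl
 *	    1    1	    1    -		FLOW_CTRL_TX | FLOW_CTRL_RX
 *	    1    1	    0    1		FLOW_CTRL_RX
 *	    1    0	    1    -		FLOW_CTRL_TX | FLOW_CTRL_RX
 *	    0    1	    1    1		FLOW_CTRL_TX
 *	    anything else			no pause
 */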
813 bnx2_5709s_linkup(struct bnx2 *bp)
819 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
820 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
821 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
823 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
824 bp->line_speed = bp->req_line_speed;
825 bp->duplex = bp->req_duplex;
828 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
830 case MII_BNX2_GP_TOP_AN_SPEED_10:
831 bp->line_speed = SPEED_10;
833 case MII_BNX2_GP_TOP_AN_SPEED_100:
834 bp->line_speed = SPEED_100;
836 case MII_BNX2_GP_TOP_AN_SPEED_1G:
837 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
838 bp->line_speed = SPEED_1000;
840 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
841 bp->line_speed = SPEED_2500;
844 if (val & MII_BNX2_GP_TOP_AN_FD)
845 bp->duplex = DUPLEX_FULL;
847 bp->duplex = DUPLEX_HALF;
852 bnx2_5708s_linkup(struct bnx2 *bp)
857 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859 case BCM5708S_1000X_STAT1_SPEED_10:
860 bp->line_speed = SPEED_10;
862 case BCM5708S_1000X_STAT1_SPEED_100:
863 bp->line_speed = SPEED_100;
865 case BCM5708S_1000X_STAT1_SPEED_1G:
866 bp->line_speed = SPEED_1000;
868 case BCM5708S_1000X_STAT1_SPEED_2G5:
869 bp->line_speed = SPEED_2500;
872 if (val & BCM5708S_1000X_STAT1_FD)
873 bp->duplex = DUPLEX_FULL;
875 bp->duplex = DUPLEX_HALF;
881 bnx2_5706s_linkup(struct bnx2 *bp)
883 u32 bmcr, local_adv, remote_adv, common;
886 bp->line_speed = SPEED_1000;
888 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889 if (bmcr & BMCR_FULLDPLX) {
890 bp->duplex = DUPLEX_FULL;
893 bp->duplex = DUPLEX_HALF;
896 if (!(bmcr & BMCR_ANENABLE)) {
900 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
903 common = local_adv & remote_adv;
904 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
906 if (common & ADVERTISE_1000XFULL) {
907 bp->duplex = DUPLEX_FULL;
910 bp->duplex = DUPLEX_HALF;
918 bnx2_copper_linkup(struct bnx2 *bp)
922 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
923 if (bmcr & BMCR_ANENABLE) {
924 u32 local_adv, remote_adv, common;
926 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
927 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
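		/* Editor's note: MII_STAT1000 reports the partner's
		 * 1000BASE-T abilities (LPA_1000FULL/HALF, bits 11:10)
		 * two bits left of the local MII_CTRL1000 layout
		 * (ADVERTISE_1000FULL/HALF, bits 9:8), so shifting by 2
		 * lines the two masks up for the AND below.
		 */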
929 common = local_adv & (remote_adv >> 2);
930 if (common & ADVERTISE_1000FULL) {
931 bp->line_speed = SPEED_1000;
932 bp->duplex = DUPLEX_FULL;
934 else if (common & ADVERTISE_1000HALF) {
935 bp->line_speed = SPEED_1000;
936 bp->duplex = DUPLEX_HALF;
939 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
940 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
942 common = local_adv & remote_adv;
943 if (common & ADVERTISE_100FULL) {
944 bp->line_speed = SPEED_100;
945 bp->duplex = DUPLEX_FULL;
947 else if (common & ADVERTISE_100HALF) {
948 bp->line_speed = SPEED_100;
949 bp->duplex = DUPLEX_HALF;
951 else if (common & ADVERTISE_10FULL) {
952 bp->line_speed = SPEED_10;
953 bp->duplex = DUPLEX_FULL;
955 else if (common & ADVERTISE_10HALF) {
956 bp->line_speed = SPEED_10;
957 bp->duplex = DUPLEX_HALF;
966 if (bmcr & BMCR_SPEED100) {
967 bp->line_speed = SPEED_100;
970 bp->line_speed = SPEED_10;
972 if (bmcr & BMCR_FULLDPLX) {
973 bp->duplex = DUPLEX_FULL;
976 bp->duplex = DUPLEX_HALF;
984 bnx2_set_mac_link(struct bnx2 *bp)
988 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
989 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
990 (bp->duplex == DUPLEX_HALF)) {
991 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
994 /* Configure the EMAC mode register. */
995 val = REG_RD(bp, BNX2_EMAC_MODE);
997 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
998 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
999 BNX2_EMAC_MODE_25G_MODE);
1002 switch (bp->line_speed) {
1004 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1005 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1010 val |= BNX2_EMAC_MODE_PORT_MII;
1013 val |= BNX2_EMAC_MODE_25G_MODE;
1016 val |= BNX2_EMAC_MODE_PORT_GMII;
1021 val |= BNX2_EMAC_MODE_PORT_GMII;
1024 /* Set the MAC to operate in the appropriate duplex mode. */
1025 if (bp->duplex == DUPLEX_HALF)
1026 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1027 REG_WR(bp, BNX2_EMAC_MODE, val);
1029 /* Enable/disable rx PAUSE. */
1030 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1032 if (bp->flow_ctrl & FLOW_CTRL_RX)
1033 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1034 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1036 /* Enable/disable tx PAUSE. */
1037 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1038 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1040 if (bp->flow_ctrl & FLOW_CTRL_TX)
1041 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1042 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1044 /* Acknowledge the interrupt. */
1045 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1053 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1054 (CHIP_NUM(bp) == CHIP_NUM_5709))
1055 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056 MII_BNX2_BLK_ADDR_GP_STATUS);
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1062 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1063 (CHIP_NUM(bp) == CHIP_NUM_5709))
1064 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1069 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1074 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1077 if (bp->autoneg & AUTONEG_SPEED)
1078 bp->advertising |= ADVERTISED_2500baseX_Full;
1080 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1081 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1083 bnx2_read_phy(bp, bp->mii_up1, &up1);
1084 if (!(up1 & BCM5708S_UP1_2G5)) {
1085 up1 |= BCM5708S_UP1_2G5;
1086 bnx2_write_phy(bp, bp->mii_up1, up1);
1090 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1103 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1106 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1107 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1109 bnx2_read_phy(bp, bp->mii_up1, &up1);
1110 if (up1 & BCM5708S_UP1_2G5) {
1111 up1 &= ~BCM5708S_UP1_2G5;
1112 bnx2_write_phy(bp, bp->mii_up1, up1);
1116 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1117 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1118 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1124 bnx2_enable_forced_2g5(struct bnx2 *bp)
1128 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1131 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1134 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1135 MII_BNX2_BLK_ADDR_SERDES_DIG);
1136 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1137 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1138 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1139 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1141 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1145 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147 bmcr |= BCM5708S_BMCR_FORCE_2500;
1150 if (bp->autoneg & AUTONEG_SPEED) {
1151 bmcr &= ~BMCR_ANENABLE;
1152 if (bp->req_duplex == DUPLEX_FULL)
1153 bmcr |= BMCR_FULLDPLX;
1155 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1159 bnx2_disable_forced_2g5(struct bnx2 *bp)
1163 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1166 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1169 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1170 MII_BNX2_BLK_ADDR_SERDES_DIG);
1171 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1172 val &= ~MII_BNX2_SD_MISC1_FORCE;
1173 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1175 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1179 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1180 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1181 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1184 if (bp->autoneg & AUTONEG_SPEED)
1185 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1186 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1190 bnx2_set_link(struct bnx2 *bp)
1195 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1200 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1203 link_up = bp->link_up;
1205 bnx2_enable_bmsr1(bp);
1206 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1207 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1208 bnx2_disable_bmsr1(bp);
1210 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1211 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1214 val = REG_RD(bp, BNX2_EMAC_STATUS);
1215 if (val & BNX2_EMAC_STATUS_LINK)
1216 bmsr |= BMSR_LSTATUS;
1218 bmsr &= ~BMSR_LSTATUS;
1221 if (bmsr & BMSR_LSTATUS) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1226 bnx2_5706s_linkup(bp);
1227 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1228 bnx2_5708s_linkup(bp);
1229 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1230 bnx2_5709s_linkup(bp);
1233 bnx2_copper_linkup(bp);
1235 bnx2_resolve_flow_ctrl(bp);
1238 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1239 (bp->autoneg & AUTONEG_SPEED))
1240 bnx2_disable_forced_2g5(bp);
1242 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1246 if (bp->link_up != link_up) {
1247 bnx2_report_link(bp);
1250 bnx2_set_mac_link(bp);
1256 bnx2_reset_phy(struct bnx2 *bp)
1261 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1263 #define PHY_RESET_MAX_WAIT 100
1264 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1267 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1268 if (!(reg & BMCR_RESET)) {
1273 if (i == PHY_RESET_MAX_WAIT) {
1280 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1284 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1285 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1287 if (bp->phy_flags & PHY_SERDES_FLAG) {
1288 adv = ADVERTISE_1000XPAUSE;
1291 adv = ADVERTISE_PAUSE_CAP;
1294 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1295 if (bp->phy_flags & PHY_SERDES_FLAG) {
1296 adv = ADVERTISE_1000XPSE_ASYM;
1299 adv = ADVERTISE_PAUSE_ASYM;
1302 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1303 if (bp->phy_flags & PHY_SERDES_FLAG) {
1304 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1307 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1313 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1316 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1318 u32 speed_arg = 0, pause_adv;
1320 pause_adv = bnx2_phy_get_pause_adv(bp);
1322 if (bp->autoneg & AUTONEG_SPEED) {
1323 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1324 if (bp->advertising & ADVERTISED_10baseT_Half)
1325 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1326 if (bp->advertising & ADVERTISED_10baseT_Full)
1327 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1328 if (bp->advertising & ADVERTISED_100baseT_Half)
1329 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1330 if (bp->advertising & ADVERTISED_100baseT_Full)
1331 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1332 if (bp->advertising & ADVERTISED_1000baseT_Full)
1333 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1334 if (bp->advertising & ADVERTISED_2500baseX_Full)
1335 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1337 if (bp->req_line_speed == SPEED_2500)
1338 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1339 else if (bp->req_line_speed == SPEED_1000)
1340 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1341 else if (bp->req_line_speed == SPEED_100) {
1342 if (bp->req_duplex == DUPLEX_FULL)
1343 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1345 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1346 } else if (bp->req_line_speed == SPEED_10) {
1347 if (bp->req_duplex == DUPLEX_FULL)
1348 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1350 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1354 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1355 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1356 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1357 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1359 if (port == PORT_TP)
1360 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1361 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1363 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1365 spin_unlock_bh(&bp->phy_lock);
1366 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1367 spin_lock_bh(&bp->phy_lock);
1373 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1378 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1379 return (bnx2_setup_remote_phy(bp, port));
1381 if (!(bp->autoneg & AUTONEG_SPEED)) {
1383 int force_link_down = 0;
1385 if (bp->req_line_speed == SPEED_2500) {
1386 if (!bnx2_test_and_enable_2g5(bp))
1387 force_link_down = 1;
1388 } else if (bp->req_line_speed == SPEED_1000) {
1389 if (bnx2_test_and_disable_2g5(bp))
1390 force_link_down = 1;
1392 bnx2_read_phy(bp, bp->mii_adv, &adv);
1393 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1395 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1396 new_bmcr = bmcr & ~BMCR_ANENABLE;
1397 new_bmcr |= BMCR_SPEED1000;
1399 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1400 if (bp->req_line_speed == SPEED_2500)
1401 bnx2_enable_forced_2g5(bp);
1402 else if (bp->req_line_speed == SPEED_1000) {
1403 bnx2_disable_forced_2g5(bp);
1404 new_bmcr &= ~0x2000;
1407 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1408 if (bp->req_line_speed == SPEED_2500)
1409 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1411 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1414 if (bp->req_duplex == DUPLEX_FULL) {
1415 adv |= ADVERTISE_1000XFULL;
1416 new_bmcr |= BMCR_FULLDPLX;
1419 adv |= ADVERTISE_1000XHALF;
1420 new_bmcr &= ~BMCR_FULLDPLX;
1422 if ((new_bmcr != bmcr) || (force_link_down)) {
1423 /* Force a link down visible on the other side */
1425 bnx2_write_phy(bp, bp->mii_adv, adv &
1426 ~(ADVERTISE_1000XFULL |
1427 ADVERTISE_1000XHALF));
1428 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1429 BMCR_ANRESTART | BMCR_ANENABLE);
1432 netif_carrier_off(bp->dev);
1433 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1434 bnx2_report_link(bp);
1436 bnx2_write_phy(bp, bp->mii_adv, adv);
1437 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1439 bnx2_resolve_flow_ctrl(bp);
1440 bnx2_set_mac_link(bp);
1445 bnx2_test_and_enable_2g5(bp);
1447 if (bp->advertising & ADVERTISED_1000baseT_Full)
1448 new_adv |= ADVERTISE_1000XFULL;
1450 new_adv |= bnx2_phy_get_pause_adv(bp);
1452 bnx2_read_phy(bp, bp->mii_adv, &adv);
1453 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1455 bp->serdes_an_pending = 0;
1456 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1457 /* Force a link down visible on the other side */
1459 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1460 spin_unlock_bh(&bp->phy_lock);
1462 spin_lock_bh(&bp->phy_lock);
1465 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1466 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1468 /* Speed up link-up time when the link partner
1469 * does not autonegotiate, which is very common
1470 * in blade servers. Some blade servers use
1471 * IPMI for keyboard input and it's important
1472 * to minimize link disruptions. Autoneg. involves
1473 * exchanging base pages plus 3 next pages and
1474 * normally completes in about 120 msec.
1476 bp->current_interval = SERDES_AN_TIMEOUT;
1477 bp->serdes_an_pending = 1;
1478 mod_timer(&bp->timer, jiffies + bp->current_interval);
1480 bnx2_resolve_flow_ctrl(bp);
1481 bnx2_set_mac_link(bp);
1487 #define ETHTOOL_ALL_FIBRE_SPEED \
1488 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1489 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1490 (ADVERTISED_1000baseT_Full)
1492 #define ETHTOOL_ALL_COPPER_SPEED \
1493 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1494 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1495 ADVERTISED_1000baseT_Full)
1497 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1498 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1500 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1503 bnx2_set_default_remote_link(struct bnx2 *bp)
1507 if (bp->phy_port == PORT_TP)
1508 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1510 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1512 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1513 bp->req_line_speed = 0;
1514 bp->autoneg |= AUTONEG_SPEED;
1515 bp->advertising = ADVERTISED_Autoneg;
1516 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1517 bp->advertising |= ADVERTISED_10baseT_Half;
1518 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1519 bp->advertising |= ADVERTISED_10baseT_Full;
1520 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1521 bp->advertising |= ADVERTISED_100baseT_Half;
1522 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1523 bp->advertising |= ADVERTISED_100baseT_Full;
1524 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1525 bp->advertising |= ADVERTISED_1000baseT_Full;
1526 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1527 bp->advertising |= ADVERTISED_2500baseX_Full;
1530 bp->advertising = 0;
1531 bp->req_duplex = DUPLEX_FULL;
1532 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1533 bp->req_line_speed = SPEED_10;
1534 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1535 bp->req_duplex = DUPLEX_HALF;
1537 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1538 bp->req_line_speed = SPEED_100;
1539 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1540 bp->req_duplex = DUPLEX_HALF;
1542 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1543 bp->req_line_speed = SPEED_1000;
1544 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1545 bp->req_line_speed = SPEED_2500;
1550 bnx2_set_default_link(struct bnx2 *bp)
1552 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1553 return bnx2_set_default_remote_link(bp);
1555 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1556 bp->req_line_speed = 0;
1557 if (bp->phy_flags & PHY_SERDES_FLAG) {
1560 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1562 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1563 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1564 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1566 bp->req_line_speed = bp->line_speed = SPEED_1000;
1567 bp->req_duplex = DUPLEX_FULL;
1570 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1574 bnx2_send_heart_beat(struct bnx2 *bp)
1579 spin_lock(&bp->indirect_lock);
1580 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1581 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1582 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1583 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1584 spin_unlock(&bp->indirect_lock);
1588 bnx2_remote_phy_event(struct bnx2 *bp)
1591 u8 link_up = bp->link_up;
1594 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1596 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1597 bnx2_send_heart_beat(bp);
1599 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1601 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1607 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1608 bp->duplex = DUPLEX_FULL;
1610 case BNX2_LINK_STATUS_10HALF:
1611 bp->duplex = DUPLEX_HALF;
1612 case BNX2_LINK_STATUS_10FULL:
1613 bp->line_speed = SPEED_10;
1615 case BNX2_LINK_STATUS_100HALF:
1616 bp->duplex = DUPLEX_HALF;
1617 case BNX2_LINK_STATUS_100BASE_T4:
1618 case BNX2_LINK_STATUS_100FULL:
1619 bp->line_speed = SPEED_100;
1621 case BNX2_LINK_STATUS_1000HALF:
1622 bp->duplex = DUPLEX_HALF;
1623 case BNX2_LINK_STATUS_1000FULL:
1624 bp->line_speed = SPEED_1000;
1626 case BNX2_LINK_STATUS_2500HALF:
1627 bp->duplex = DUPLEX_HALF;
1628 case BNX2_LINK_STATUS_2500FULL:
1629 bp->line_speed = SPEED_2500;
1636 spin_lock(&bp->phy_lock);
1638 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1639 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1640 if (bp->duplex == DUPLEX_FULL)
1641 bp->flow_ctrl = bp->req_flow_ctrl;
1643 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1644 bp->flow_ctrl |= FLOW_CTRL_TX;
1645 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1646 bp->flow_ctrl |= FLOW_CTRL_RX;
1649 old_port = bp->phy_port;
1650 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1651 bp->phy_port = PORT_FIBRE;
1653 bp->phy_port = PORT_TP;
1655 if (old_port != bp->phy_port)
1656 bnx2_set_default_link(bp);
1658 spin_unlock(&bp->phy_lock);
1660 if (bp->link_up != link_up)
1661 bnx2_report_link(bp);
1663 bnx2_set_mac_link(bp);
1667 bnx2_set_remote_link(struct bnx2 *bp)
1671 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1673 case BNX2_FW_EVT_CODE_LINK_EVENT:
1674 bnx2_remote_phy_event(bp);
1676 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1678 bnx2_send_heart_beat(bp);
1685 bnx2_setup_copper_phy(struct bnx2 *bp)
1690 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1692 if (bp->autoneg & AUTONEG_SPEED) {
1693 u32 adv_reg, adv1000_reg;
1694 u32 new_adv_reg = 0;
1695 u32 new_adv1000_reg = 0;
1697 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1698 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1699 ADVERTISE_PAUSE_ASYM);
1701 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1702 adv1000_reg &= PHY_ALL_1000_SPEED;
1704 if (bp->advertising & ADVERTISED_10baseT_Half)
1705 new_adv_reg |= ADVERTISE_10HALF;
1706 if (bp->advertising & ADVERTISED_10baseT_Full)
1707 new_adv_reg |= ADVERTISE_10FULL;
1708 if (bp->advertising & ADVERTISED_100baseT_Half)
1709 new_adv_reg |= ADVERTISE_100HALF;
1710 if (bp->advertising & ADVERTISED_100baseT_Full)
1711 new_adv_reg |= ADVERTISE_100FULL;
1712 if (bp->advertising & ADVERTISED_1000baseT_Full)
1713 new_adv1000_reg |= ADVERTISE_1000FULL;
1715 new_adv_reg |= ADVERTISE_CSMA;
1717 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1719 if ((adv1000_reg != new_adv1000_reg) ||
1720 (adv_reg != new_adv_reg) ||
1721 ((bmcr & BMCR_ANENABLE) == 0)) {
1723 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1724 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1725 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1728 else if (bp->link_up) {
1729 /* Flow ctrl may have changed from auto to forced */
1730 /* or vice-versa. */
1732 bnx2_resolve_flow_ctrl(bp);
1733 bnx2_set_mac_link(bp);
1739 if (bp->req_line_speed == SPEED_100) {
1740 new_bmcr |= BMCR_SPEED100;
1742 if (bp->req_duplex == DUPLEX_FULL) {
1743 new_bmcr |= BMCR_FULLDPLX;
1745 if (new_bmcr != bmcr) {
1748 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1749 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1751 if (bmsr & BMSR_LSTATUS) {
1752 /* Force link down */
1753 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1754 spin_unlock_bh(&bp->phy_lock);
1756 spin_lock_bh(&bp->phy_lock);
1758 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1759 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1762 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1764 /* Normally, the new speed is setup after the link has
1765 * gone down and up again. In some cases, link will not go
1766 * down so we need to set up the new speed here.
1768 if (bmsr & BMSR_LSTATUS) {
1769 bp->line_speed = bp->req_line_speed;
1770 bp->duplex = bp->req_duplex;
1771 bnx2_resolve_flow_ctrl(bp);
1772 bnx2_set_mac_link(bp);
1775 bnx2_resolve_flow_ctrl(bp);
1776 bnx2_set_mac_link(bp);
1782 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1784 if (bp->loopback == MAC_LOOPBACK)
1787 if (bp->phy_flags & PHY_SERDES_FLAG) {
1788 return (bnx2_setup_serdes_phy(bp, port));
1791 return (bnx2_setup_copper_phy(bp));
1796 bnx2_init_5709s_phy(struct bnx2 *bp)
1800 bp->mii_bmcr = MII_BMCR + 0x10;
1801 bp->mii_bmsr = MII_BMSR + 0x10;
1802 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1803 bp->mii_adv = MII_ADVERTISE + 0x10;
1804 bp->mii_lpa = MII_LPA + 0x10;
1805 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1807 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1808 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1810 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1813 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1815 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1816 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1817 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1818 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1820 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1821 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1822 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1823 val |= BCM5708S_UP1_2G5;
1825 val &= ~BCM5708S_UP1_2G5;
1826 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1828 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1829 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1830 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1831 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1833 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1835 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1836 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1837 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1839 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1845 bnx2_init_5708s_phy(struct bnx2 *bp)
1851 bp->mii_up1 = BCM5708S_UP1;
1853 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1854 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1855 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1857 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1858 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1859 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1861 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1862 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1863 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1865 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1866 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1867 val |= BCM5708S_UP1_2G5;
1868 bnx2_write_phy(bp, BCM5708S_UP1, val);
1871 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1872 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1873 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1874 /* increase tx signal amplitude */
1875 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1876 BCM5708S_BLK_ADDR_TX_MISC);
1877 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1878 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1879 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1880 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1883 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1884 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1889 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1890 BNX2_SHARED_HW_CFG_CONFIG);
1891 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1892 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1893 BCM5708S_BLK_ADDR_TX_MISC);
1894 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1895 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1896 BCM5708S_BLK_ADDR_DIG);
1903 bnx2_init_5706s_phy(struct bnx2 *bp)
1907 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1909 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1910 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1912 if (bp->dev->mtu > 1500) {
1915 /* Set extended packet length bit */
1916 bnx2_write_phy(bp, 0x18, 0x7);
1917 bnx2_read_phy(bp, 0x18, &val);
1918 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1920 bnx2_write_phy(bp, 0x1c, 0x6c00);
1921 bnx2_read_phy(bp, 0x1c, &val);
1922 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1927 bnx2_write_phy(bp, 0x18, 0x7);
1928 bnx2_read_phy(bp, 0x18, &val);
1929 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1931 bnx2_write_phy(bp, 0x1c, 0x6c00);
1932 bnx2_read_phy(bp, 0x1c, &val);
1933 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1940 bnx2_init_copper_phy(struct bnx2 *bp)
1946 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1947 bnx2_write_phy(bp, 0x18, 0x0c00);
1948 bnx2_write_phy(bp, 0x17, 0x000a);
1949 bnx2_write_phy(bp, 0x15, 0x310b);
1950 bnx2_write_phy(bp, 0x17, 0x201f);
1951 bnx2_write_phy(bp, 0x15, 0x9506);
1952 bnx2_write_phy(bp, 0x17, 0x401f);
1953 bnx2_write_phy(bp, 0x15, 0x14e2);
1954 bnx2_write_phy(bp, 0x18, 0x0400);
1957 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1958 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1959 MII_BNX2_DSP_EXPAND_REG | 0x8);
1960 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1962 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1965 if (bp->dev->mtu > 1500) {
1966 /* Set extended packet length bit */
1967 bnx2_write_phy(bp, 0x18, 0x7);
1968 bnx2_read_phy(bp, 0x18, &val);
1969 bnx2_write_phy(bp, 0x18, val | 0x4000);
1971 bnx2_read_phy(bp, 0x10, &val);
1972 bnx2_write_phy(bp, 0x10, val | 0x1);
1975 bnx2_write_phy(bp, 0x18, 0x7);
1976 bnx2_read_phy(bp, 0x18, &val);
1977 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1979 bnx2_read_phy(bp, 0x10, &val);
1980 bnx2_write_phy(bp, 0x10, val & ~0x1);
1983 /* ethernet@wirespeed */
1984 bnx2_write_phy(bp, 0x18, 0x7007);
1985 bnx2_read_phy(bp, 0x18, &val);
1986 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1992 bnx2_init_phy(struct bnx2 *bp)
1997 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1998 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
2000 bp->mii_bmcr = MII_BMCR;
2001 bp->mii_bmsr = MII_BMSR;
2002 bp->mii_bmsr1 = MII_BMSR;
2003 bp->mii_adv = MII_ADVERTISE;
2004 bp->mii_lpa = MII_LPA;
2006 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2008 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
2011 bnx2_read_phy(bp, MII_PHYSID1, &val);
2012 bp->phy_id = val << 16;
2013 bnx2_read_phy(bp, MII_PHYSID2, &val);
2014 bp->phy_id |= val & 0xffff;
2016 if (bp->phy_flags & PHY_SERDES_FLAG) {
2017 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2018 rc = bnx2_init_5706s_phy(bp);
2019 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2020 rc = bnx2_init_5708s_phy(bp);
2021 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2022 rc = bnx2_init_5709s_phy(bp);
2025 rc = bnx2_init_copper_phy(bp);
2030 rc = bnx2_setup_phy(bp, bp->phy_port);
2036 bnx2_set_mac_loopback(struct bnx2 *bp)
2040 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2041 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2042 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2043 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2048 static int bnx2_test_link(struct bnx2 *);
2051 bnx2_set_phy_loopback(struct bnx2 *bp)
2056 spin_lock_bh(&bp->phy_lock);
2057 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2059 spin_unlock_bh(&bp->phy_lock);
2063 for (i = 0; i < 10; i++) {
2064 if (bnx2_test_link(bp) == 0)
2069 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2070 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2071 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2072 BNX2_EMAC_MODE_25G_MODE);
2074 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2075 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2081 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2087 msg_data |= bp->fw_wr_seq;
2089 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2091 /* wait for an acknowledgement. */
2092 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2095 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2097 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2100 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2103 /* If we timed out, inform the firmware that this is the case. */
2104 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2106 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2109 msg_data &= ~BNX2_DRV_MSG_CODE;
2110 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2112 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2117 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
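/* Editor's sketch of the handshake above: the driver posts a sequenced
 * command in the DRV_MB mailbox, the bootcode echoes the sequence number
 * into FW_MB with the ACK bit, and the driver polls for the echo.  In
 * outline:
 *
 *	msg = code | ++seq;
 *	DRV_MB <- msg;
 *	poll FW_MB until (FW_MB & ACK) == (msg & SEQ), or time out;
 *	on timeout: DRV_MB <- (FW_TIMEOUT | seq);   (tell fw we gave up)
 *	finally: check the FW_MB status field for STATUS_OK.
 */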
2124 bnx2_init_5709_context(struct bnx2 *bp)
2129 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2130 val |= (BCM_PAGE_BITS - 8) << 16;
2131 REG_WR(bp, BNX2_CTX_COMMAND, val);
2132 for (i = 0; i < 10; i++) {
2133 val = REG_RD(bp, BNX2_CTX_COMMAND);
2134 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2138 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2141 for (i = 0; i < bp->ctx_pages; i++) {
2144 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2145 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2146 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2147 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2148 (u64) bp->ctx_blk_mapping[i] >> 32);
2149 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2150 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2151 for (j = 0; j < 10; j++) {
2153 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2154 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2158 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2167 bnx2_init_context(struct bnx2 *bp)
2173 u32 vcid_addr, pcid_addr, offset;
2178 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2181 vcid_addr = GET_PCID_ADDR(vcid);
2183 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2188 pcid_addr = GET_PCID_ADDR(new_vcid);
2191 vcid_addr = GET_CID_ADDR(vcid);
2192 pcid_addr = vcid_addr;
2195 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2196 vcid_addr += (i << PHY_CTX_SHIFT);
2197 pcid_addr += (i << PHY_CTX_SHIFT);
2199 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2200 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2202 /* Zero out the context. */
2203 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2204 CTX_WR(bp, vcid_addr, offset, 0);
2210 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2216 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2217 if (good_mbuf == NULL) {
2218 printk(KERN_ERR PFX "Failed to allocate memory in "
2219 "bnx2_alloc_bad_rbuf\n");
2223 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2224 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2228 /* Allocate a bunch of mbufs and save the good ones in an array. */
2229 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2230 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2231 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2233 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2235 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2237 /* The addresses with Bit 9 set are bad memory blocks. */
2238 if (!(val & (1 << 9))) {
2239 good_mbuf[good_mbuf_cnt] = (u16) val;
2243 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2246 /* Free the good ones back to the mbuf pool thus discarding
2247 * all the bad ones. */
2248 while (good_mbuf_cnt) {
2251 val = good_mbuf[good_mbuf_cnt];
2252 val = (val << 9) | val | 1;
2254 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
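/* Editor's note (assumption about the field layout): the write to
 * BNX2_RBUF_FW_BUF_FREE appears to carry the 9-bit buffer handle in two
 * places at once -- (val << 9) | val -- with bit 0 forced on as a
 * command/valid flag.  E.g. handle 0x0a4 is posted as
 * (0x0a4 << 9) | 0x0a4 | 1 = 0x148a5.
 */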
2261 bnx2_set_mac_addr(struct bnx2 *bp)
2264 u8 *mac_addr = bp->dev->dev_addr;
2266 val = (mac_addr[0] << 8) | mac_addr[1];
2268 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2270 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2271 (mac_addr[4] << 8) | mac_addr[5];
2273 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
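/* Editor's illustration: the six MAC octets are packed big-endian into the
 * two match registers -- MATCH0 takes octets 0-1 in its low 16 bits,
 * MATCH1 takes octets 2-5.  For 00:10:18:aa:bb:cc:
 *
 *	BNX2_EMAC_MAC_MATCH0 = 0x00000010	(00 << 8 | 10)
 *	BNX2_EMAC_MAC_MATCH1 = 0x18aabbcc
 */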
2277 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2280 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2281 struct rx_bd *rxbd =
2282 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2283 struct page *page = alloc_page(GFP_ATOMIC);
2287 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2288 PCI_DMA_FROMDEVICE);
2290 pci_unmap_addr_set(rx_pg, mapping, mapping);
2291 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2292 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2297 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2299 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2300 struct page *page = rx_pg->page;
2305 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2306 PCI_DMA_FROMDEVICE);
2313 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2315 struct sk_buff *skb;
2316 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2318 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2319 unsigned long align;
2321 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2326 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2327 skb_reserve(skb, BNX2_RX_ALIGN - align);
2329 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2330 PCI_DMA_FROMDEVICE);
2333 pci_unmap_addr_set(rx_buf, mapping, mapping);
2335 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2336 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2338 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2344 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2346 struct status_block *sblk = bnapi->status_blk;
2347 u32 new_link_state, old_link_state;
2350 new_link_state = sblk->status_attn_bits & event;
2351 old_link_state = sblk->status_attn_bits_ack & event;
2352 if (new_link_state != old_link_state) {
2354 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2356 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2364 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2366 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2367 spin_lock(&bp->phy_lock);
2369 spin_unlock(&bp->phy_lock);
2371 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2372 bnx2_set_remote_link(bp);
2377 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2381 if (bnapi->int_num == 0)
2382 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2384 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
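	/* Editor's note: the hardware consumer index counts 256 slots per
	 * page while only 255 hold descriptors (see the comment in
	 * bnx2_tx_avail()), so an index landing on the unused last slot
	 * is bumped past it below.
	 */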
2386 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2392 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2394 u16 hw_cons, sw_cons, sw_ring_cons;
2397 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2398 sw_cons = bnapi->tx_cons;
2400 while (sw_cons != hw_cons) {
2401 struct sw_bd *tx_buf;
2402 struct sk_buff *skb;
2405 sw_ring_cons = TX_RING_IDX(sw_cons);
2407 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2410 /* partial BD completions possible with TSO packets */
2411 if (skb_is_gso(skb)) {
2412 u16 last_idx, last_ring_idx;
2414 last_idx = sw_cons +
2415 skb_shinfo(skb)->nr_frags + 1;
2416 last_ring_idx = sw_ring_cons +
2417 skb_shinfo(skb)->nr_frags + 1;
2418 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2421 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2426 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2427 skb_headlen(skb), PCI_DMA_TODEVICE);
2430 last = skb_shinfo(skb)->nr_frags;
2432 for (i = 0; i < last; i++) {
2433 sw_cons = NEXT_TX_BD(sw_cons);
2435 pci_unmap_page(bp->pdev,
2437 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2439 skb_shinfo(skb)->frags[i].size,
2443 sw_cons = NEXT_TX_BD(sw_cons);
2447 if (tx_pkt == budget)
2450 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2453 bnapi->hw_tx_cons = hw_cons;
2454 bnapi->tx_cons = sw_cons;
2455 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2456 * before checking for netif_queue_stopped(). Without the
2457 * memory barrier, there is a small possibility that bnx2_start_xmit()
2458 * will miss it and cause the queue to be stopped forever.
2462 if (unlikely(netif_queue_stopped(bp->dev)) &&
2463 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2464 netif_tx_lock(bp->dev);
2465 if ((netif_queue_stopped(bp->dev)) &&
2466 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2467 netif_wake_queue(bp->dev);
2468 netif_tx_unlock(bp->dev);
2474 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2475 struct sk_buff *skb, int count)
2477 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2478 struct rx_bd *cons_bd, *prod_bd;
2481 u16 hw_prod = bnapi->rx_pg_prod, prod;
2482 u16 cons = bnapi->rx_pg_cons;
2484 for (i = 0; i < count; i++) {
2485 prod = RX_PG_RING_IDX(hw_prod);
2487 prod_rx_pg = &bp->rx_pg_ring[prod];
2488 cons_rx_pg = &bp->rx_pg_ring[cons];
2489 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2490 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2492 if (i == 0 && skb) {
2494 struct skb_shared_info *shinfo;
2496 shinfo = skb_shinfo(skb);
2498 page = shinfo->frags[shinfo->nr_frags].page;
2499 shinfo->frags[shinfo->nr_frags].page = NULL;
2500 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2501 PCI_DMA_FROMDEVICE);
2502 cons_rx_pg->page = page;
2503 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2507 prod_rx_pg->page = cons_rx_pg->page;
2508 cons_rx_pg->page = NULL;
2509 pci_unmap_addr_set(prod_rx_pg, mapping,
2510 pci_unmap_addr(cons_rx_pg, mapping));
2512 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2513 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2516 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2517 hw_prod = NEXT_RX_BD(hw_prod);
2519 bnapi->rx_pg_prod = hw_prod;
2520 bnapi->rx_pg_cons = cons;
2524 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2527 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2528 struct rx_bd *cons_bd, *prod_bd;
2530 cons_rx_buf = &bp->rx_buf_ring[cons];
2531 prod_rx_buf = &bp->rx_buf_ring[prod];
2533 pci_dma_sync_single_for_device(bp->pdev,
2534 pci_unmap_addr(cons_rx_buf, mapping),
2535 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2537 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2539 prod_rx_buf->skb = skb;
2544 pci_unmap_addr_set(prod_rx_buf, mapping,
2545 pci_unmap_addr(cons_rx_buf, mapping));
2547 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2548 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2549 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2550 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2554 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2555 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2559 u16 prod = ring_idx & 0xffff;
2561 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2562 if (unlikely(err)) {
2563 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2565 unsigned int raw_len = len + 4;
2566 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2568 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
2573 skb_reserve(skb, bp->rx_offset);
2574 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2575 PCI_DMA_FROMDEVICE);
2581 unsigned int i, frag_len, frag_size, pages;
2582 struct sw_pg *rx_pg;
2583 u16 pg_cons = bnapi->rx_pg_cons;
2584 u16 pg_prod = bnapi->rx_pg_prod;
2586 frag_size = len + 4 - hdr_len;
2587 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2588 skb_put(skb, hdr_len);
2590 for (i = 0; i < pages; i++) {
2591 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2592 if (unlikely(frag_len <= 4)) {
2593 unsigned int tail = 4 - frag_len;
2595 bnapi->rx_pg_cons = pg_cons;
2596 bnapi->rx_pg_prod = pg_prod;
2597 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2604 &skb_shinfo(skb)->frags[i - 1];
2606 skb->data_len -= tail;
2607 skb->truesize -= tail;
2611 rx_pg = &bp->rx_pg_ring[pg_cons];
2613 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2614 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2619 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2622 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2623 if (unlikely(err)) {
2624 bnapi->rx_pg_cons = pg_cons;
2625 bnapi->rx_pg_prod = pg_prod;
2626 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2631 frag_size -= frag_len;
2632 skb->data_len += frag_len;
2633 skb->truesize += frag_len;
2634 skb->len += frag_len;
2636 pg_prod = NEXT_RX_BD(pg_prod);
2637 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2639 bnapi->rx_pg_prod = pg_prod;
2640 bnapi->rx_pg_cons = pg_cons;
2646 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2648 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2650 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2656 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2658 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2659 struct l2_fhdr *rx_hdr;
2660 int rx_pkt = 0, pg_ring_used = 0;
2662 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2663 sw_cons = bnapi->rx_cons;
2664 sw_prod = bnapi->rx_prod;
2666 /* Memory barrier necessary as speculative reads of the rx
2667 * buffer can be ahead of the index in the status block
2670 while (sw_cons != hw_cons) {
2671 unsigned int len, hdr_len;
2673 struct sw_bd *rx_buf;
2674 struct sk_buff *skb;
2675 dma_addr_t dma_addr;
2677 sw_ring_cons = RX_RING_IDX(sw_cons);
2678 sw_ring_prod = RX_RING_IDX(sw_prod);
2680 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2685 dma_addr = pci_unmap_addr(rx_buf, mapping);
2687 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2688 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2690 rx_hdr = (struct l2_fhdr *) skb->data;
2691 len = rx_hdr->l2_fhdr_pkt_len;
2693 if ((status = rx_hdr->l2_fhdr_status) &
2694 (L2_FHDR_ERRORS_BAD_CRC |
2695 L2_FHDR_ERRORS_PHY_DECODE |
2696 L2_FHDR_ERRORS_ALIGNMENT |
2697 L2_FHDR_ERRORS_TOO_SHORT |
2698 L2_FHDR_ERRORS_GIANT_FRAME)) {
2700 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2705 if (status & L2_FHDR_STATUS_SPLIT) {
2706 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2708 } else if (len > bp->rx_jumbo_thresh) {
2709 hdr_len = bp->rx_jumbo_thresh;
2715 if (len <= bp->rx_copy_thresh) {
2716 struct sk_buff *new_skb;
2718 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2719 if (new_skb == NULL) {
2720 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2726 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2727 new_skb->data, len + 2);
2728 skb_reserve(new_skb, 2);
2729 skb_put(new_skb, len);
2731 bnx2_reuse_rx_skb(bp, bnapi, skb,
2732 sw_ring_cons, sw_ring_prod);
2735 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2736 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2739 skb->protocol = eth_type_trans(skb, bp->dev);
2741 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2742 (ntohs(skb->protocol) != 0x8100)) {
2749 skb->ip_summed = CHECKSUM_NONE;
2751 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2752 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2754 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2755 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2756 skb->ip_summed = CHECKSUM_UNNECESSARY;
2760 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2761 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2762 rx_hdr->l2_fhdr_vlan_tag);
2766 netif_receive_skb(skb);
2768 bp->dev->last_rx = jiffies;
2772 sw_cons = NEXT_RX_BD(sw_cons);
2773 sw_prod = NEXT_RX_BD(sw_prod);
2775 if ((rx_pkt == budget))
2778 /* Refresh hw_cons to see if there is new work */
2779 if (sw_cons == hw_cons) {
2780 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2784 bnapi->rx_cons = sw_cons;
2785 bnapi->rx_prod = sw_prod;
2788 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2791 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2793 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2801 /* MSI ISR - The only difference between this and the INTx ISR
2802 * is that the MSI interrupt is always serviced.
2805 bnx2_msi(int irq, void *dev_instance)
2807 struct net_device *dev = dev_instance;
2808 struct bnx2 *bp = netdev_priv(dev);
2809 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2811 prefetch(bnapi->status_blk);
2812 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2813 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2814 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2816 /* Return here if interrupt is disabled. */
2817 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2820 netif_rx_schedule(dev, &bnapi->napi);
2826 bnx2_msi_1shot(int irq, void *dev_instance)
2828 struct net_device *dev = dev_instance;
2829 struct bnx2 *bp = netdev_priv(dev);
2830 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2832 prefetch(bnapi->status_blk);
2834 /* Return here if interrupt is disabled. */
2835 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2838 netif_rx_schedule(dev, &bnapi->napi);
2844 bnx2_interrupt(int irq, void *dev_instance)
2846 struct net_device *dev = dev_instance;
2847 struct bnx2 *bp = netdev_priv(dev);
2848 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2849 struct status_block *sblk = bnapi->status_blk;
2851 /* When using INTx, it is possible for the interrupt to arrive
2852 * at the CPU before the status block posted prior to the
2853 * interrupt. Reading a register will flush the status block.
2854 * When using MSI, the MSI message will always complete after
2855 * the status block write.
2857 if ((sblk->status_idx == bnapi->last_status_idx) &&
2858 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2859 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2862 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2863 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2864 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2866 /* Read back to deassert IRQ immediately to avoid too many
2867 * spurious interrupts.
2869 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2871 /* Return here if interrupt is shared and is disabled. */
2872 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2875 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2876 bnapi->last_status_idx = sblk->status_idx;
2877 __netif_rx_schedule(dev, &bnapi->napi);
2884 bnx2_tx_msix(int irq, void *dev_instance)
2886 struct net_device *dev = dev_instance;
2887 struct bnx2 *bp = netdev_priv(dev);
2888 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2890 prefetch(bnapi->status_blk_msix);
2892 /* Return here if interrupt is disabled. */
2893 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2896 netif_rx_schedule(dev, &bnapi->napi);
2900 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2901 STATUS_ATTN_BITS_TIMER_ABORT)
2904 bnx2_has_work(struct bnx2_napi *bnapi)
2906 struct status_block *sblk = bnapi->status_blk;
2908 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2909 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2912 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2913 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2919 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
2921 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
2922 struct bnx2 *bp = bnapi->bp;
2924 struct status_block_msix *sblk = bnapi->status_blk_msix;
2927 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
2928 if (unlikely(work_done >= budget))
2931 bnapi->last_status_idx = sblk->status_idx;
2933 } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
2935 netif_rx_complete(bp->dev, napi);
2936 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
2937 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2938 bnapi->last_status_idx);
2942 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
2943 int work_done, int budget)
2945 struct status_block *sblk = bnapi->status_blk;
2946 u32 status_attn_bits = sblk->status_attn_bits;
2947 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2949 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2950 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2952 bnx2_phy_int(bp, bnapi);
2954 /* This is needed to take care of transient status
2955 * during link changes.
2957 REG_WR(bp, BNX2_HC_COMMAND,
2958 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2959 REG_RD(bp, BNX2_HC_COMMAND);
2962 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
2963 bnx2_tx_int(bp, bnapi, 0);
2965 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
2966 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
2971 static int bnx2_poll(struct napi_struct *napi, int budget)
2973 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
2974 struct bnx2 *bp = bnapi->bp;
2976 struct status_block *sblk = bnapi->status_blk;
2979 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
2981 if (unlikely(work_done >= budget))
2984 /* bnapi->last_status_idx is used below to tell the hw how
2985 * much work has been processed, so we must read it before
2986 * checking for more work.
2988 bnapi->last_status_idx = sblk->status_idx;
2990 if (likely(!bnx2_has_work(bnapi))) {
2991 netif_rx_complete(bp->dev, napi);
2992 if (likely(bp->flags & USING_MSI_OR_MSIX_FLAG)) {
2993 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2994 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2995 bnapi->last_status_idx);
2998 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2999 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3000 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3001 bnapi->last_status_idx);
3003 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3004 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3005 bnapi->last_status_idx);
3013 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3014 * from set_multicast.
3017 bnx2_set_rx_mode(struct net_device *dev)
3019 struct bnx2 *bp = netdev_priv(dev);
3020 u32 rx_mode, sort_mode;
3023 spin_lock_bh(&bp->phy_lock);
3025 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3026 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3027 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3029 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
3030 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3032 if (!(bp->flags & ASF_ENABLE_FLAG))
3033 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3035 if (dev->flags & IFF_PROMISC) {
3036 /* Promiscuous mode. */
3037 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3038 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3039 BNX2_RPM_SORT_USER0_PROM_VLAN;
3041 else if (dev->flags & IFF_ALLMULTI) {
3042 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3043 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3046 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3049 /* Accept one or more multicast(s). */
3050 struct dev_mc_list *mclist;
3051 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3056 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3058 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3059 i++, mclist = mclist->next) {
3061 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3063 regidx = (bit & 0xe0) >> 5;
3065 mc_filter[regidx] |= (1 << bit);
3068 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3069 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3073 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3076 if (rx_mode != bp->rx_mode) {
3077 bp->rx_mode = rx_mode;
3078 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3081 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3082 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3083 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3085 spin_unlock_bh(&bp->phy_lock);
3089 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
3096 for (i = 0; i < rv2p_code_len; i += 8) {
3097 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
3099 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
3102 if (rv2p_proc == RV2P_PROC1) {
3103 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3104 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3107 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3108 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3112 /* Reset the processor, un-stall is done later. */
3113 if (rv2p_proc == RV2P_PROC1) {
3114 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3117 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3122 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3129 val = REG_RD_IND(bp, cpu_reg->mode);
3130 val |= cpu_reg->mode_value_halt;
3131 REG_WR_IND(bp, cpu_reg->mode, val);
3132 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3134 /* Load the Text area. */
3135 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3139 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3144 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3145 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
3149 /* Load the Data area. */
3150 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3154 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3155 REG_WR_IND(bp, offset, fw->data[j]);
3159 /* Load the SBSS area. */
3160 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3164 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3165 REG_WR_IND(bp, offset, 0);
3169 /* Load the BSS area. */
3170 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3174 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3175 REG_WR_IND(bp, offset, 0);
3179 /* Load the Read-Only area. */
3180 offset = cpu_reg->spad_base +
3181 (fw->rodata_addr - cpu_reg->mips_view_base);
3185 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3186 REG_WR_IND(bp, offset, fw->rodata[j]);
3190 /* Clear the pre-fetch instruction. */
3191 REG_WR_IND(bp, cpu_reg->inst, 0);
3192 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3194 /* Start the CPU. */
3195 val = REG_RD_IND(bp, cpu_reg->mode);
3196 val &= ~cpu_reg->mode_value_halt;
3197 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3198 REG_WR_IND(bp, cpu_reg->mode, val);
3204 bnx2_init_cpus(struct bnx2 *bp)
3206 struct cpu_reg cpu_reg;
3211 /* Initialize the RV2P processor. */
3212 text = vmalloc(FW_BUF_SIZE);
3215 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3216 rv2p = bnx2_xi_rv2p_proc1;
3217 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3219 rv2p = bnx2_rv2p_proc1;
3220 rv2p_len = sizeof(bnx2_rv2p_proc1);
3222 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3226 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3228 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3229 rv2p = bnx2_xi_rv2p_proc2;
3230 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3232 rv2p = bnx2_rv2p_proc2;
3233 rv2p_len = sizeof(bnx2_rv2p_proc2);
3235 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3239 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3241 /* Initialize the RX Processor. */
3242 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3243 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3244 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3245 cpu_reg.state = BNX2_RXP_CPU_STATE;
3246 cpu_reg.state_value_clear = 0xffffff;
3247 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3248 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3249 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3250 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3251 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3252 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3253 cpu_reg.mips_view_base = 0x8000000;
3255 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3256 fw = &bnx2_rxp_fw_09;
3258 fw = &bnx2_rxp_fw_06;
3261 rc = load_cpu_fw(bp, &cpu_reg, fw);
3265 /* Initialize the TX Processor. */
3266 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3267 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3268 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3269 cpu_reg.state = BNX2_TXP_CPU_STATE;
3270 cpu_reg.state_value_clear = 0xffffff;
3271 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3272 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3273 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3274 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3275 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3276 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3277 cpu_reg.mips_view_base = 0x8000000;
3279 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3280 fw = &bnx2_txp_fw_09;
3282 fw = &bnx2_txp_fw_06;
3285 rc = load_cpu_fw(bp, &cpu_reg, fw);
3289 /* Initialize the TX Patch-up Processor. */
3290 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3291 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3292 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3293 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3294 cpu_reg.state_value_clear = 0xffffff;
3295 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3296 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3297 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3298 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3299 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3300 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3301 cpu_reg.mips_view_base = 0x8000000;
3303 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3304 fw = &bnx2_tpat_fw_09;
3306 fw = &bnx2_tpat_fw_06;
3309 rc = load_cpu_fw(bp, &cpu_reg, fw);
3313 /* Initialize the Completion Processor. */
3314 cpu_reg.mode = BNX2_COM_CPU_MODE;
3315 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3316 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3317 cpu_reg.state = BNX2_COM_CPU_STATE;
3318 cpu_reg.state_value_clear = 0xffffff;
3319 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3320 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3321 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3322 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3323 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3324 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3325 cpu_reg.mips_view_base = 0x8000000;
3327 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3328 fw = &bnx2_com_fw_09;
3330 fw = &bnx2_com_fw_06;
3333 rc = load_cpu_fw(bp, &cpu_reg, fw);
3337 /* Initialize the Command Processor. */
3338 cpu_reg.mode = BNX2_CP_CPU_MODE;
3339 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3340 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3341 cpu_reg.state = BNX2_CP_CPU_STATE;
3342 cpu_reg.state_value_clear = 0xffffff;
3343 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3344 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3345 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3346 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3347 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3348 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3349 cpu_reg.mips_view_base = 0x8000000;
3351 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3352 fw = &bnx2_cp_fw_09;
3354 fw = &bnx2_cp_fw_06;
3357 rc = load_cpu_fw(bp, &cpu_reg, fw);
3365 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3369 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3375 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3376 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3377 PCI_PM_CTRL_PME_STATUS);
3379 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3380 /* delay required during transition out of D3hot */
3383 val = REG_RD(bp, BNX2_EMAC_MODE);
3384 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3385 val &= ~BNX2_EMAC_MODE_MPKT;
3386 REG_WR(bp, BNX2_EMAC_MODE, val);
3388 val = REG_RD(bp, BNX2_RPM_CONFIG);
3389 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3390 REG_WR(bp, BNX2_RPM_CONFIG, val);
3401 autoneg = bp->autoneg;
3402 advertising = bp->advertising;
3404 if (bp->phy_port == PORT_TP) {
3405 bp->autoneg = AUTONEG_SPEED;
3406 bp->advertising = ADVERTISED_10baseT_Half |
3407 ADVERTISED_10baseT_Full |
3408 ADVERTISED_100baseT_Half |
3409 ADVERTISED_100baseT_Full |
3413 spin_lock_bh(&bp->phy_lock);
3414 bnx2_setup_phy(bp, bp->phy_port);
3415 spin_unlock_bh(&bp->phy_lock);
3417 bp->autoneg = autoneg;
3418 bp->advertising = advertising;
3420 bnx2_set_mac_addr(bp);
3422 val = REG_RD(bp, BNX2_EMAC_MODE);
3424 /* Enable port mode. */
3425 val &= ~BNX2_EMAC_MODE_PORT;
3426 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3427 BNX2_EMAC_MODE_ACPI_RCVD |
3428 BNX2_EMAC_MODE_MPKT;
3429 if (bp->phy_port == PORT_TP)
3430 val |= BNX2_EMAC_MODE_PORT_MII;
3432 val |= BNX2_EMAC_MODE_PORT_GMII;
3433 if (bp->line_speed == SPEED_2500)
3434 val |= BNX2_EMAC_MODE_25G_MODE;
3437 REG_WR(bp, BNX2_EMAC_MODE, val);
3439 /* receive all multicast */
3440 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3441 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3444 REG_WR(bp, BNX2_EMAC_RX_MODE,
3445 BNX2_EMAC_RX_MODE_SORT_MODE);
3447 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3448 BNX2_RPM_SORT_USER0_MC_EN;
3449 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3450 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3451 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3452 BNX2_RPM_SORT_USER0_ENA);
3454 /* Need to enable EMAC and RPM for WOL. */
3455 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3456 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3457 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3458 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3460 val = REG_RD(bp, BNX2_RPM_CONFIG);
3461 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3462 REG_WR(bp, BNX2_RPM_CONFIG, val);
3464 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3467 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3470 if (!(bp->flags & NO_WOL_FLAG))
3471 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3473 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3474 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3475 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3484 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3486 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3489 /* No more memory access after this point until
3490 * device is brought back to D0.
3502 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3507 /* Request access to the flash interface. */
3508 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3509 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3510 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3511 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3517 if (j >= NVRAM_TIMEOUT_COUNT)
3524 bnx2_release_nvram_lock(struct bnx2 *bp)
3529 /* Relinquish nvram interface. */
3530 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3532 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3533 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3534 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3540 if (j >= NVRAM_TIMEOUT_COUNT)
3548 bnx2_enable_nvram_write(struct bnx2 *bp)
3552 val = REG_RD(bp, BNX2_MISC_CFG);
3553 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3555 if (bp->flash_info->flags & BNX2_NV_WREN) {
3558 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3559 REG_WR(bp, BNX2_NVM_COMMAND,
3560 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3562 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3565 val = REG_RD(bp, BNX2_NVM_COMMAND);
3566 if (val & BNX2_NVM_COMMAND_DONE)
3570 if (j >= NVRAM_TIMEOUT_COUNT)
3577 bnx2_disable_nvram_write(struct bnx2 *bp)
3581 val = REG_RD(bp, BNX2_MISC_CFG);
3582 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3587 bnx2_enable_nvram_access(struct bnx2 *bp)
3591 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3592 /* Enable both bits, even on read. */
3593 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3594 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3598 bnx2_disable_nvram_access(struct bnx2 *bp)
3602 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3603 /* Disable both bits, even after read. */
3604 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3605 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3606 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3610 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3615 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3616 /* Buffered flash, no erase needed */
3619 /* Build an erase command */
3620 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3621 BNX2_NVM_COMMAND_DOIT;
3623 /* Need to clear DONE bit separately. */
3624 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3626 /* Address of the NVRAM to read from. */
3627 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3629 /* Issue an erase command. */
3630 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3632 /* Wait for completion. */
3633 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3638 val = REG_RD(bp, BNX2_NVM_COMMAND);
3639 if (val & BNX2_NVM_COMMAND_DONE)
3643 if (j >= NVRAM_TIMEOUT_COUNT)
3650 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3655 /* Build the command word. */
3656 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3658 /* Calculate an offset of a buffered flash, not needed for 5709. */
3659 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3660 offset = ((offset / bp->flash_info->page_size) <<
3661 bp->flash_info->page_bits) +
3662 (offset % bp->flash_info->page_size);
3665 /* Need to clear DONE bit separately. */
3666 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3668 /* Address of the NVRAM to read from. */
3669 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3671 /* Issue a read command. */
3672 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3674 /* Wait for completion. */
3675 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3680 val = REG_RD(bp, BNX2_NVM_COMMAND);
3681 if (val & BNX2_NVM_COMMAND_DONE) {
3682 val = REG_RD(bp, BNX2_NVM_READ);
3684 val = be32_to_cpu(val);
3685 memcpy(ret_val, &val, 4);
3689 if (j >= NVRAM_TIMEOUT_COUNT)
3697 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3702 /* Build the command word. */
3703 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3705 /* Calculate an offset of a buffered flash, not needed for 5709. */
3706 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3707 offset = ((offset / bp->flash_info->page_size) <<
3708 bp->flash_info->page_bits) +
3709 (offset % bp->flash_info->page_size);
3712 /* Need to clear DONE bit separately. */
3713 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3715 memcpy(&val32, val, 4);
3716 val32 = cpu_to_be32(val32);
3718 /* Write the data. */
3719 REG_WR(bp, BNX2_NVM_WRITE, val32);
3721 /* Address of the NVRAM to write to. */
3722 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3724 /* Issue the write command. */
3725 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3727 /* Wait for completion. */
3728 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3731 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3734 if (j >= NVRAM_TIMEOUT_COUNT)
3741 bnx2_init_nvram(struct bnx2 *bp)
3744 int j, entry_count, rc = 0;
3745 struct flash_spec *flash;
3747 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3748 bp->flash_info = &flash_5709;
3749 goto get_flash_size;
3752 /* Determine the selected interface. */
3753 val = REG_RD(bp, BNX2_NVM_CFG1);
3755 entry_count = ARRAY_SIZE(flash_table);
3757 if (val & 0x40000000) {
3759 /* Flash interface has been reconfigured */
3760 for (j = 0, flash = &flash_table[0]; j < entry_count;
3762 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3763 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3764 bp->flash_info = flash;
3771 /* Not yet been reconfigured */
3773 if (val & (1 << 23))
3774 mask = FLASH_BACKUP_STRAP_MASK;
3776 mask = FLASH_STRAP_MASK;
3778 for (j = 0, flash = &flash_table[0]; j < entry_count;
3781 if ((val & mask) == (flash->strapping & mask)) {
3782 bp->flash_info = flash;
3784 /* Request access to the flash interface. */
3785 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3788 /* Enable access to flash interface */
3789 bnx2_enable_nvram_access(bp);
3791 /* Reconfigure the flash interface */
3792 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3793 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3794 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3795 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3797 /* Disable access to flash interface */
3798 bnx2_disable_nvram_access(bp);
3799 bnx2_release_nvram_lock(bp);
3804 } /* if (val & 0x40000000) */
3806 if (j == entry_count) {
3807 bp->flash_info = NULL;
3808 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3813 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3814 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3816 bp->flash_size = val;
3818 bp->flash_size = bp->flash_info->total_size;
3824 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3828 u32 cmd_flags, offset32, len32, extra;
3833 /* Request access to the flash interface. */
3834 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3837 /* Enable access to flash interface */
3838 bnx2_enable_nvram_access(bp);
3851 pre_len = 4 - (offset & 3);
3853 if (pre_len >= len32) {
3855 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3856 BNX2_NVM_COMMAND_LAST;
3859 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3862 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3867 memcpy(ret_buf, buf + (offset & 3), pre_len);
3874 extra = 4 - (len32 & 3);
3875 len32 = (len32 + 4) & ~3;
3882 cmd_flags = BNX2_NVM_COMMAND_LAST;
3884 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3885 BNX2_NVM_COMMAND_LAST;
3887 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3889 memcpy(ret_buf, buf, 4 - extra);
3891 else if (len32 > 0) {
3894 /* Read the first word. */
3898 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3900 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3902 /* Advance to the next dword. */
3907 while (len32 > 4 && rc == 0) {
3908 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3910 /* Advance to the next dword. */
3919 cmd_flags = BNX2_NVM_COMMAND_LAST;
3920 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3922 memcpy(ret_buf, buf, 4 - extra);
3925 /* Disable access to flash interface */
3926 bnx2_disable_nvram_access(bp);
3928 bnx2_release_nvram_lock(bp);
3934 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3937 u32 written, offset32, len32;
3938 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3940 int align_start, align_end;
3945 align_start = align_end = 0;
3947 if ((align_start = (offset32 & 3))) {
3949 len32 += align_start;
3952 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3957 align_end = 4 - (len32 & 3);
3959 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3963 if (align_start || align_end) {
3964 align_buf = kmalloc(len32, GFP_KERNEL);
3965 if (align_buf == NULL)
3968 memcpy(align_buf, start, 4);
3971 memcpy(align_buf + len32 - 4, end, 4);
3973 memcpy(align_buf + align_start, data_buf, buf_size);
3977 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3978 flash_buffer = kmalloc(264, GFP_KERNEL);
3979 if (flash_buffer == NULL) {
3981 goto nvram_write_end;
3986 while ((written < len32) && (rc == 0)) {
3987 u32 page_start, page_end, data_start, data_end;
3988 u32 addr, cmd_flags;
3991 /* Find the page_start addr */
3992 page_start = offset32 + written;
3993 page_start -= (page_start % bp->flash_info->page_size);
3994 /* Find the page_end addr */
3995 page_end = page_start + bp->flash_info->page_size;
3996 /* Find the data_start addr */
3997 data_start = (written == 0) ? offset32 : page_start;
3998 /* Find the data_end addr */
3999 data_end = (page_end > offset32 + len32) ?
4000 (offset32 + len32) : page_end;
4002 /* Request access to the flash interface. */
4003 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4004 goto nvram_write_end;
4006 /* Enable access to flash interface */
4007 bnx2_enable_nvram_access(bp);
4009 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4010 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4013 /* Read the whole page into the buffer
4014 * (non-buffer flash only) */
4015 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4016 if (j == (bp->flash_info->page_size - 4)) {
4017 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4019 rc = bnx2_nvram_read_dword(bp,
4025 goto nvram_write_end;
4031 /* Enable writes to flash interface (unlock write-protect) */
4032 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4033 goto nvram_write_end;
4035 /* Loop to write back the buffer data from page_start to
4038 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4039 /* Erase the page */
4040 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4041 goto nvram_write_end;
4043 /* Re-enable the write again for the actual write */
4044 bnx2_enable_nvram_write(bp);
4046 for (addr = page_start; addr < data_start;
4047 addr += 4, i += 4) {
4049 rc = bnx2_nvram_write_dword(bp, addr,
4050 &flash_buffer[i], cmd_flags);
4053 goto nvram_write_end;
4059 /* Loop to write the new data from data_start to data_end */
4060 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4061 if ((addr == page_end - 4) ||
4062 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4063 (addr == data_end - 4))) {
4065 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4067 rc = bnx2_nvram_write_dword(bp, addr, buf,
4071 goto nvram_write_end;
4077 /* Loop to write back the buffer data from data_end
4079 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4080 for (addr = data_end; addr < page_end;
4081 addr += 4, i += 4) {
4083 if (addr == page_end-4) {
4084 cmd_flags = BNX2_NVM_COMMAND_LAST;
4086 rc = bnx2_nvram_write_dword(bp, addr,
4087 &flash_buffer[i], cmd_flags);
4090 goto nvram_write_end;
4096 /* Disable writes to flash interface (lock write-protect) */
4097 bnx2_disable_nvram_write(bp);
4099 /* Disable access to flash interface */
4100 bnx2_disable_nvram_access(bp);
4101 bnx2_release_nvram_lock(bp);
4103 /* Increment written */
4104 written += data_end - data_start;
4108 kfree(flash_buffer);
4114 bnx2_init_remote_phy(struct bnx2 *bp)
4118 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4119 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4122 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4123 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4126 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4127 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4129 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4130 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4131 bp->phy_port = PORT_FIBRE;
4133 bp->phy_port = PORT_TP;
4135 if (netif_running(bp->dev)) {
4138 if (val & BNX2_LINK_STATUS_LINK_UP) {
4140 netif_carrier_on(bp->dev);
4143 netif_carrier_off(bp->dev);
4145 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4146 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4147 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4154 bnx2_setup_msix_tbl(struct bnx2 *bp)
4156 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4158 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4159 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4163 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4169 /* Wait for the current PCI transaction to complete before
4170 * issuing a reset. */
4171 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4172 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4173 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4174 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4175 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4176 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4179 /* Wait for the firmware to tell us it is ok to issue a reset. */
4180 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4182 /* Deposit a driver reset signature so the firmware knows that
4183 * this is a soft reset. */
4184 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
4185 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4187 /* Do a dummy read to force the chip to complete all current transaction
4188 * before we issue a reset. */
4189 val = REG_RD(bp, BNX2_MISC_ID);
4191 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4192 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4193 REG_RD(bp, BNX2_MISC_COMMAND);
4196 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4197 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4199 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4202 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4203 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4204 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4207 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4209 /* Reading back any register after chip reset will hang the
4210 * bus on 5706 A0 and A1. The msleep below provides plenty
4211 * of margin for write posting.
4213 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4214 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4217 /* Reset takes approximate 30 usec */
4218 for (i = 0; i < 10; i++) {
4219 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4220 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4221 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4226 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4227 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4228 printk(KERN_ERR PFX "Chip reset did not complete\n");
4233 /* Make sure byte swapping is properly configured. */
4234 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4235 if (val != 0x01020304) {
4236 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4240 /* Wait for the firmware to finish its initialization. */
4241 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4245 spin_lock_bh(&bp->phy_lock);
4246 old_port = bp->phy_port;
4247 bnx2_init_remote_phy(bp);
4248 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
4249 bnx2_set_default_remote_link(bp);
4250 spin_unlock_bh(&bp->phy_lock);
4252 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4253 /* Adjust the voltage regular to two steps lower. The default
4254 * of this register is 0x0000000e. */
4255 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4257 /* Remove bad rbuf memory from the free pool. */
4258 rc = bnx2_alloc_bad_rbuf(bp);
4261 if (bp->flags & USING_MSIX_FLAG)
4262 bnx2_setup_msix_tbl(bp);
4268 bnx2_init_chip(struct bnx2 *bp)
4273 /* Make sure the interrupt is not active. */
4274 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4276 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4277 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4279 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4281 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4282 DMA_READ_CHANS << 12 |
4283 DMA_WRITE_CHANS << 16;
4285 val |= (0x2 << 20) | (1 << 11);
4287 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
4290 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4291 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4292 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4294 REG_WR(bp, BNX2_DMA_CONFIG, val);
4296 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4297 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4298 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4299 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4302 if (bp->flags & PCIX_FLAG) {
4305 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4307 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4308 val16 & ~PCI_X_CMD_ERO);
4311 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4312 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4313 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4314 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4316 /* Initialize context mapping and zero out the quick contexts. The
4317 * context block must have already been enabled. */
4318 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4319 rc = bnx2_init_5709_context(bp);
4323 bnx2_init_context(bp);
4325 if ((rc = bnx2_init_cpus(bp)) != 0)
4328 bnx2_init_nvram(bp);
4330 bnx2_set_mac_addr(bp);
4332 val = REG_RD(bp, BNX2_MQ_CONFIG);
4333 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4334 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4335 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4336 val |= BNX2_MQ_CONFIG_HALT_DIS;
4338 REG_WR(bp, BNX2_MQ_CONFIG, val);
4340 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4341 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4342 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4344 val = (BCM_PAGE_BITS - 8) << 24;
4345 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4347 /* Configure page size. */
4348 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4349 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4350 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4351 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4353 val = bp->mac_addr[0] +
4354 (bp->mac_addr[1] << 8) +
4355 (bp->mac_addr[2] << 16) +
4357 (bp->mac_addr[4] << 8) +
4358 (bp->mac_addr[5] << 16);
4359 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4361 /* Program the MTU. Also include 4 bytes for CRC32. */
4362 val = bp->dev->mtu + ETH_HLEN + 4;
4363 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4364 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4365 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4367 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4368 bp->bnx2_napi[i].last_status_idx = 0;
4370 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4372 /* Set up how to generate a link change interrupt. */
4373 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4375 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4376 (u64) bp->status_blk_mapping & 0xffffffff);
4377 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4379 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4380 (u64) bp->stats_blk_mapping & 0xffffffff);
4381 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4382 (u64) bp->stats_blk_mapping >> 32);
4384 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4385 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4387 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4388 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4390 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4391 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4393 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4395 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4397 REG_WR(bp, BNX2_HC_COM_TICKS,
4398 (bp->com_ticks_int << 16) | bp->com_ticks);
4400 REG_WR(bp, BNX2_HC_CMD_TICKS,
4401 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4403 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4404 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4406 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4407 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4409 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4410 val = BNX2_HC_CONFIG_COLLECT_STATS;
4412 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4413 BNX2_HC_CONFIG_COLLECT_STATS;
4416 if (bp->flags & USING_MSIX_FLAG) {
4417 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4418 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4420 REG_WR(bp, BNX2_HC_SB_CONFIG_1,
4421 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4422 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4424 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP_1,
4425 (bp->tx_quick_cons_trip_int << 16) |
4426 bp->tx_quick_cons_trip);
4428 REG_WR(bp, BNX2_HC_TX_TICKS_1,
4429 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4431 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4434 if (bp->flags & ONE_SHOT_MSI_FLAG)
4435 val |= BNX2_HC_CONFIG_ONE_SHOT;
4437 REG_WR(bp, BNX2_HC_CONFIG, val);
4439 /* Clear internal stats counters. */
4440 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4442 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4444 /* Initialize the receive filter. */
4445 bnx2_set_rx_mode(bp->dev);
4447 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4448 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4449 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4450 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4452 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4455 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4456 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4460 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4466 bnx2_clear_ring_states(struct bnx2 *bp)
4468 struct bnx2_napi *bnapi;
4471 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4472 bnapi = &bp->bnx2_napi[i];
4475 bnapi->hw_tx_cons = 0;
4476 bnapi->rx_prod_bseq = 0;
4479 bnapi->rx_pg_prod = 0;
4480 bnapi->rx_pg_cons = 0;
4485 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4487 u32 val, offset0, offset1, offset2, offset3;
4489 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4490 offset0 = BNX2_L2CTX_TYPE_XI;
4491 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4492 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4493 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4495 offset0 = BNX2_L2CTX_TYPE;
4496 offset1 = BNX2_L2CTX_CMD_TYPE;
4497 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4498 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4500 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4501 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4503 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4504 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4506 val = (u64) bp->tx_desc_mapping >> 32;
4507 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4509 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4510 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4514 bnx2_init_tx_ring(struct bnx2 *bp)
4518 struct bnx2_napi *bnapi;
4521 if (bp->flags & USING_MSIX_FLAG) {
4523 bp->tx_vec = BNX2_TX_VEC;
4524 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4527 bnapi = &bp->bnx2_napi[bp->tx_vec];
4529 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4531 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4533 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4534 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4537 bp->tx_prod_bseq = 0;
4539 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4540 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4542 bnx2_init_tx_context(bp, cid);
4546 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4552 for (i = 0; i < num_rings; i++) {
4555 rxbd = &rx_ring[i][0];
4556 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4557 rxbd->rx_bd_len = buf_size;
4558 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4560 if (i == (num_rings - 1))
4564 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4565 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4570 bnx2_init_rx_ring(struct bnx2 *bp)
4573 u16 prod, ring_prod;
4574 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4575 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4577 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4578 bp->rx_buf_use_size, bp->rx_max_ring);
4580 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4581 if (bp->rx_pg_ring_size) {
4582 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4583 bp->rx_pg_desc_mapping,
4584 PAGE_SIZE, bp->rx_max_pg_ring);
4585 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4586 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4587 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4588 BNX2_L2CTX_RBDC_JUMBO_KEY);
4590 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4591 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4593 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4594 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4596 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4597 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4600 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4601 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4603 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4605 val = (u64) bp->rx_desc_mapping[0] >> 32;
4606 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4608 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4609 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4611 ring_prod = prod = bnapi->rx_pg_prod;
4612 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4613 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4615 prod = NEXT_RX_BD(prod);
4616 ring_prod = RX_PG_RING_IDX(prod);
4618 bnapi->rx_pg_prod = prod;
4620 ring_prod = prod = bnapi->rx_prod;
4621 for (i = 0; i < bp->rx_ring_size; i++) {
4622 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4625 prod = NEXT_RX_BD(prod);
4626 ring_prod = RX_RING_IDX(prod);
4628 bnapi->rx_prod = prod;
4630 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4632 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4634 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
4637 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4639 u32 max, num_rings = 1;
4641 while (ring_size > MAX_RX_DESC_CNT) {
4642 ring_size -= MAX_RX_DESC_CNT;
4645 /* round to next power of 2 */
4647 while ((max & num_rings) == 0)
4650 if (num_rings != max)
4657 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4659 u32 rx_size, rx_space, jumbo_size;
4661 /* 8 for CRC and VLAN */
4662 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4664 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4665 sizeof(struct skb_shared_info);
4667 bp->rx_copy_thresh = RX_COPY_THRESH;
4668 bp->rx_pg_ring_size = 0;
4669 bp->rx_max_pg_ring = 0;
4670 bp->rx_max_pg_ring_idx = 0;
4671 if ((rx_space > PAGE_SIZE) && !(bp->flags & JUMBO_BROKEN_FLAG)) {
4672 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4674 jumbo_size = size * pages;
4675 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4676 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4678 bp->rx_pg_ring_size = jumbo_size;
4679 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4681 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4682 rx_size = RX_COPY_THRESH + bp->rx_offset;
4683 bp->rx_copy_thresh = 0;
4686 bp->rx_buf_use_size = rx_size;
4688 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4689 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4690 bp->rx_ring_size = size;
4691 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4692 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4696 bnx2_free_tx_skbs(struct bnx2 *bp)
4700 if (bp->tx_buf_ring == NULL)
4703 for (i = 0; i < TX_DESC_CNT; ) {
4704 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4705 struct sk_buff *skb = tx_buf->skb;
4713 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4714 skb_headlen(skb), PCI_DMA_TODEVICE);
4718 last = skb_shinfo(skb)->nr_frags;
4719 for (j = 0; j < last; j++) {
4720 tx_buf = &bp->tx_buf_ring[i + j + 1];
4721 pci_unmap_page(bp->pdev,
4722 pci_unmap_addr(tx_buf, mapping),
4723 skb_shinfo(skb)->frags[j].size,
4733 bnx2_free_rx_skbs(struct bnx2 *bp)
4737 if (bp->rx_buf_ring == NULL)
4740 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4741 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4742 struct sk_buff *skb = rx_buf->skb;
4747 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4748 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4754 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4755 bnx2_free_rx_page(bp, i);
4759 bnx2_free_skbs(struct bnx2 *bp)
4761 bnx2_free_tx_skbs(bp);
4762 bnx2_free_rx_skbs(bp);
4766 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4770 rc = bnx2_reset_chip(bp, reset_code);
4775 if ((rc = bnx2_init_chip(bp)) != 0)
4778 bnx2_clear_ring_states(bp);
4779 bnx2_init_tx_ring(bp);
4780 bnx2_init_rx_ring(bp);
4785 bnx2_init_nic(struct bnx2 *bp)
4789 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4792 spin_lock_bh(&bp->phy_lock);
4795 spin_unlock_bh(&bp->phy_lock);
4800 bnx2_test_registers(struct bnx2 *bp)
4804 static const struct {
4807 #define BNX2_FL_NOT_5709 1
4811 { 0x006c, 0, 0x00000000, 0x0000003f },
4812 { 0x0090, 0, 0xffffffff, 0x00000000 },
4813 { 0x0094, 0, 0x00000000, 0x00000000 },
4815 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4816 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4817 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4818 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4819 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4820 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4821 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4822 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4823 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4825 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4826 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4827 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4828 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4829 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4830 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4832 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4833 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4834 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4836 { 0x1000, 0, 0x00000000, 0x00000001 },
4837 { 0x1004, 0, 0x00000000, 0x000f0001 },
4839 { 0x1408, 0, 0x01c00800, 0x00000000 },
4840 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4841 { 0x14a8, 0, 0x00000000, 0x000001ff },
4842 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4843 { 0x14b0, 0, 0x00000002, 0x00000001 },
4844 { 0x14b8, 0, 0x00000000, 0x00000000 },
4845 { 0x14c0, 0, 0x00000000, 0x00000009 },
4846 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4847 { 0x14cc, 0, 0x00000000, 0x00000001 },
4848 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4850 { 0x1800, 0, 0x00000000, 0x00000001 },
4851 { 0x1804, 0, 0x00000000, 0x00000003 },
4853 { 0x2800, 0, 0x00000000, 0x00000001 },
4854 { 0x2804, 0, 0x00000000, 0x00003f01 },
4855 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4856 { 0x2810, 0, 0xffff0000, 0x00000000 },
4857 { 0x2814, 0, 0xffff0000, 0x00000000 },
4858 { 0x2818, 0, 0xffff0000, 0x00000000 },
4859 { 0x281c, 0, 0xffff0000, 0x00000000 },
4860 { 0x2834, 0, 0xffffffff, 0x00000000 },
4861 { 0x2840, 0, 0x00000000, 0xffffffff },
4862 { 0x2844, 0, 0x00000000, 0xffffffff },
4863 { 0x2848, 0, 0xffffffff, 0x00000000 },
4864 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4866 { 0x2c00, 0, 0x00000000, 0x00000011 },
4867 { 0x2c04, 0, 0x00000000, 0x00030007 },
4869 { 0x3c00, 0, 0x00000000, 0x00000001 },
4870 { 0x3c04, 0, 0x00000000, 0x00070000 },
4871 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4872 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4873 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4874 { 0x3c14, 0, 0x00000000, 0xffffffff },
4875 { 0x3c18, 0, 0x00000000, 0xffffffff },
4876 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4877 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4879 { 0x5004, 0, 0x00000000, 0x0000007f },
4880 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4882 { 0x5c00, 0, 0x00000000, 0x00000001 },
4883 { 0x5c04, 0, 0x00000000, 0x0003000f },
4884 { 0x5c08, 0, 0x00000003, 0x00000000 },
4885 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4886 { 0x5c10, 0, 0x00000000, 0xffffffff },
4887 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4888 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4889 { 0x5c88, 0, 0x00000000, 0x00077373 },
4890 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4892 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4893 { 0x680c, 0, 0xffffffff, 0x00000000 },
4894 { 0x6810, 0, 0xffffffff, 0x00000000 },
4895 { 0x6814, 0, 0xffffffff, 0x00000000 },
4896 { 0x6818, 0, 0xffffffff, 0x00000000 },
4897 { 0x681c, 0, 0xffffffff, 0x00000000 },
4898 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4899 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4900 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4901 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4902 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4903 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4904 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4905 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4906 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4907 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4908 { 0x684c, 0, 0xffffffff, 0x00000000 },
4909 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4910 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4911 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4912 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4913 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4914 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4916 { 0xffff, 0, 0x00000000, 0x00000000 },
4921 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4924 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4925 u32 offset, rw_mask, ro_mask, save_val, val;
4926 u16 flags = reg_tbl[i].flags;
4928 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4931 offset = (u32) reg_tbl[i].offset;
4932 rw_mask = reg_tbl[i].rw_mask;
4933 ro_mask = reg_tbl[i].ro_mask;
4935 save_val = readl(bp->regview + offset);
4937 writel(0, bp->regview + offset);
4939 val = readl(bp->regview + offset);
4940 if ((val & rw_mask) != 0) {
4944 if ((val & ro_mask) != (save_val & ro_mask)) {
4948 writel(0xffffffff, bp->regview + offset);
4950 val = readl(bp->regview + offset);
4951 if ((val & rw_mask) != rw_mask) {
4955 if ((val & ro_mask) != (save_val & ro_mask)) {
4959 writel(save_val, bp->regview + offset);
4963 writel(save_val, bp->regview + offset);
4971 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4973 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4974 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4977 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4980 for (offset = 0; offset < size; offset += 4) {
4982 REG_WR_IND(bp, start + offset, test_pattern[i]);
4984 if (REG_RD_IND(bp, start + offset) !=
4994 bnx2_test_memory(struct bnx2 *bp)
4998 static struct mem_entry {
5001 } mem_tbl_5706[] = {
5002 { 0x60000, 0x4000 },
5003 { 0xa0000, 0x3000 },
5004 { 0xe0000, 0x4000 },
5005 { 0x120000, 0x4000 },
5006 { 0x1a0000, 0x4000 },
5007 { 0x160000, 0x4000 },
5011 { 0x60000, 0x4000 },
5012 { 0xa0000, 0x3000 },
5013 { 0xe0000, 0x4000 },
5014 { 0x120000, 0x4000 },
5015 { 0x1a0000, 0x4000 },
5018 struct mem_entry *mem_tbl;
5020 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5021 mem_tbl = mem_tbl_5709;
5023 mem_tbl = mem_tbl_5706;
5025 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5026 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5027 mem_tbl[i].len)) != 0) {
5035 #define BNX2_MAC_LOOPBACK 0
5036 #define BNX2_PHY_LOOPBACK 1
5039 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5041 unsigned int pkt_size, num_pkts, i;
5042 struct sk_buff *skb, *rx_skb;
5043 unsigned char *packet;
5044 u16 rx_start_idx, rx_idx;
5047 struct sw_bd *rx_buf;
5048 struct l2_fhdr *rx_hdr;
5050 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5053 if (bp->flags & USING_MSIX_FLAG)
5054 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5056 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5057 bp->loopback = MAC_LOOPBACK;
5058 bnx2_set_mac_loopback(bp);
5060 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5061 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5064 bp->loopback = PHY_LOOPBACK;
5065 bnx2_set_phy_loopback(bp);
5070 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5071 skb = netdev_alloc_skb(bp->dev, pkt_size);
5074 packet = skb_put(skb, pkt_size);
5075 memcpy(packet, bp->dev->dev_addr, 6);
5076 memset(packet + 6, 0x0, 8);
5077 for (i = 14; i < pkt_size; i++)
5078 packet[i] = (unsigned char) (i & 0xff);
5080 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5083 REG_WR(bp, BNX2_HC_COMMAND,
5084 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5086 REG_RD(bp, BNX2_HC_COMMAND);
5089 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5093 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5095 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5096 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5097 txbd->tx_bd_mss_nbytes = pkt_size;
5098 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5101 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5102 bp->tx_prod_bseq += pkt_size;
5104 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
5105 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5109 REG_WR(bp, BNX2_HC_COMMAND,
5110 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5112 REG_RD(bp, BNX2_HC_COMMAND);
5116 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5119 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5120 goto loopback_test_done;
5122 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5123 if (rx_idx != rx_start_idx + num_pkts) {
5124 goto loopback_test_done;
5127 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5128 rx_skb = rx_buf->skb;
5130 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5131 skb_reserve(rx_skb, bp->rx_offset);
5133 pci_dma_sync_single_for_cpu(bp->pdev,
5134 pci_unmap_addr(rx_buf, mapping),
5135 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5137 if (rx_hdr->l2_fhdr_status &
5138 (L2_FHDR_ERRORS_BAD_CRC |
5139 L2_FHDR_ERRORS_PHY_DECODE |
5140 L2_FHDR_ERRORS_ALIGNMENT |
5141 L2_FHDR_ERRORS_TOO_SHORT |
5142 L2_FHDR_ERRORS_GIANT_FRAME)) {
5144 goto loopback_test_done;
5147 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5148 goto loopback_test_done;
5151 for (i = 14; i < pkt_size; i++) {
5152 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5153 goto loopback_test_done;
5164 #define BNX2_MAC_LOOPBACK_FAILED 1
5165 #define BNX2_PHY_LOOPBACK_FAILED 2
5166 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5167 BNX2_PHY_LOOPBACK_FAILED)
5170 bnx2_test_loopback(struct bnx2 *bp)
5174 if (!netif_running(bp->dev))
5175 return BNX2_LOOPBACK_FAILED;
5177 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5178 spin_lock_bh(&bp->phy_lock);
5180 spin_unlock_bh(&bp->phy_lock);
5181 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5182 rc |= BNX2_MAC_LOOPBACK_FAILED;
5183 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5184 rc |= BNX2_PHY_LOOPBACK_FAILED;
5188 #define NVRAM_SIZE 0x200
5189 #define CRC32_RESIDUAL 0xdebb20e3
5192 bnx2_test_nvram(struct bnx2 *bp)
5194 u32 buf[NVRAM_SIZE / 4];
5195 u8 *data = (u8 *) buf;
5199 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5200 goto test_nvram_done;
5202 magic = be32_to_cpu(buf[0]);
5203 if (magic != 0x669955aa) {
5205 goto test_nvram_done;
5208 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5209 goto test_nvram_done;
5211 csum = ether_crc_le(0x100, data);
5212 if (csum != CRC32_RESIDUAL) {
5214 goto test_nvram_done;
5217 csum = ether_crc_le(0x100, data + 0x100);
5218 if (csum != CRC32_RESIDUAL) {
5227 bnx2_test_link(struct bnx2 *bp)
5231 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5236 spin_lock_bh(&bp->phy_lock);
5237 bnx2_enable_bmsr1(bp);
5238 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5239 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5240 bnx2_disable_bmsr1(bp);
5241 spin_unlock_bh(&bp->phy_lock);
5243 if (bmsr & BMSR_LSTATUS) {
5250 bnx2_test_intr(struct bnx2 *bp)
5255 if (!netif_running(bp->dev))
5258 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5260 /* This register is not touched during run-time. */
5261 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5262 REG_RD(bp, BNX2_HC_COMMAND);
5264 for (i = 0; i < 10; i++) {
5265 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5271 msleep_interruptible(10);
5280 bnx2_5706_serdes_timer(struct bnx2 *bp)
5282 spin_lock(&bp->phy_lock);
5283 if (bp->serdes_an_pending)
5284 bp->serdes_an_pending--;
5285 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5288 bp->current_interval = bp->timer_interval;
5290 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5292 if (bmcr & BMCR_ANENABLE) {
5295 bnx2_write_phy(bp, 0x1c, 0x7c00);
5296 bnx2_read_phy(bp, 0x1c, &phy1);
5298 bnx2_write_phy(bp, 0x17, 0x0f01);
5299 bnx2_read_phy(bp, 0x15, &phy2);
5300 bnx2_write_phy(bp, 0x17, 0x0f01);
5301 bnx2_read_phy(bp, 0x15, &phy2);
5303 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5304 !(phy2 & 0x20)) { /* no CONFIG */
5306 bmcr &= ~BMCR_ANENABLE;
5307 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5308 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5309 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5313 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5314 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5317 bnx2_write_phy(bp, 0x17, 0x0f01);
5318 bnx2_read_phy(bp, 0x15, &phy2);
5322 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5323 bmcr |= BMCR_ANENABLE;
5324 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5326 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5329 bp->current_interval = bp->timer_interval;
5331 spin_unlock(&bp->phy_lock);
5335 bnx2_5708_serdes_timer(struct bnx2 *bp)
5337 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5340 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5341 bp->serdes_an_pending = 0;
5345 spin_lock(&bp->phy_lock);
5346 if (bp->serdes_an_pending)
5347 bp->serdes_an_pending--;
5348 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5351 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5352 if (bmcr & BMCR_ANENABLE) {
5353 bnx2_enable_forced_2g5(bp);
5354 bp->current_interval = SERDES_FORCED_TIMEOUT;
5356 bnx2_disable_forced_2g5(bp);
5357 bp->serdes_an_pending = 2;
5358 bp->current_interval = bp->timer_interval;
5362 bp->current_interval = bp->timer_interval;
5364 spin_unlock(&bp->phy_lock);
5368 bnx2_timer(unsigned long data)
5370 struct bnx2 *bp = (struct bnx2 *) data;
5372 if (!netif_running(bp->dev))
5375 if (atomic_read(&bp->intr_sem) != 0)
5376 goto bnx2_restart_timer;
5378 bnx2_send_heart_beat(bp);
5380 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5382 /* workaround occasional corrupted counters */
5383 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5384 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5385 BNX2_HC_COMMAND_STATS_NOW);
5387 if (bp->phy_flags & PHY_SERDES_FLAG) {
5388 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5389 bnx2_5706_serdes_timer(bp);
5391 bnx2_5708_serdes_timer(bp);
5395 mod_timer(&bp->timer, jiffies + bp->current_interval);
5399 bnx2_request_irq(struct bnx2 *bp)
5401 struct net_device *dev = bp->dev;
5402 unsigned long flags;
5403 struct bnx2_irq *irq;
5406 if (bp->flags & USING_MSI_OR_MSIX_FLAG)
5409 flags = IRQF_SHARED;
5411 for (i = 0; i < bp->irq_nvecs; i++) {
5412 irq = &bp->irq_tbl[i];
5413 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5423 bnx2_free_irq(struct bnx2 *bp)
5425 struct net_device *dev = bp->dev;
5426 struct bnx2_irq *irq;
5429 for (i = 0; i < bp->irq_nvecs; i++) {
5430 irq = &bp->irq_tbl[i];
5432 free_irq(irq->vector, dev);
5435 if (bp->flags & USING_MSI_FLAG)
5436 pci_disable_msi(bp->pdev);
5437 else if (bp->flags & USING_MSIX_FLAG)
5438 pci_disable_msix(bp->pdev);
5440 bp->flags &= ~(USING_MSI_OR_MSIX_FLAG | ONE_SHOT_MSI_FLAG);
5444 bnx2_enable_msix(struct bnx2 *bp)
5447 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5449 bnx2_setup_msix_tbl(bp);
5450 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5451 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5452 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5454 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5455 msix_ent[i].entry = i;
5456 msix_ent[i].vector = 0;
5459 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5463 bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5464 bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5466 strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5467 strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5468 strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5469 strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5471 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5472 bp->flags |= USING_MSIX_FLAG | ONE_SHOT_MSI_FLAG;
5473 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5474 bp->irq_tbl[i].vector = msix_ent[i].vector;
5478 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5480 bp->irq_tbl[0].handler = bnx2_interrupt;
5481 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5483 bp->irq_tbl[0].vector = bp->pdev->irq;
5485 if ((bp->flags & MSIX_CAP_FLAG) && !dis_msi)
5486 bnx2_enable_msix(bp);
5488 if ((bp->flags & MSI_CAP_FLAG) && !dis_msi &&
5489 !(bp->flags & USING_MSIX_FLAG)) {
5490 if (pci_enable_msi(bp->pdev) == 0) {
5491 bp->flags |= USING_MSI_FLAG;
5492 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5493 bp->flags |= ONE_SHOT_MSI_FLAG;
5494 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5496 bp->irq_tbl[0].handler = bnx2_msi;
5498 bp->irq_tbl[0].vector = bp->pdev->irq;
5503 /* Called with rtnl_lock */
5505 bnx2_open(struct net_device *dev)
5507 struct bnx2 *bp = netdev_priv(dev);
5510 netif_carrier_off(dev);
5512 bnx2_set_power_state(bp, PCI_D0);
5513 bnx2_disable_int(bp);
5515 rc = bnx2_alloc_mem(bp);
5519 bnx2_setup_int_mode(bp, disable_msi);
5520 bnx2_napi_enable(bp);
5521 rc = bnx2_request_irq(bp);
5524 bnx2_napi_disable(bp);
5529 rc = bnx2_init_nic(bp);
5532 bnx2_napi_disable(bp);
5539 mod_timer(&bp->timer, jiffies + bp->current_interval);
5541 atomic_set(&bp->intr_sem, 0);
5543 bnx2_enable_int(bp);
5545 if (bp->flags & USING_MSI_FLAG) {
5546 /* Test MSI to make sure it is working
5547 * If MSI test fails, go back to INTx mode
5549 if (bnx2_test_intr(bp) != 0) {
5550 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5551 " using MSI, switching to INTx mode. Please"
5552 " report this failure to the PCI maintainer"
5553 " and include system chipset information.\n",
5556 bnx2_disable_int(bp);
5559 bnx2_setup_int_mode(bp, 1);
5561 rc = bnx2_init_nic(bp);
5564 rc = bnx2_request_irq(bp);
5567 bnx2_napi_disable(bp);
5570 del_timer_sync(&bp->timer);
5573 bnx2_enable_int(bp);
5576 if (bp->flags & USING_MSI_FLAG)
5577 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5578 else if (bp->flags & USING_MSIX_FLAG)
5579 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5581 netif_start_queue(dev);
5587 bnx2_reset_task(struct work_struct *work)
5589 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5591 if (!netif_running(bp->dev))
5594 bp->in_reset_task = 1;
5595 bnx2_netif_stop(bp);
5599 atomic_set(&bp->intr_sem, 1);
5600 bnx2_netif_start(bp);
5601 bp->in_reset_task = 0;
5605 bnx2_tx_timeout(struct net_device *dev)
5607 struct bnx2 *bp = netdev_priv(dev);
5609 /* This allows the netif to be shutdown gracefully before resetting */
5610 schedule_work(&bp->reset_task);
5614 /* Called with rtnl_lock */
5616 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5618 struct bnx2 *bp = netdev_priv(dev);
5620 bnx2_netif_stop(bp);
5623 bnx2_set_rx_mode(dev);
5625 bnx2_netif_start(bp);
5629 /* Called with netif_tx_lock.
5630 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5631 * netif_wake_queue().
5634 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5636 struct bnx2 *bp = netdev_priv(dev);
5639 struct sw_bd *tx_buf;
5640 u32 len, vlan_tag_flags, last_frag, mss;
5641 u16 prod, ring_prod;
5643 struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
5645 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5646 (skb_shinfo(skb)->nr_frags + 1))) {
5647 netif_stop_queue(dev);
5648 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5651 return NETDEV_TX_BUSY;
5653 len = skb_headlen(skb);
5655 ring_prod = TX_RING_IDX(prod);
5658 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5659 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5662 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5664 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5666 if ((mss = skb_shinfo(skb)->gso_size)) {
5667 u32 tcp_opt_len, ip_tcp_len;
5670 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5672 tcp_opt_len = tcp_optlen(skb);
5674 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5675 u32 tcp_off = skb_transport_offset(skb) -
5676 sizeof(struct ipv6hdr) - ETH_HLEN;
5678 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5679 TX_BD_FLAGS_SW_FLAGS;
5680 if (likely(tcp_off == 0))
5681 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5684 vlan_tag_flags |= ((tcp_off & 0x3) <<
5685 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5686 ((tcp_off & 0x10) <<
5687 TX_BD_FLAGS_TCP6_OFF4_SHL);
5688 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5691 if (skb_header_cloned(skb) &&
5692 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5694 return NETDEV_TX_OK;
5697 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5701 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5702 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5706 if (tcp_opt_len || (iph->ihl > 5)) {
5707 vlan_tag_flags |= ((iph->ihl - 5) +
5708 (tcp_opt_len >> 2)) << 8;
5714 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5716 tx_buf = &bp->tx_buf_ring[ring_prod];
5718 pci_unmap_addr_set(tx_buf, mapping, mapping);
5720 txbd = &bp->tx_desc_ring[ring_prod];
5722 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5723 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5724 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5725 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5727 last_frag = skb_shinfo(skb)->nr_frags;
5729 for (i = 0; i < last_frag; i++) {
5730 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5732 prod = NEXT_TX_BD(prod);
5733 ring_prod = TX_RING_IDX(prod);
5734 txbd = &bp->tx_desc_ring[ring_prod];
5737 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5738 len, PCI_DMA_TODEVICE);
5739 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5742 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5743 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5744 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5745 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5748 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5750 prod = NEXT_TX_BD(prod);
5751 bp->tx_prod_bseq += skb->len;
5753 REG_WR16(bp, bp->tx_bidx_addr, prod);
5754 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5759 dev->trans_start = jiffies;
5761 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5762 netif_stop_queue(dev);
5763 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
5764 netif_wake_queue(dev);
5767 return NETDEV_TX_OK;
5770 /* Called with rtnl_lock */
5772 bnx2_close(struct net_device *dev)
5774 struct bnx2 *bp = netdev_priv(dev);
5777 /* Calling flush_scheduled_work() may deadlock because
5778 * linkwatch_event() may be on the workqueue and it will try to get
5779 * the rtnl_lock which we are holding.
5781 while (bp->in_reset_task)
5784 bnx2_disable_int_sync(bp);
5785 bnx2_napi_disable(bp);
5786 del_timer_sync(&bp->timer);
5787 if (bp->flags & NO_WOL_FLAG)
5788 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5790 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5792 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5793 bnx2_reset_chip(bp, reset_code);
5798 netif_carrier_off(bp->dev);
5799 bnx2_set_power_state(bp, PCI_D3hot);
5803 #define GET_NET_STATS64(ctr) \
5804 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5805 (unsigned long) (ctr##_lo)
5807 #define GET_NET_STATS32(ctr) \
5810 #if (BITS_PER_LONG == 64)
5811 #define GET_NET_STATS GET_NET_STATS64
5813 #define GET_NET_STATS GET_NET_STATS32
5816 static struct net_device_stats *
5817 bnx2_get_stats(struct net_device *dev)
5819 struct bnx2 *bp = netdev_priv(dev);
5820 struct statistics_block *stats_blk = bp->stats_blk;
5821 struct net_device_stats *net_stats = &bp->net_stats;
5823 if (bp->stats_blk == NULL) {
5826 net_stats->rx_packets =
5827 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5828 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5829 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5831 net_stats->tx_packets =
5832 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5833 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5834 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5836 net_stats->rx_bytes =
5837 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5839 net_stats->tx_bytes =
5840 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5842 net_stats->multicast =
5843 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5845 net_stats->collisions =
5846 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5848 net_stats->rx_length_errors =
5849 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5850 stats_blk->stat_EtherStatsOverrsizePkts);
5852 net_stats->rx_over_errors =
5853 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5855 net_stats->rx_frame_errors =
5856 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5858 net_stats->rx_crc_errors =
5859 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5861 net_stats->rx_errors = net_stats->rx_length_errors +
5862 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5863 net_stats->rx_crc_errors;
5865 net_stats->tx_aborted_errors =
5866 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5867 stats_blk->stat_Dot3StatsLateCollisions);
5869 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5870 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5871 net_stats->tx_carrier_errors = 0;
5873 net_stats->tx_carrier_errors =
5875 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5878 net_stats->tx_errors =
5880 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5882 net_stats->tx_aborted_errors +
5883 net_stats->tx_carrier_errors;
5885 net_stats->rx_missed_errors =
5886 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5887 stats_blk->stat_FwRxDrop);
5892 /* All ethtool functions called with rtnl_lock */
5895 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5897 struct bnx2 *bp = netdev_priv(dev);
5898 int support_serdes = 0, support_copper = 0;
5900 cmd->supported = SUPPORTED_Autoneg;
5901 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5904 } else if (bp->phy_port == PORT_FIBRE)
5909 if (support_serdes) {
5910 cmd->supported |= SUPPORTED_1000baseT_Full |
5912 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5913 cmd->supported |= SUPPORTED_2500baseX_Full;
5916 if (support_copper) {
5917 cmd->supported |= SUPPORTED_10baseT_Half |
5918 SUPPORTED_10baseT_Full |
5919 SUPPORTED_100baseT_Half |
5920 SUPPORTED_100baseT_Full |
5921 SUPPORTED_1000baseT_Full |
5926 spin_lock_bh(&bp->phy_lock);
5927 cmd->port = bp->phy_port;
5928 cmd->advertising = bp->advertising;
5930 if (bp->autoneg & AUTONEG_SPEED) {
5931 cmd->autoneg = AUTONEG_ENABLE;
5934 cmd->autoneg = AUTONEG_DISABLE;
5937 if (netif_carrier_ok(dev)) {
5938 cmd->speed = bp->line_speed;
5939 cmd->duplex = bp->duplex;
5945 spin_unlock_bh(&bp->phy_lock);
5947 cmd->transceiver = XCVR_INTERNAL;
5948 cmd->phy_address = bp->phy_addr;
5954 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5956 struct bnx2 *bp = netdev_priv(dev);
5957 u8 autoneg = bp->autoneg;
5958 u8 req_duplex = bp->req_duplex;
5959 u16 req_line_speed = bp->req_line_speed;
5960 u32 advertising = bp->advertising;
5963 spin_lock_bh(&bp->phy_lock);
5965 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5966 goto err_out_unlock;
5968 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5969 goto err_out_unlock;
5971 if (cmd->autoneg == AUTONEG_ENABLE) {
5972 autoneg |= AUTONEG_SPEED;
5974 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5976 /* allow advertising 1 speed */
5977 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5978 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5979 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5980 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5982 if (cmd->port == PORT_FIBRE)
5983 goto err_out_unlock;
5985 advertising = cmd->advertising;
5987 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5988 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5989 (cmd->port == PORT_TP))
5990 goto err_out_unlock;
5991 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5992 advertising = cmd->advertising;
5993 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5994 goto err_out_unlock;
5996 if (cmd->port == PORT_FIBRE)
5997 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5999 advertising = ETHTOOL_ALL_COPPER_SPEED;
6001 advertising |= ADVERTISED_Autoneg;
6004 if (cmd->port == PORT_FIBRE) {
6005 if ((cmd->speed != SPEED_1000 &&
6006 cmd->speed != SPEED_2500) ||
6007 (cmd->duplex != DUPLEX_FULL))
6008 goto err_out_unlock;
6010 if (cmd->speed == SPEED_2500 &&
6011 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
6012 goto err_out_unlock;
6014 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6015 goto err_out_unlock;
6017 autoneg &= ~AUTONEG_SPEED;
6018 req_line_speed = cmd->speed;
6019 req_duplex = cmd->duplex;
6023 bp->autoneg = autoneg;
6024 bp->advertising = advertising;
6025 bp->req_line_speed = req_line_speed;
6026 bp->req_duplex = req_duplex;
6028 err = bnx2_setup_phy(bp, cmd->port);
6031 spin_unlock_bh(&bp->phy_lock);
6037 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6039 struct bnx2 *bp = netdev_priv(dev);
6041 strcpy(info->driver, DRV_MODULE_NAME);
6042 strcpy(info->version, DRV_MODULE_VERSION);
6043 strcpy(info->bus_info, pci_name(bp->pdev));
6044 strcpy(info->fw_version, bp->fw_version);
6047 #define BNX2_REGDUMP_LEN (32 * 1024)
6050 bnx2_get_regs_len(struct net_device *dev)
6052 return BNX2_REGDUMP_LEN;
6056 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6058 u32 *p = _p, i, offset;
6060 struct bnx2 *bp = netdev_priv(dev);
6061 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6062 0x0800, 0x0880, 0x0c00, 0x0c10,
6063 0x0c30, 0x0d08, 0x1000, 0x101c,
6064 0x1040, 0x1048, 0x1080, 0x10a4,
6065 0x1400, 0x1490, 0x1498, 0x14f0,
6066 0x1500, 0x155c, 0x1580, 0x15dc,
6067 0x1600, 0x1658, 0x1680, 0x16d8,
6068 0x1800, 0x1820, 0x1840, 0x1854,
6069 0x1880, 0x1894, 0x1900, 0x1984,
6070 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6071 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6072 0x2000, 0x2030, 0x23c0, 0x2400,
6073 0x2800, 0x2820, 0x2830, 0x2850,
6074 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6075 0x3c00, 0x3c94, 0x4000, 0x4010,
6076 0x4080, 0x4090, 0x43c0, 0x4458,
6077 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6078 0x4fc0, 0x5010, 0x53c0, 0x5444,
6079 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6080 0x5fc0, 0x6000, 0x6400, 0x6428,
6081 0x6800, 0x6848, 0x684c, 0x6860,
6082 0x6888, 0x6910, 0x8000 };
6086 memset(p, 0, BNX2_REGDUMP_LEN);
6088 if (!netif_running(bp->dev))
6092 offset = reg_boundaries[0];
6094 while (offset < BNX2_REGDUMP_LEN) {
6095 *p++ = REG_RD(bp, offset);
6097 if (offset == reg_boundaries[i + 1]) {
6098 offset = reg_boundaries[i + 2];
6099 p = (u32 *) (orig_p + offset);
6106 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6108 struct bnx2 *bp = netdev_priv(dev);
6110 if (bp->flags & NO_WOL_FLAG) {
6115 wol->supported = WAKE_MAGIC;
6117 wol->wolopts = WAKE_MAGIC;
6121 memset(&wol->sopass, 0, sizeof(wol->sopass));
6125 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6127 struct bnx2 *bp = netdev_priv(dev);
6129 if (wol->wolopts & ~WAKE_MAGIC)
6132 if (wol->wolopts & WAKE_MAGIC) {
6133 if (bp->flags & NO_WOL_FLAG)
6145 bnx2_nway_reset(struct net_device *dev)
6147 struct bnx2 *bp = netdev_priv(dev);
6150 if (!(bp->autoneg & AUTONEG_SPEED)) {
6154 spin_lock_bh(&bp->phy_lock);
6156 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
6159 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6160 spin_unlock_bh(&bp->phy_lock);
6164 /* Force a link down visible on the other side */
6165 if (bp->phy_flags & PHY_SERDES_FLAG) {
6166 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6167 spin_unlock_bh(&bp->phy_lock);
6171 spin_lock_bh(&bp->phy_lock);
6173 bp->current_interval = SERDES_AN_TIMEOUT;
6174 bp->serdes_an_pending = 1;
6175 mod_timer(&bp->timer, jiffies + bp->current_interval);
6178 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6179 bmcr &= ~BMCR_LOOPBACK;
6180 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6182 spin_unlock_bh(&bp->phy_lock);
6188 bnx2_get_eeprom_len(struct net_device *dev)
6190 struct bnx2 *bp = netdev_priv(dev);
6192 if (bp->flash_info == NULL)
6195 return (int) bp->flash_size;
6199 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6202 struct bnx2 *bp = netdev_priv(dev);
6205 /* parameters already validated in ethtool_get_eeprom */
6207 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6213 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6216 struct bnx2 *bp = netdev_priv(dev);
6219 /* parameters already validated in ethtool_set_eeprom */
6221 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6227 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6229 struct bnx2 *bp = netdev_priv(dev);
6231 memset(coal, 0, sizeof(struct ethtool_coalesce));
6233 coal->rx_coalesce_usecs = bp->rx_ticks;
6234 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6235 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6236 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6238 coal->tx_coalesce_usecs = bp->tx_ticks;
6239 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6240 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6241 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6243 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6249 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6251 struct bnx2 *bp = netdev_priv(dev);
6253 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6254 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6256 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6257 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6259 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6260 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6262 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6263 if (bp->rx_quick_cons_trip_int > 0xff)
6264 bp->rx_quick_cons_trip_int = 0xff;
6266 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6267 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6269 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6270 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6272 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6273 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6275 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6276 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6279 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6280 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6281 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6282 bp->stats_ticks = USEC_PER_SEC;
6284 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6285 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6286 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6288 if (netif_running(bp->dev)) {
6289 bnx2_netif_stop(bp);
6291 bnx2_netif_start(bp);
6298 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6300 struct bnx2 *bp = netdev_priv(dev);
6302 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6303 ering->rx_mini_max_pending = 0;
6304 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6306 ering->rx_pending = bp->rx_ring_size;
6307 ering->rx_mini_pending = 0;
6308 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6310 ering->tx_max_pending = MAX_TX_DESC_CNT;
6311 ering->tx_pending = bp->tx_ring_size;
6315 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6317 if (netif_running(bp->dev)) {
6318 bnx2_netif_stop(bp);
6319 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6324 bnx2_set_rx_ring_size(bp, rx);
6325 bp->tx_ring_size = tx;
6327 if (netif_running(bp->dev)) {
6330 rc = bnx2_alloc_mem(bp);
6334 bnx2_netif_start(bp);
6340 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6342 struct bnx2 *bp = netdev_priv(dev);
6345 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6346 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6347 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6351 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6356 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6358 struct bnx2 *bp = netdev_priv(dev);
6360 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6361 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6362 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6366 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6368 struct bnx2 *bp = netdev_priv(dev);
6370 bp->req_flow_ctrl = 0;
6371 if (epause->rx_pause)
6372 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6373 if (epause->tx_pause)
6374 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6376 if (epause->autoneg) {
6377 bp->autoneg |= AUTONEG_FLOW_CTRL;
6380 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6383 spin_lock_bh(&bp->phy_lock);
6385 bnx2_setup_phy(bp, bp->phy_port);
6387 spin_unlock_bh(&bp->phy_lock);
6393 bnx2_get_rx_csum(struct net_device *dev)
6395 struct bnx2 *bp = netdev_priv(dev);
6401 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6403 struct bnx2 *bp = netdev_priv(dev);
6410 bnx2_set_tso(struct net_device *dev, u32 data)
6412 struct bnx2 *bp = netdev_priv(dev);
6415 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6416 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6417 dev->features |= NETIF_F_TSO6;
6419 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6424 #define BNX2_NUM_STATS 46
6427 char string[ETH_GSTRING_LEN];
6428 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6430 { "rx_error_bytes" },
6432 { "tx_error_bytes" },
6433 { "rx_ucast_packets" },
6434 { "rx_mcast_packets" },
6435 { "rx_bcast_packets" },
6436 { "tx_ucast_packets" },
6437 { "tx_mcast_packets" },
6438 { "tx_bcast_packets" },
6439 { "tx_mac_errors" },
6440 { "tx_carrier_errors" },
6441 { "rx_crc_errors" },
6442 { "rx_align_errors" },
6443 { "tx_single_collisions" },
6444 { "tx_multi_collisions" },
6446 { "tx_excess_collisions" },
6447 { "tx_late_collisions" },
6448 { "tx_total_collisions" },
6451 { "rx_undersize_packets" },
6452 { "rx_oversize_packets" },
6453 { "rx_64_byte_packets" },
6454 { "rx_65_to_127_byte_packets" },
6455 { "rx_128_to_255_byte_packets" },
6456 { "rx_256_to_511_byte_packets" },
6457 { "rx_512_to_1023_byte_packets" },
6458 { "rx_1024_to_1522_byte_packets" },
6459 { "rx_1523_to_9022_byte_packets" },
6460 { "tx_64_byte_packets" },
6461 { "tx_65_to_127_byte_packets" },
6462 { "tx_128_to_255_byte_packets" },
6463 { "tx_256_to_511_byte_packets" },
6464 { "tx_512_to_1023_byte_packets" },
6465 { "tx_1024_to_1522_byte_packets" },
6466 { "tx_1523_to_9022_byte_packets" },
6467 { "rx_xon_frames" },
6468 { "rx_xoff_frames" },
6469 { "tx_xon_frames" },
6470 { "tx_xoff_frames" },
6471 { "rx_mac_ctrl_frames" },
6472 { "rx_filtered_packets" },
6474 { "rx_fw_discards" },
6477 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6479 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6480 STATS_OFFSET32(stat_IfHCInOctets_hi),
6481 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6482 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6483 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6484 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6485 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6486 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6487 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6488 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6489 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6490 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6491 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6492 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6493 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6494 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6495 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6496 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6497 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6498 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6499 STATS_OFFSET32(stat_EtherStatsCollisions),
6500 STATS_OFFSET32(stat_EtherStatsFragments),
6501 STATS_OFFSET32(stat_EtherStatsJabbers),
6502 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6503 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6504 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6505 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6506 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6507 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6508 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6509 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6510 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6511 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6512 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6513 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6514 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6515 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6516 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6517 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6518 STATS_OFFSET32(stat_XonPauseFramesReceived),
6519 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6520 STATS_OFFSET32(stat_OutXonSent),
6521 STATS_OFFSET32(stat_OutXoffSent),
6522 STATS_OFFSET32(stat_MacControlFramesReceived),
6523 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6524 STATS_OFFSET32(stat_IfInMBUFDiscards),
6525 STATS_OFFSET32(stat_FwRxDrop),
6528 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6529 * skipped because of errata.
6531 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6532 8,0,8,8,8,8,8,8,8,8,
6533 4,0,4,4,4,4,4,4,4,4,
6534 4,4,4,4,4,4,4,4,4,4,
6535 4,4,4,4,4,4,4,4,4,4,
6539 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6540 8,0,8,8,8,8,8,8,8,8,
6541 4,4,4,4,4,4,4,4,4,4,
6542 4,4,4,4,4,4,4,4,4,4,
6543 4,4,4,4,4,4,4,4,4,4,
6547 #define BNX2_NUM_TESTS 6
6550 char string[ETH_GSTRING_LEN];
6551 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6552 { "register_test (offline)" },
6553 { "memory_test (offline)" },
6554 { "loopback_test (offline)" },
6555 { "nvram_test (online)" },
6556 { "interrupt_test (online)" },
6557 { "link_test (online)" },
6561 bnx2_get_sset_count(struct net_device *dev, int sset)
6565 return BNX2_NUM_TESTS;
6567 return BNX2_NUM_STATS;
6574 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6576 struct bnx2 *bp = netdev_priv(dev);
6578 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6579 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6582 bnx2_netif_stop(bp);
6583 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6586 if (bnx2_test_registers(bp) != 0) {
6588 etest->flags |= ETH_TEST_FL_FAILED;
6590 if (bnx2_test_memory(bp) != 0) {
6592 etest->flags |= ETH_TEST_FL_FAILED;
6594 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6595 etest->flags |= ETH_TEST_FL_FAILED;
6597 if (!netif_running(bp->dev)) {
6598 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6602 bnx2_netif_start(bp);
6605 /* wait for link up */
6606 for (i = 0; i < 7; i++) {
6609 msleep_interruptible(1000);
6613 if (bnx2_test_nvram(bp) != 0) {
6615 etest->flags |= ETH_TEST_FL_FAILED;
6617 if (bnx2_test_intr(bp) != 0) {
6619 etest->flags |= ETH_TEST_FL_FAILED;
6622 if (bnx2_test_link(bp) != 0) {
6624 etest->flags |= ETH_TEST_FL_FAILED;
6630 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6632 switch (stringset) {
6634 memcpy(buf, bnx2_stats_str_arr,
6635 sizeof(bnx2_stats_str_arr));
6638 memcpy(buf, bnx2_tests_str_arr,
6639 sizeof(bnx2_tests_str_arr));
6645 bnx2_get_ethtool_stats(struct net_device *dev,
6646 struct ethtool_stats *stats, u64 *buf)
6648 struct bnx2 *bp = netdev_priv(dev);
6650 u32 *hw_stats = (u32 *) bp->stats_blk;
6651 u8 *stats_len_arr = NULL;
6653 if (hw_stats == NULL) {
6654 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6658 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6659 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6660 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6661 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6662 stats_len_arr = bnx2_5706_stats_len_arr;
6664 stats_len_arr = bnx2_5708_stats_len_arr;
6666 for (i = 0; i < BNX2_NUM_STATS; i++) {
6667 if (stats_len_arr[i] == 0) {
6668 /* skip this counter */
6672 if (stats_len_arr[i] == 4) {
6673 /* 4-byte counter */
6675 *(hw_stats + bnx2_stats_offset_arr[i]);
6678 /* 8-byte counter */
6679 buf[i] = (((u64) *(hw_stats +
6680 bnx2_stats_offset_arr[i])) << 32) +
6681 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6686 bnx2_phys_id(struct net_device *dev, u32 data)
6688 struct bnx2 *bp = netdev_priv(dev);
6695 save = REG_RD(bp, BNX2_MISC_CFG);
6696 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6698 for (i = 0; i < (data * 2); i++) {
6700 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6703 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6704 BNX2_EMAC_LED_1000MB_OVERRIDE |
6705 BNX2_EMAC_LED_100MB_OVERRIDE |
6706 BNX2_EMAC_LED_10MB_OVERRIDE |
6707 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6708 BNX2_EMAC_LED_TRAFFIC);
6710 msleep_interruptible(500);
6711 if (signal_pending(current))
6714 REG_WR(bp, BNX2_EMAC_LED, 0);
6715 REG_WR(bp, BNX2_MISC_CFG, save);
6720 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6722 struct bnx2 *bp = netdev_priv(dev);
6724 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6725 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6727 return (ethtool_op_set_tx_csum(dev, data));
6730 static const struct ethtool_ops bnx2_ethtool_ops = {
6731 .get_settings = bnx2_get_settings,
6732 .set_settings = bnx2_set_settings,
6733 .get_drvinfo = bnx2_get_drvinfo,
6734 .get_regs_len = bnx2_get_regs_len,
6735 .get_regs = bnx2_get_regs,
6736 .get_wol = bnx2_get_wol,
6737 .set_wol = bnx2_set_wol,
6738 .nway_reset = bnx2_nway_reset,
6739 .get_link = ethtool_op_get_link,
6740 .get_eeprom_len = bnx2_get_eeprom_len,
6741 .get_eeprom = bnx2_get_eeprom,
6742 .set_eeprom = bnx2_set_eeprom,
6743 .get_coalesce = bnx2_get_coalesce,
6744 .set_coalesce = bnx2_set_coalesce,
6745 .get_ringparam = bnx2_get_ringparam,
6746 .set_ringparam = bnx2_set_ringparam,
6747 .get_pauseparam = bnx2_get_pauseparam,
6748 .set_pauseparam = bnx2_set_pauseparam,
6749 .get_rx_csum = bnx2_get_rx_csum,
6750 .set_rx_csum = bnx2_set_rx_csum,
6751 .set_tx_csum = bnx2_set_tx_csum,
6752 .set_sg = ethtool_op_set_sg,
6753 .set_tso = bnx2_set_tso,
6754 .self_test = bnx2_self_test,
6755 .get_strings = bnx2_get_strings,
6756 .phys_id = bnx2_phys_id,
6757 .get_ethtool_stats = bnx2_get_ethtool_stats,
6758 .get_sset_count = bnx2_get_sset_count,
6761 /* Called with rtnl_lock */
6763 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6765 struct mii_ioctl_data *data = if_mii(ifr);
6766 struct bnx2 *bp = netdev_priv(dev);
6771 data->phy_id = bp->phy_addr;
6777 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6780 if (!netif_running(dev))
6783 spin_lock_bh(&bp->phy_lock);
6784 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6785 spin_unlock_bh(&bp->phy_lock);
6787 data->val_out = mii_regval;
6793 if (!capable(CAP_NET_ADMIN))
6796 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6799 if (!netif_running(dev))
6802 spin_lock_bh(&bp->phy_lock);
6803 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6804 spin_unlock_bh(&bp->phy_lock);
6815 /* Called with rtnl_lock */
6817 bnx2_change_mac_addr(struct net_device *dev, void *p)
6819 struct sockaddr *addr = p;
6820 struct bnx2 *bp = netdev_priv(dev);
6822 if (!is_valid_ether_addr(addr->sa_data))
6825 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6826 if (netif_running(dev))
6827 bnx2_set_mac_addr(bp);
6832 /* Called with rtnl_lock */
6834 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6836 struct bnx2 *bp = netdev_priv(dev);
6838 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6839 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6843 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6846 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6848 poll_bnx2(struct net_device *dev)
6850 struct bnx2 *bp = netdev_priv(dev);
6852 disable_irq(bp->pdev->irq);
6853 bnx2_interrupt(bp->pdev->irq, dev);
6854 enable_irq(bp->pdev->irq);
6858 static void __devinit
6859 bnx2_get_5709_media(struct bnx2 *bp)
6861 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6862 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6865 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6867 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6868 bp->phy_flags |= PHY_SERDES_FLAG;
6872 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6873 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6875 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6877 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6882 bp->phy_flags |= PHY_SERDES_FLAG;
6890 bp->phy_flags |= PHY_SERDES_FLAG;
6896 static void __devinit
6897 bnx2_get_pci_speed(struct bnx2 *bp)
6901 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6902 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6905 bp->flags |= PCIX_FLAG;
6907 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6909 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6911 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6912 bp->bus_speed_mhz = 133;
6915 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6916 bp->bus_speed_mhz = 100;
6919 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6920 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6921 bp->bus_speed_mhz = 66;
6924 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6925 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6926 bp->bus_speed_mhz = 50;
6929 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6930 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6931 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6932 bp->bus_speed_mhz = 33;
6937 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6938 bp->bus_speed_mhz = 66;
6940 bp->bus_speed_mhz = 33;
6943 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6944 bp->flags |= PCI_32BIT_FLAG;
6948 static int __devinit
6949 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6952 unsigned long mem_len;
6955 u64 dma_mask, persist_dma_mask;
6957 SET_NETDEV_DEV(dev, &pdev->dev);
6958 bp = netdev_priv(dev);
6963 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6964 rc = pci_enable_device(pdev);
6966 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6970 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6972 "Cannot find PCI device base address, aborting.\n");
6974 goto err_out_disable;
6977 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6979 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6980 goto err_out_disable;
6983 pci_set_master(pdev);
6985 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6986 if (bp->pm_cap == 0) {
6988 "Cannot find power management capability, aborting.\n");
6990 goto err_out_release;
6996 spin_lock_init(&bp->phy_lock);
6997 spin_lock_init(&bp->indirect_lock);
6998 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7000 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7001 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7002 dev->mem_end = dev->mem_start + mem_len;
7003 dev->irq = pdev->irq;
7005 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7008 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7010 goto err_out_release;
7013 /* Configure byte swap and enable write to the reg_window registers.
7014 * Rely on CPU to do target byte swapping on big endian systems
7015 * The chip's target access swapping will not swap all accesses
7017 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7018 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7019 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7021 bnx2_set_power_state(bp, PCI_D0);
7023 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7025 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7026 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7028 "Cannot find PCIE capability, aborting.\n");
7032 bp->flags |= PCIE_FLAG;
7033 if (CHIP_REV(bp) == CHIP_REV_Ax)
7034 bp->flags |= JUMBO_BROKEN_FLAG;
7036 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7037 if (bp->pcix_cap == 0) {
7039 "Cannot find PCIX capability, aborting.\n");
7045 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7046 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7047 bp->flags |= MSIX_CAP_FLAG;
7050 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7051 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7052 bp->flags |= MSI_CAP_FLAG;
7055 /* 5708 cannot support DMA addresses > 40-bit. */
7056 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7057 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7059 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7061 /* Configure DMA attributes. */
7062 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7063 dev->features |= NETIF_F_HIGHDMA;
7064 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7067 "pci_set_consistent_dma_mask failed, aborting.\n");
7070 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7071 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7075 if (!(bp->flags & PCIE_FLAG))
7076 bnx2_get_pci_speed(bp);
7078 /* 5706A0 may falsely detect SERR and PERR. */
7079 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7080 reg = REG_RD(bp, PCI_COMMAND);
7081 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7082 REG_WR(bp, PCI_COMMAND, reg);
7084 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7085 !(bp->flags & PCIX_FLAG)) {
7088 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7092 bnx2_init_nvram(bp);
7094 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
7096 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7097 BNX2_SHM_HDR_SIGNATURE_SIG) {
7098 u32 off = PCI_FUNC(pdev->devfn) << 2;
7100 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
7102 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7104 /* Get the permanent MAC address. First we need to make sure the
7105 * firmware is actually running.
7107 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
7109 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7110 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7111 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7116 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
7117 for (i = 0, j = 0; i < 3; i++) {
7120 num = (u8) (reg >> (24 - (i * 8)));
7121 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7122 if (num >= k || !skip0 || k == 1) {
7123 bp->fw_version[j++] = (num / k) + '0';
7128 bp->fw_version[j++] = '.';
7130 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
7131 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7134 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7135 bp->flags |= ASF_ENABLE_FLAG;
7137 for (i = 0; i < 30; i++) {
7138 reg = REG_RD_IND(bp, bp->shmem_base +
7139 BNX2_BC_STATE_CONDITION);
7140 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7145 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
7146 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7147 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7148 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7150 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
7152 bp->fw_version[j++] = ' ';
7153 for (i = 0; i < 3; i++) {
7154 reg = REG_RD_IND(bp, addr + i * 4);
7156 memcpy(&bp->fw_version[j], ®, 4);
7161 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
7162 bp->mac_addr[0] = (u8) (reg >> 8);
7163 bp->mac_addr[1] = (u8) reg;
7165 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
7166 bp->mac_addr[2] = (u8) (reg >> 24);
7167 bp->mac_addr[3] = (u8) (reg >> 16);
7168 bp->mac_addr[4] = (u8) (reg >> 8);
7169 bp->mac_addr[5] = (u8) reg;
7171 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7173 bp->tx_ring_size = MAX_TX_DESC_CNT;
7174 bnx2_set_rx_ring_size(bp, 255);
7178 bp->tx_quick_cons_trip_int = 20;
7179 bp->tx_quick_cons_trip = 20;
7180 bp->tx_ticks_int = 80;
7183 bp->rx_quick_cons_trip_int = 6;
7184 bp->rx_quick_cons_trip = 6;
7185 bp->rx_ticks_int = 18;
7188 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7190 bp->timer_interval = HZ;
7191 bp->current_interval = HZ;
7195 /* Disable WOL support if we are running on a SERDES chip. */
7196 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7197 bnx2_get_5709_media(bp);
7198 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7199 bp->phy_flags |= PHY_SERDES_FLAG;
7201 bp->phy_port = PORT_TP;
7202 if (bp->phy_flags & PHY_SERDES_FLAG) {
7203 bp->phy_port = PORT_FIBRE;
7204 reg = REG_RD_IND(bp, bp->shmem_base +
7205 BNX2_SHARED_HW_CFG_CONFIG);
7206 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7207 bp->flags |= NO_WOL_FLAG;
7210 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7212 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7213 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
7215 bnx2_init_remote_phy(bp);
7217 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7218 CHIP_NUM(bp) == CHIP_NUM_5708)
7219 bp->phy_flags |= PHY_CRC_FIX_FLAG;
7220 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7221 (CHIP_REV(bp) == CHIP_REV_Ax ||
7222 CHIP_REV(bp) == CHIP_REV_Bx))
7223 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
7225 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7226 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7227 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7228 bp->flags |= NO_WOL_FLAG;
7232 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7233 bp->tx_quick_cons_trip_int =
7234 bp->tx_quick_cons_trip;
7235 bp->tx_ticks_int = bp->tx_ticks;
7236 bp->rx_quick_cons_trip_int =
7237 bp->rx_quick_cons_trip;
7238 bp->rx_ticks_int = bp->rx_ticks;
7239 bp->comp_prod_trip_int = bp->comp_prod_trip;
7240 bp->com_ticks_int = bp->com_ticks;
7241 bp->cmd_ticks_int = bp->cmd_ticks;
7244 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7246 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7247 * with byte enables disabled on the unused 32-bit word. This is legal
7248 * but causes problems on the AMD 8132 which will eventually stop
7249 * responding after a while.
7251 * AMD believes this incompatibility is unique to the 5706, and
7252 * prefers to locally disable MSI rather than globally disabling it.
7254 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7255 struct pci_dev *amd_8132 = NULL;
7257 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7258 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7261 if (amd_8132->revision >= 0x10 &&
7262 amd_8132->revision <= 0x13) {
7264 pci_dev_put(amd_8132);
7270 bnx2_set_default_link(bp);
7271 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7273 init_timer(&bp->timer);
7274 bp->timer.expires = RUN_AT(bp->timer_interval);
7275 bp->timer.data = (unsigned long) bp;
7276 bp->timer.function = bnx2_timer;
7282 iounmap(bp->regview);
7287 pci_release_regions(pdev);
7290 pci_disable_device(pdev);
7291 pci_set_drvdata(pdev, NULL);
7297 static char * __devinit
7298 bnx2_bus_string(struct bnx2 *bp, char *str)
7302 if (bp->flags & PCIE_FLAG) {
7303 s += sprintf(s, "PCI Express");
7305 s += sprintf(s, "PCI");
7306 if (bp->flags & PCIX_FLAG)
7307 s += sprintf(s, "-X");
7308 if (bp->flags & PCI_32BIT_FLAG)
7309 s += sprintf(s, " 32-bit");
7311 s += sprintf(s, " 64-bit");
7312 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7317 static void __devinit
7318 bnx2_init_napi(struct bnx2 *bp)
7321 struct bnx2_napi *bnapi;
7323 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7324 bnapi = &bp->bnx2_napi[i];
7327 netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7328 netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
7332 static int __devinit
7333 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7335 static int version_printed = 0;
7336 struct net_device *dev = NULL;
7340 DECLARE_MAC_BUF(mac);
7342 if (version_printed++ == 0)
7343 printk(KERN_INFO "%s", version);
7345 /* dev zeroed in init_etherdev */
7346 dev = alloc_etherdev(sizeof(*bp));
7351 rc = bnx2_init_board(pdev, dev);
7357 dev->open = bnx2_open;
7358 dev->hard_start_xmit = bnx2_start_xmit;
7359 dev->stop = bnx2_close;
7360 dev->get_stats = bnx2_get_stats;
7361 dev->set_multicast_list = bnx2_set_rx_mode;
7362 dev->do_ioctl = bnx2_ioctl;
7363 dev->set_mac_address = bnx2_change_mac_addr;
7364 dev->change_mtu = bnx2_change_mtu;
7365 dev->tx_timeout = bnx2_tx_timeout;
7366 dev->watchdog_timeo = TX_TIMEOUT;
7368 dev->vlan_rx_register = bnx2_vlan_rx_register;
7370 dev->ethtool_ops = &bnx2_ethtool_ops;
7372 bp = netdev_priv(dev);
7375 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7376 dev->poll_controller = poll_bnx2;
7379 pci_set_drvdata(pdev, dev);
7381 memcpy(dev->dev_addr, bp->mac_addr, 6);
7382 memcpy(dev->perm_addr, bp->mac_addr, 6);
7383 bp->name = board_info[ent->driver_data].name;
7385 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7386 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7387 dev->features |= NETIF_F_IPV6_CSUM;
7390 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7392 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7393 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7394 dev->features |= NETIF_F_TSO6;
7396 if ((rc = register_netdev(dev))) {
7397 dev_err(&pdev->dev, "Cannot register net device\n");
7399 iounmap(bp->regview);
7400 pci_release_regions(pdev);
7401 pci_disable_device(pdev);
7402 pci_set_drvdata(pdev, NULL);
7407 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7408 "IRQ %d, node addr %s\n",
7411 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7412 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7413 bnx2_bus_string(bp, str),
7415 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7420 static void __devexit
7421 bnx2_remove_one(struct pci_dev *pdev)
7423 struct net_device *dev = pci_get_drvdata(pdev);
7424 struct bnx2 *bp = netdev_priv(dev);
7426 flush_scheduled_work();
7428 unregister_netdev(dev);
7431 iounmap(bp->regview);
7434 pci_release_regions(pdev);
7435 pci_disable_device(pdev);
7436 pci_set_drvdata(pdev, NULL);
7440 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7442 struct net_device *dev = pci_get_drvdata(pdev);
7443 struct bnx2 *bp = netdev_priv(dev);
7446 /* PCI register 4 needs to be saved whether netif_running() or not.
7447 * MSI address and data need to be saved if using MSI and
7450 pci_save_state(pdev);
7451 if (!netif_running(dev))
7454 flush_scheduled_work();
7455 bnx2_netif_stop(bp);
7456 netif_device_detach(dev);
7457 del_timer_sync(&bp->timer);
7458 if (bp->flags & NO_WOL_FLAG)
7459 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7461 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7463 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7464 bnx2_reset_chip(bp, reset_code);
7466 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7471 bnx2_resume(struct pci_dev *pdev)
7473 struct net_device *dev = pci_get_drvdata(pdev);
7474 struct bnx2 *bp = netdev_priv(dev);
7476 pci_restore_state(pdev);
7477 if (!netif_running(dev))
7480 bnx2_set_power_state(bp, PCI_D0);
7481 netif_device_attach(dev);
7483 bnx2_netif_start(bp);
7487 static struct pci_driver bnx2_pci_driver = {
7488 .name = DRV_MODULE_NAME,
7489 .id_table = bnx2_pci_tbl,
7490 .probe = bnx2_init_one,
7491 .remove = __devexit_p(bnx2_remove_one),
7492 .suspend = bnx2_suspend,
7493 .resume = bnx2_resume,
7496 static int __init bnx2_init(void)
7498 return pci_register_driver(&bnx2_pci_driver);
7501 static void __exit bnx2_cleanup(void)
7503 pci_unregister_driver(&bnx2_pci_driver);
7506 module_init(bnx2_init);
7507 module_exit(bnx2_cleanup);