18ed8068dc2ddca9b50c0a3e8fb0c5bb0ad95da1
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x10000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.7.1"
60 #define DRV_MODULE_RELDATE      "December 19, 2007"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
/* One-line module banner string. */
static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: nonzero disables MSI and falls back to legacy INTx. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board model index.  Used as the driver_data field in bnx2_pci_tbl
 * and as the index into board_info[] below, so the two must stay in
 * the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
/* Marketing names for each board, indexed by board_t above. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
/* PCI IDs claimed by this driver.  The HP OEM entries (explicit HP
 * subvendor/subdevice) are listed before the generic PCI_ANY_ID
 * Broadcom entries of the same device ID so they match first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
128
/* Geometry/configuration table for the NVRAM devices the chip may be
 * strapped with.  NOTE(review): field order follows struct flash_spec
 * in bnx2.h (strap value, config registers, flags, page geometry,
 * byte-address mask, total size, name) -- confirm against the header.
 * "Expansion" entries are placeholders for strap codes with no known
 * device.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
217
/* The 5709 has a single fixed NVRAM geometry, so it gets its own
 * flash_spec instead of an entry in flash_table[].
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bnapi->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
/* Read a device register indirectly through the PCI config window.
 * The address/data pair is a two-step access, so indirect_lock
 * serializes it against concurrent indirect accesses.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
258
/* Write a device register indirectly through the PCI config window,
 * under indirect_lock (see bnx2_reg_rd_ind).
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
293 static int
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
295 {
296         u32 val1;
297         int i, ret;
298
299         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
302
303                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305
306                 udelay(40);
307         }
308
309         val1 = (bp->phy_addr << 21) | (reg << 16) |
310                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311                 BNX2_EMAC_MDIO_COMM_START_BUSY;
312         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
313
314         for (i = 0; i < 50; i++) {
315                 udelay(10);
316
317                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
319                         udelay(5);
320
321                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
323
324                         break;
325                 }
326         }
327
328         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
329                 *val = 0x0;
330                 ret = -EBUSY;
331         }
332         else {
333                 *val = val1;
334                 ret = 0;
335         }
336
337         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340
341                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343
344                 udelay(40);
345         }
346
347         return ret;
348 }
349
/* Write 'val' to PHY register 'reg' over the EMAC MDIO interface.
 * Auto-polling is suspended and restored around the manual access,
 * exactly as in bnx2_read_phy().  Returns 0 on success or -EBUSY if
 * the transaction does not complete within the ~500us poll window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Turn off auto-poll so it cannot collide with our access. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the write: PHY address, register, data, START_BUSY. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
398
399 static void
400 bnx2_disable_int(struct bnx2 *bp)
401 {
402         int i;
403         struct bnx2_napi *bnapi;
404
405         for (i = 0; i < bp->irq_nvecs; i++) {
406                 bnapi = &bp->bnx2_napi[i];
407                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
408                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
409         }
410         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
411 }
412
/* Unmask interrupts on every vector.  Each vector is acked twice: the
 * first write updates the status index while still masked, the second
 * drops MASK_INT.  COAL_NOW is then set so the host coalescing block
 * fires immediately if events are already pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
433
/* Disable interrupts and wait for in-flight handlers to finish.
 * intr_sem is incremented first so bnx2_netif_start() (which only
 * re-enables when the count drops to zero) cannot undo the disable.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
444
445 static void
446 bnx2_napi_disable(struct bnx2 *bp)
447 {
448         int i;
449
450         for (i = 0; i < bp->irq_nvecs; i++)
451                 napi_disable(&bp->bnx2_napi[i].napi);
452 }
453
454 static void
455 bnx2_napi_enable(struct bnx2 *bp)
456 {
457         int i;
458
459         for (i = 0; i < bp->irq_nvecs; i++)
460                 napi_enable(&bp->bnx2_napi[i].napi);
461 }
462
/* Stop the data path: mask and synchronize interrupts, then, if the
 * interface is up, quiesce NAPI and the tx queue.  trans_start is
 * refreshed so the stopped queue does not trip the tx watchdog.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
473
474 static void
475 bnx2_netif_start(struct bnx2 *bp)
476 {
477         if (atomic_dec_and_test(&bp->intr_sem)) {
478                 if (netif_running(bp->dev)) {
479                         netif_wake_queue(bp->dev);
480                         bnx2_napi_enable(bp);
481                         bnx2_enable_int(bp);
482                 }
483         }
484 }
485
486 static void
487 bnx2_free_mem(struct bnx2 *bp)
488 {
489         int i;
490
491         for (i = 0; i < bp->ctx_pages; i++) {
492                 if (bp->ctx_blk[i]) {
493                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
494                                             bp->ctx_blk[i],
495                                             bp->ctx_blk_mapping[i]);
496                         bp->ctx_blk[i] = NULL;
497                 }
498         }
499         if (bp->status_blk) {
500                 pci_free_consistent(bp->pdev, bp->status_stats_size,
501                                     bp->status_blk, bp->status_blk_mapping);
502                 bp->status_blk = NULL;
503                 bp->stats_blk = NULL;
504         }
505         if (bp->tx_desc_ring) {
506                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507                                     bp->tx_desc_ring, bp->tx_desc_mapping);
508                 bp->tx_desc_ring = NULL;
509         }
510         kfree(bp->tx_buf_ring);
511         bp->tx_buf_ring = NULL;
512         for (i = 0; i < bp->rx_max_ring; i++) {
513                 if (bp->rx_desc_ring[i])
514                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
515                                             bp->rx_desc_ring[i],
516                                             bp->rx_desc_mapping[i]);
517                 bp->rx_desc_ring[i] = NULL;
518         }
519         vfree(bp->rx_buf_ring);
520         bp->rx_buf_ring = NULL;
521         for (i = 0; i < bp->rx_max_pg_ring; i++) {
522                 if (bp->rx_pg_desc_ring[i])
523                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524                                             bp->rx_pg_desc_ring[i],
525                                             bp->rx_pg_desc_mapping[i]);
526                 bp->rx_pg_desc_ring[i] = NULL;
527         }
528         if (bp->rx_pg_ring)
529                 vfree(bp->rx_pg_ring);
530         bp->rx_pg_ring = NULL;
531 }
532
/* Allocate all host memory for the device: tx/rx software shadow
 * rings, DMA-coherent descriptor rings, the combined status +
 * statistics block, and (5709 only) context memory pages.  On any
 * failure, everything allocated so far is released via bnx2_free_mem()
 * and -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The rx shadow ring scales with rx_max_ring, so vmalloc it. */
	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & MSIX_CAP_FLAG)
		/* MSI-X needs one aligned status block slot per HW vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base block; MSI-X vectors get fixed-size
	 * slices at BNX2_SBLK_MSIX_ALIGN_SIZE offsets within it.
	 */
	bp->bnx2_napi[0].status_blk = bp->status_blk;
	if (bp->flags & MSIX_CAP_FLAG) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

			bnapi->status_blk_msix = (void *)
				((unsigned long) bp->status_blk +
				 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics live right after the (aligned) status block. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 0x2000 bytes of context memory, split into page-sized
		 * DMA allocations (at least one page).
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
631
/* Publish the current link state (speed/duplex/autoneg result) to the
 * bootcode via the BNX2_LINK_STATUS shared-memory word.  Skipped when
 * the PHY is remotely managed (REMOTE_PHY_CAP_FLAG).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice -- presumably because link
			 * status is latched and only the second read
			 * reflects the current state; confirm against the
			 * MII spec.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
690
691 static char *
692 bnx2_xceiver_str(struct bnx2 *bp)
693 {
694         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
696                  "Copper"));
697 }
698
/* Log the link state, update the carrier flag, and forward the state
 * to the bootcode via bnx2_report_fw_link().  The up-message is built
 * from several continuation printk()s, so the strings below must stay
 * exactly as-is.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
735
/* Determine the pause (flow control) configuration that resulted from
 * link establishment and store it in bp->flow_ctrl.
 * - Without full speed+pause autoneg, the requested setting is applied
 *   directly (full duplex only).
 * - Half duplex never gets flow control.
 * - On 5708 SerDes the resolved state is read straight from the PHY
 *   status register.
 * - Otherwise the local/remote advertisements are resolved per the
 *   802.3 pause resolution table (1000X bits are first translated to
 *   their copper ADVERTISE_PAUSE_* equivalents).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708 SerDes reports the negotiated pause state directly. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Map 1000BASE-X pause bits onto the copper equivalents so
		 * the resolution logic below works for both media.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
811
/* Record link-up state for the 5709 SerDes PHY and fill in
 * bp->line_speed / bp->duplex.  When speed autoneg is off, the
 * requested settings are taken verbatim; otherwise speed and duplex
 * are decoded from the GP_STATUS block of the PHY (the block address
 * register is restored afterwards).  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Switch to the GP_STATUS register block, read the autoneg
	 * result, then switch back.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
850
851 static int
852 bnx2_5708s_linkup(struct bnx2 *bp)
853 {
854         u32 val;
855
856         bp->link_up = 1;
857         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859                 case BCM5708S_1000X_STAT1_SPEED_10:
860                         bp->line_speed = SPEED_10;
861                         break;
862                 case BCM5708S_1000X_STAT1_SPEED_100:
863                         bp->line_speed = SPEED_100;
864                         break;
865                 case BCM5708S_1000X_STAT1_SPEED_1G:
866                         bp->line_speed = SPEED_1000;
867                         break;
868                 case BCM5708S_1000X_STAT1_SPEED_2G5:
869                         bp->line_speed = SPEED_2500;
870                         break;
871         }
872         if (val & BCM5708S_1000X_STAT1_FD)
873                 bp->duplex = DUPLEX_FULL;
874         else
875                 bp->duplex = DUPLEX_HALF;
876
877         return 0;
878 }
879
880 static int
881 bnx2_5706s_linkup(struct bnx2 *bp)
882 {
883         u32 bmcr, local_adv, remote_adv, common;
884
885         bp->link_up = 1;
886         bp->line_speed = SPEED_1000;
887
888         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889         if (bmcr & BMCR_FULLDPLX) {
890                 bp->duplex = DUPLEX_FULL;
891         }
892         else {
893                 bp->duplex = DUPLEX_HALF;
894         }
895
896         if (!(bmcr & BMCR_ANENABLE)) {
897                 return 0;
898         }
899
900         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
902
903         common = local_adv & remote_adv;
904         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
905
906                 if (common & ADVERTISE_1000XFULL) {
907                         bp->duplex = DUPLEX_FULL;
908                 }
909                 else {
910                         bp->duplex = DUPLEX_HALF;
911                 }
912         }
913
914         return 0;
915 }
916
917 static int
918 bnx2_copper_linkup(struct bnx2 *bp)
919 {
920         u32 bmcr;
921
922         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
923         if (bmcr & BMCR_ANENABLE) {
924                 u32 local_adv, remote_adv, common;
925
926                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
927                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
928
929                 common = local_adv & (remote_adv >> 2);
930                 if (common & ADVERTISE_1000FULL) {
931                         bp->line_speed = SPEED_1000;
932                         bp->duplex = DUPLEX_FULL;
933                 }
934                 else if (common & ADVERTISE_1000HALF) {
935                         bp->line_speed = SPEED_1000;
936                         bp->duplex = DUPLEX_HALF;
937                 }
938                 else {
939                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
940                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
941
942                         common = local_adv & remote_adv;
943                         if (common & ADVERTISE_100FULL) {
944                                 bp->line_speed = SPEED_100;
945                                 bp->duplex = DUPLEX_FULL;
946                         }
947                         else if (common & ADVERTISE_100HALF) {
948                                 bp->line_speed = SPEED_100;
949                                 bp->duplex = DUPLEX_HALF;
950                         }
951                         else if (common & ADVERTISE_10FULL) {
952                                 bp->line_speed = SPEED_10;
953                                 bp->duplex = DUPLEX_FULL;
954                         }
955                         else if (common & ADVERTISE_10HALF) {
956                                 bp->line_speed = SPEED_10;
957                                 bp->duplex = DUPLEX_HALF;
958                         }
959                         else {
960                                 bp->line_speed = 0;
961                                 bp->link_up = 0;
962                         }
963                 }
964         }
965         else {
966                 if (bmcr & BMCR_SPEED100) {
967                         bp->line_speed = SPEED_100;
968                 }
969                 else {
970                         bp->line_speed = SPEED_10;
971                 }
972                 if (bmcr & BMCR_FULLDPLX) {
973                         bp->duplex = DUPLEX_FULL;
974                 }
975                 else {
976                         bp->duplex = DUPLEX_HALF;
977                 }
978         }
979
980         return 0;
981 }
982
/* Program the EMAC to match the resolved link state: port mode (MII/GMII
 * and 10M/2.5G variants), duplex, and RX/TX pause enables, then ack the
 * EMAC link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX lengths tuning; 0x26ff vs 0x2620 for 1G half duplex.
	 * NOTE(review): values look Broadcom-specified (slot time/IPG);
	 * no symbolic constants exist for them — confirm against datasheet.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; it uses
				 * plain MII mode for 10 Mbps as well.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE (cached in bp->rx_mode so other paths
	 * that rewrite BNX2_EMAC_RX_MODE preserve the flow-control bit).
	 */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1049
1050 static void
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1052 {
1053         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1054             (CHIP_NUM(bp) == CHIP_NUM_5709))
1055                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056                                MII_BNX2_BLK_ADDR_GP_STATUS);
1057 }
1058
1059 static void
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1061 {
1062         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1063             (CHIP_NUM(bp) == CHIP_NUM_5709))
1064                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1066 }
1067
1068 static int
1069 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1070 {
1071         u32 up1;
1072         int ret = 1;
1073
1074         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1075                 return 0;
1076
1077         if (bp->autoneg & AUTONEG_SPEED)
1078                 bp->advertising |= ADVERTISED_2500baseX_Full;
1079
1080         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1081                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1082
1083         bnx2_read_phy(bp, bp->mii_up1, &up1);
1084         if (!(up1 & BCM5708S_UP1_2G5)) {
1085                 up1 |= BCM5708S_UP1_2G5;
1086                 bnx2_write_phy(bp, bp->mii_up1, up1);
1087                 ret = 0;
1088         }
1089
1090         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094         return ret;
1095 }
1096
1097 static int
1098 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1099 {
1100         u32 up1;
1101         int ret = 0;
1102
1103         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1104                 return 0;
1105
1106         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1107                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1108
1109         bnx2_read_phy(bp, bp->mii_up1, &up1);
1110         if (up1 & BCM5708S_UP1_2G5) {
1111                 up1 &= ~BCM5708S_UP1_2G5;
1112                 bnx2_write_phy(bp, bp->mii_up1, up1);
1113                 ret = 1;
1114         }
1115
1116         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1117                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1118                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1119
1120         return ret;
1121 }
1122
/* Force the SerDes PHY to 2.5G.  On the 5709 this is done through the
 * SERDES_DIG MISC1 register; on the 5708 through a BMCR bit.  In both
 * cases autoneg is then turned off in BMCR (with duplex forced from
 * bp->req_duplex) and BMCR written back.
 *
 * NOTE(review): bmcr is only loaded on the 5709/5708 paths; this relies
 * on PHY_2_5G_CAPABLE_FLAG being set only on those chips, otherwise the
 * final write would use an uninitialized value — confirm flag setup.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Select forced 2.5G mode in the SERDES_DIG block, then
		 * restore the default block address before touching BMCR.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	/* Forcing a speed implies autoneg off; apply requested duplex. */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1157
/* Undo bnx2_enable_forced_2g5(): clear the forced-2.5G setting (MISC1
 * on the 5709, BMCR bit on the 5708) and, when autonegotiating, restart
 * autoneg at 1000 Mbps via BMCR.
 *
 * NOTE(review): as in bnx2_enable_forced_2g5(), bmcr is only loaded on
 * the 5709/5708 paths — relies on PHY_2_5G_CAPABLE_FLAG being limited
 * to those chips; confirm flag setup.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG block, then
		 * restore the default block address before reading BMCR.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1188
/* Re-evaluate the PHY link state and program the MAC to match.  Reads
 * link status, dispatches to the chip-specific linkup handler, resolves
 * flow control, reports link transitions, and updates the EMAC.
 * Always returns 0.  Presumably called with bp->phy_lock held — the
 * PHY accessors here are used under that lock elsewhere; confirm at
 * call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* Loopback modes always report link up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware owns the link on remote-PHY devices. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* The MII BMSR link bit is latched-low; read twice so the second
	 * read reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, trust the EMAC link status over the PHY's. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Fill in line_speed/duplex via the chip-specific path. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G so autoneg can restart
		 * cleanly, and clear parallel-detect state.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Log only actual transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1254
1255 static int
1256 bnx2_reset_phy(struct bnx2 *bp)
1257 {
1258         int i;
1259         u32 reg;
1260
1261         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1262
1263 #define PHY_RESET_MAX_WAIT 100
1264         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1265                 udelay(10);
1266
1267                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1268                 if (!(reg & BMCR_RESET)) {
1269                         udelay(20);
1270                         break;
1271                 }
1272         }
1273         if (i == PHY_RESET_MAX_WAIT) {
1274                 return -EBUSY;
1275         }
1276         return 0;
1277 }
1278
1279 static u32
1280 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1281 {
1282         u32 adv = 0;
1283
1284         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1285                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1286
1287                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1288                         adv = ADVERTISE_1000XPAUSE;
1289                 }
1290                 else {
1291                         adv = ADVERTISE_PAUSE_CAP;
1292                 }
1293         }
1294         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1295                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1296                         adv = ADVERTISE_1000XPSE_ASYM;
1297                 }
1298                 else {
1299                         adv = ADVERTISE_PAUSE_ASYM;
1300                 }
1301         }
1302         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1303                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1304                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1305                 }
1306                 else {
1307                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1308                 }
1309         }
1310         return adv;
1311 }
1312
1313 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1314
1315 static int
1316 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1317 {
1318         u32 speed_arg = 0, pause_adv;
1319
1320         pause_adv = bnx2_phy_get_pause_adv(bp);
1321
1322         if (bp->autoneg & AUTONEG_SPEED) {
1323                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1324                 if (bp->advertising & ADVERTISED_10baseT_Half)
1325                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1326                 if (bp->advertising & ADVERTISED_10baseT_Full)
1327                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1328                 if (bp->advertising & ADVERTISED_100baseT_Half)
1329                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1330                 if (bp->advertising & ADVERTISED_100baseT_Full)
1331                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1332                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1333                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1334                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1335                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1336         } else {
1337                 if (bp->req_line_speed == SPEED_2500)
1338                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1339                 else if (bp->req_line_speed == SPEED_1000)
1340                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1341                 else if (bp->req_line_speed == SPEED_100) {
1342                         if (bp->req_duplex == DUPLEX_FULL)
1343                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1344                         else
1345                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1346                 } else if (bp->req_line_speed == SPEED_10) {
1347                         if (bp->req_duplex == DUPLEX_FULL)
1348                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1349                         else
1350                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1351                 }
1352         }
1353
1354         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1355                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1356         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1357                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1358
1359         if (port == PORT_TP)
1360                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1361                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1362
1363         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1364
1365         spin_unlock_bh(&bp->phy_lock);
1366         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1367         spin_lock_bh(&bp->phy_lock);
1368
1369         return 0;
1370 }
1371
/* Configure a SerDes PHY according to bp->autoneg and the requested
 * speed/duplex.  With a forced speed, BMCR and the advertisement
 * register are programmed directly (bouncing the link if anything
 * changed so the partner notices); with autoneg, the advertisement is
 * rebuilt and autoneg restarted only when it differs from what is
 * already in the PHY.  Remote-PHY devices are delegated to the
 * firmware path.  Always returns 0.  Called with bp->phy_lock held
 * (temporarily dropped around the msleep below).
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching between 1G and 2.5G requires a link bounce
		 * so the partner re-synchronizes.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): clears BMCR bit 13; on this
				 * PHY it appears to select the higher speed
				 * together with SPEED1000 — confirm against
				 * the 5709 register documentation.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner drops the link,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1486
/* All fibre speeds this device can advertise via ethtool.  NOTE: this
 * macro reads bp->phy_flags, so it may only be expanded where a "bp"
 * pointer is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds advertised via ethtool. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks for all 10/100 abilities and for
 * both 1000BASE-T abilities.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1501
1502 static void
1503 bnx2_set_default_remote_link(struct bnx2 *bp)
1504 {
1505         u32 link;
1506
1507         if (bp->phy_port == PORT_TP)
1508                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1509         else
1510                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1511
1512         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1513                 bp->req_line_speed = 0;
1514                 bp->autoneg |= AUTONEG_SPEED;
1515                 bp->advertising = ADVERTISED_Autoneg;
1516                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1517                         bp->advertising |= ADVERTISED_10baseT_Half;
1518                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1519                         bp->advertising |= ADVERTISED_10baseT_Full;
1520                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1521                         bp->advertising |= ADVERTISED_100baseT_Half;
1522                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1523                         bp->advertising |= ADVERTISED_100baseT_Full;
1524                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1525                         bp->advertising |= ADVERTISED_1000baseT_Full;
1526                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1527                         bp->advertising |= ADVERTISED_2500baseX_Full;
1528         } else {
1529                 bp->autoneg = 0;
1530                 bp->advertising = 0;
1531                 bp->req_duplex = DUPLEX_FULL;
1532                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1533                         bp->req_line_speed = SPEED_10;
1534                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1535                                 bp->req_duplex = DUPLEX_HALF;
1536                 }
1537                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1538                         bp->req_line_speed = SPEED_100;
1539                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1540                                 bp->req_duplex = DUPLEX_HALF;
1541                 }
1542                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1543                         bp->req_line_speed = SPEED_1000;
1544                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1545                         bp->req_line_speed = SPEED_2500;
1546         }
1547 }
1548
1549 static void
1550 bnx2_set_default_link(struct bnx2 *bp)
1551 {
1552         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1553                 return bnx2_set_default_remote_link(bp);
1554
1555         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1556         bp->req_line_speed = 0;
1557         if (bp->phy_flags & PHY_SERDES_FLAG) {
1558                 u32 reg;
1559
1560                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1561
1562                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1563                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1564                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1565                         bp->autoneg = 0;
1566                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1567                         bp->req_duplex = DUPLEX_FULL;
1568                 }
1569         } else
1570                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1571 }
1572
1573 static void
1574 bnx2_send_heart_beat(struct bnx2 *bp)
1575 {
1576         u32 msg;
1577         u32 addr;
1578
1579         spin_lock(&bp->indirect_lock);
1580         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1581         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1582         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1583         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1584         spin_unlock(&bp->indirect_lock);
1585 }
1586
/* Handle a firmware link event on a remote-PHY device: decode the
 * LINK_STATUS word into link/speed/duplex/flow-control state, switch
 * the default settings if the media type changed, report transitions,
 * and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware asks for a driver heartbeat via this status bit. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The HALF cases set duplex and then deliberately fall
		 * through to the matching FULL case to set line_speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Without full autoneg, the requested flow control applies
		 * directly (full duplex only); otherwise use the modes the
		 * firmware reports as negotiated.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media type can change on dual-media remote PHYs; reload
		 * the defaults when it does.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1665
1666 static int
1667 bnx2_set_remote_link(struct bnx2 *bp)
1668 {
1669         u32 evt_code;
1670
1671         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1672         switch (evt_code) {
1673                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1674                         bnx2_remote_phy_event(bp);
1675                         break;
1676                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1677                 default:
1678                         bnx2_send_heart_beat(bp);
1679                         break;
1680         }
1681         return 0;
1682 }
1683
/* Configure the copper PHY from bp->autoneg and the requested
 * speed/duplex.  In autoneg mode, rewrite the advertisement registers
 * and restart autonegotiation only if something changed; otherwise
 * force speed/duplex through BMCR.
 *
 * NOTE(review): caller appears to hold bp->phy_lock — it is dropped
 * with spin_unlock_bh() around the msleep() below; confirm against
 * all call sites.
 *
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 and pause advertisement bits. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000 Mb advertisement bits. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg if the advertisement changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read twice: the BMSR link-status bit is latched low, so
		 * the first read may report a stale link-down event.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1780
1781 static int
1782 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1783 {
1784         if (bp->loopback == MAC_LOOPBACK)
1785                 return 0;
1786
1787         if (bp->phy_flags & PHY_SERDES_FLAG) {
1788                 return (bnx2_setup_serdes_phy(bp, port));
1789         }
1790         else {
1791                 return (bnx2_setup_copper_phy(bp));
1792         }
1793 }
1794
/* Initialize the 5709 SerDes PHY.  The 5709S uses a different register
 * layout than standard MII (base registers offset by 0x10, and several
 * vendor register blocks selected through MII_BNX2_BLK_ADDR).
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Remap the generic MII register offsets for this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block, then reset. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode, disable auto media detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg mode (BAM) and next-page T2. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable clause 73 BAM autoneg. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the default block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1843
/* Initialize the 5708 SerDes PHY: select IEEE register set, enable
 * fiber mode with auto-detect, enable parallel (PLL) link detection,
 * optionally advertise 2.5G, and apply board-specific TX amplitude
 * tuning.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use the IEEE-compliant register definitions. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply NVRAM-provided TX control tuning on backplane boards. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1901
/* Initialize the 5706 SerDes PHY.  Registers 0x18 and 0x1c are
 * vendor-specific shadow registers; the magic values below are per
 * Broadcom — TODO confirm against the 5706S datasheet.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1938
/* Initialize the copper PHY: apply CRC and early-DAC workarounds when
 * flagged, program the extended packet length bit to match the MTU,
 * and enable ethernet@wirespeed.  Registers 0x10/0x15/0x17/0x18 are
 * vendor-specific; the magic values are per Broadcom — TODO confirm
 * against the PHY datasheet.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC workaround: scripted sequence of shadow-register writes. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via the DSP expansion register. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1989
1990
1991 static int
1992 bnx2_init_phy(struct bnx2 *bp)
1993 {
1994         u32 val;
1995         int rc = 0;
1996
1997         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1998         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1999
2000         bp->mii_bmcr = MII_BMCR;
2001         bp->mii_bmsr = MII_BMSR;
2002         bp->mii_bmsr1 = MII_BMSR;
2003         bp->mii_adv = MII_ADVERTISE;
2004         bp->mii_lpa = MII_LPA;
2005
2006         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2007
2008         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
2009                 goto setup_phy;
2010
2011         bnx2_read_phy(bp, MII_PHYSID1, &val);
2012         bp->phy_id = val << 16;
2013         bnx2_read_phy(bp, MII_PHYSID2, &val);
2014         bp->phy_id |= val & 0xffff;
2015
2016         if (bp->phy_flags & PHY_SERDES_FLAG) {
2017                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2018                         rc = bnx2_init_5706s_phy(bp);
2019                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2020                         rc = bnx2_init_5708s_phy(bp);
2021                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2022                         rc = bnx2_init_5709s_phy(bp);
2023         }
2024         else {
2025                 rc = bnx2_init_copper_phy(bp);
2026         }
2027
2028 setup_phy:
2029         if (!rc)
2030                 rc = bnx2_setup_phy(bp, bp->phy_port);
2031
2032         return rc;
2033 }
2034
2035 static int
2036 bnx2_set_mac_loopback(struct bnx2 *bp)
2037 {
2038         u32 mac_mode;
2039
2040         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2041         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2042         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2043         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2044         bp->link_up = 1;
2045         return 0;
2046 }
2047
2048 static int bnx2_test_link(struct bnx2 *);
2049
2050 static int
2051 bnx2_set_phy_loopback(struct bnx2 *bp)
2052 {
2053         u32 mac_mode;
2054         int rc, i;
2055
2056         spin_lock_bh(&bp->phy_lock);
2057         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2058                             BMCR_SPEED1000);
2059         spin_unlock_bh(&bp->phy_lock);
2060         if (rc)
2061                 return rc;
2062
2063         for (i = 0; i < 10; i++) {
2064                 if (bnx2_test_link(bp) == 0)
2065                         break;
2066                 msleep(100);
2067         }
2068
2069         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2070         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2071                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2072                       BNX2_EMAC_MODE_25G_MODE);
2073
2074         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2075         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2076         bp->link_up = 1;
2077         return 0;
2078 }
2079
/* Post a message to the firmware via the driver mailbox and wait for
 * an acknowledgement.  A rolling sequence number is folded into the
 * message so acks can be matched to requests.
 *
 * Returns 0 on success (or unconditionally for WAIT0 messages, which
 * are treated as fire-and-forget), -EBUSY if the firmware never acked
 * within FW_ACK_TIME_OUT_MS (the firmware is also informed of the
 * timeout), or -EIO if the firmware acked with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Acked when the firmware echoes our sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages succeed whether or not the ack arrived. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2122
/* Initialize the 5709 context memory: trigger the hardware context
 * memory init, then program each host context page into the chip's
 * host page table, polling for each write request to complete.
 *
 * Returns 0 on success, -EBUSY if the memory init or a page-table
 * write does not complete in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Bit 12's purpose is not documented here — per chip docs. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encode the host page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for the hardware to clear the MEM_INIT bit. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Program the DMA address of each context page and wait for the
	 * page-table write to be accepted.
	 */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2165
/* Zero out the on-chip context memory for all 96 virtual CIDs
 * (pre-5709 chips).  On 5706 A0, VCIDs with bit 3 set are remapped to
 * a different physical CID range before zeroing.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 silicon quirk: remap VCIDs that have bit 3
			 * set into the 0x60-based physical CID range.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans multiple physical pages; zero each. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2208
2209 static int
2210 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2211 {
2212         u16 *good_mbuf;
2213         u32 good_mbuf_cnt;
2214         u32 val;
2215
2216         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2217         if (good_mbuf == NULL) {
2218                 printk(KERN_ERR PFX "Failed to allocate memory in "
2219                                     "bnx2_alloc_bad_rbuf\n");
2220                 return -ENOMEM;
2221         }
2222
2223         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2224                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2225
2226         good_mbuf_cnt = 0;
2227
2228         /* Allocate a bunch of mbufs and save the good ones in an array. */
2229         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2230         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2231                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2232
2233                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2234
2235                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2236
2237                 /* The addresses with Bit 9 set are bad memory blocks. */
2238                 if (!(val & (1 << 9))) {
2239                         good_mbuf[good_mbuf_cnt] = (u16) val;
2240                         good_mbuf_cnt++;
2241                 }
2242
2243                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2244         }
2245
2246         /* Free the good ones back to the mbuf pool thus discarding
2247          * all the bad ones. */
2248         while (good_mbuf_cnt) {
2249                 good_mbuf_cnt--;
2250
2251                 val = good_mbuf[good_mbuf_cnt];
2252                 val = (val << 9) | val | 1;
2253
2254                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2255         }
2256         kfree(good_mbuf);
2257         return 0;
2258 }
2259
2260 static void
2261 bnx2_set_mac_addr(struct bnx2 *bp)
2262 {
2263         u32 val;
2264         u8 *mac_addr = bp->dev->dev_addr;
2265
2266         val = (mac_addr[0] << 8) | mac_addr[1];
2267
2268         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2269
2270         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2271                 (mac_addr[4] << 8) | mac_addr[5];
2272
2273         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2274 }
2275
2276 static inline int
2277 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2278 {
2279         dma_addr_t mapping;
2280         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2281         struct rx_bd *rxbd =
2282                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2283         struct page *page = alloc_page(GFP_ATOMIC);
2284
2285         if (!page)
2286                 return -ENOMEM;
2287         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2288                                PCI_DMA_FROMDEVICE);
2289         rx_pg->page = page;
2290         pci_unmap_addr_set(rx_pg, mapping, mapping);
2291         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2292         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2293         return 0;
2294 }
2295
2296 static void
2297 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2298 {
2299         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2300         struct page *page = rx_pg->page;
2301
2302         if (!page)
2303                 return;
2304
2305         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2306                        PCI_DMA_FROMDEVICE);
2307
2308         __free_page(page);
2309         rx_pg->page = NULL;
2310 }
2311
/* Allocate an skb for RX ring slot @index, align its data pointer to a
 * BNX2_RX_ALIGN boundary, DMA-map it, and publish the address in the
 * hardware descriptor.  Returns 0 on success, -ENOMEM if the skb
 * allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Advance skb->data to the next BNX2_RX_ALIGN boundary if the
	 * allocation did not come back aligned.
	 */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the two descriptor words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Running byte count of posted RX buffers, later told to the chip. */
	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2342
2343 static int
2344 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2345 {
2346         struct status_block *sblk = bnapi->status_blk;
2347         u32 new_link_state, old_link_state;
2348         int is_set = 1;
2349
2350         new_link_state = sblk->status_attn_bits & event;
2351         old_link_state = sblk->status_attn_bits_ack & event;
2352         if (new_link_state != old_link_state) {
2353                 if (new_link_state)
2354                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2355                 else
2356                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2357         } else
2358                 is_set = 0;
2359
2360         return is_set;
2361 }
2362
2363 static void
2364 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2365 {
2366         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2367                 spin_lock(&bp->phy_lock);
2368                 bnx2_set_link(bp);
2369                 spin_unlock(&bp->phy_lock);
2370         }
2371         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2372                 bnx2_set_remote_link(bp);
2373
2374 }
2375
2376 static inline u16
2377 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2378 {
2379         u16 cons;
2380
2381         if (bnapi->int_num == 0)
2382                 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2383         else
2384                 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2385
2386         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2387                 cons++;
2388         return cons;
2389 }
2390
/* Reclaim completed TX descriptors, up to @budget packets.  Unmaps and
 * frees each completed skb (head fragment plus page fragments) and
 * wakes the TX queue if it was stopped and enough descriptors are now
 * free.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Compute the index of this packet's last BD;
			 * account for the skipped last entry of a ring
			 * page when the packet wraps.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Not all of this packet's BDs are done yet;
			 * stop reclaiming and wait for the rest.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear (head) part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each page fragment from its own descriptor slot. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-sample: the hardware may have completed more BDs. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bnapi->hw_tx_cons = hw_cons;
	bnapi->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked wake: re-test under netif_tx_lock to avoid
	 * racing with bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2472
/* Recycle up to @count rx page-ring buffers back to the hardware.
 *
 * Used on error paths when a split/jumbo frame cannot be completed:
 * the consumed pages are handed back to the producer side by copying
 * each page pointer, DMA mapping and buffer descriptor address from
 * the consumer entry to the producer entry, avoiding a free/re-alloc
 * cycle.
 *
 * If @skb is non-NULL, its last page fragment is first detached,
 * re-mapped and placed back into the consumer slot (the skb itself is
 * then freed) so the ring does not lose a page.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        dma_addr_t mapping;
        int i;
        u16 hw_prod = bnapi->rx_pg_prod, prod;
        u16 cons = bnapi->rx_pg_cons;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &bp->rx_pg_ring[prod];
                cons_rx_pg = &bp->rx_pg_ring[cons];
                cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                if (i == 0 && skb) {
                        struct page *page;
                        struct skb_shared_info *shinfo;

                        /* Strip the last page fragment off the skb and
                         * map it back into the consumer slot.
                         */
                        shinfo = skb_shinfo(skb);
                        shinfo->nr_frags--;
                        page = shinfo->frags[shinfo->nr_frags].page;
                        shinfo->frags[shinfo->nr_frags].page = NULL;
                        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                        cons_rx_pg->page = page;
                        pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
                        dev_kfree_skb(skb);
                }
                if (prod != cons) {
                        /* Move the page and its DMA address from the
                         * consumer entry to the producer entry.
                         */
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        bnapi->rx_pg_prod = hw_prod;
        bnapi->rx_pg_cons = cons;
}
2522
/* Return an rx skb to the ring without passing it up the stack.
 *
 * The skb and its DMA mapping are moved from the consumer slot @cons
 * to the producer slot @prod so the same buffer can be handed straight
 * back to the hardware.  Called on error and copy-break paths.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
        u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &bp->rx_buf_ring[cons];
        prod_rx_buf = &bp->rx_buf_ring[prod];

        /* Hand the header area back to the device; only the first
         * rx_offset + RX_COPY_THRESH bytes were synced for the CPU
         * in bnx2_rx_int().
         */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        bnapi->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        /* Same slot: mapping and bd address are already in place. */
        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2552
/* Complete a received packet: refill the rx ring slot and, for split
 * or jumbo frames, pull the remaining data off the page ring into skb
 * fragments.
 *
 * @len:      frame length with the 4-byte trailer already subtracted
 *            by the caller (bnx2_rx_int)
 * @hdr_len:  bytes left in the linear buffer when the frame continues
 *            on the page ring; 0 for a fully linear frame
 * @dma_addr: DMA address of the linear rx buffer
 * @ring_idx: producer index in the low 16 bits, consumer index in the
 *            high 16 bits
 *
 * Returns 0 on success, or a negative errno when a replacement buffer
 * cannot be allocated, in which case the frame is dropped and all of
 * its buffers are recycled back onto the rings.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        err = bnx2_alloc_rx_skb(bp, bnapi, prod);
        if (unlikely(err)) {
                /* No replacement buffer: recycle the old skb ... */
                bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        /* ... and every page-ring entry the frame used.
                         * raw_len restores the 4-byte trailer so the
                         * page count matches what the hardware consumed.
                         */
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, bp->rx_offset);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Entire frame fits in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = bnapi->rx_pg_cons;
                u16 pg_prod = bnapi->rx_pg_prod;

                /* frag_size counts the page-ring bytes including the
                 * 4-byte trailer; the trailer is trimmed off the last
                 * fragment below.
                 */
                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        if (unlikely(frag_len <= 4)) {
                                /* Only trailer bytes remain: give the
                                 * unused pages back and trim the tail
                                 * from data already attached to the skb.
                                 */
                                unsigned int tail = 4 - frag_len;

                                bnapi->rx_pg_cons = pg_cons;
                                bnapi->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &bp->rx_pg_ring[pg_cons];

                        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        /* Last fragment: drop the 4-byte trailer. */
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                bnapi->rx_pg_cons = pg_cons;
                                bnapi->rx_pg_prod = pg_prod;
                                /* Pass the skb so its just-attached
                                 * fragment is reclaimed along with the
                                 * remaining unused pages.
                                 */
                                bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
                                                        pages - i);
                                return err;
                        }

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                bnapi->rx_pg_prod = pg_prod;
                bnapi->rx_pg_cons = pg_cons;
        }
        return 0;
}
2644
2645 static inline u16
2646 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2647 {
2648         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2649
2650         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2651                 cons++;
2652         return cons;
2653 }
2654
2655 static int
2656 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2657 {
2658         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2659         struct l2_fhdr *rx_hdr;
2660         int rx_pkt = 0, pg_ring_used = 0;
2661
2662         hw_cons = bnx2_get_hw_rx_cons(bnapi);
2663         sw_cons = bnapi->rx_cons;
2664         sw_prod = bnapi->rx_prod;
2665
2666         /* Memory barrier necessary as speculative reads of the rx
2667          * buffer can be ahead of the index in the status block
2668          */
2669         rmb();
2670         while (sw_cons != hw_cons) {
2671                 unsigned int len, hdr_len;
2672                 u32 status;
2673                 struct sw_bd *rx_buf;
2674                 struct sk_buff *skb;
2675                 dma_addr_t dma_addr;
2676
2677                 sw_ring_cons = RX_RING_IDX(sw_cons);
2678                 sw_ring_prod = RX_RING_IDX(sw_prod);
2679
2680                 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2681                 skb = rx_buf->skb;
2682
2683                 rx_buf->skb = NULL;
2684
2685                 dma_addr = pci_unmap_addr(rx_buf, mapping);
2686
2687                 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2688                         bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2689
2690                 rx_hdr = (struct l2_fhdr *) skb->data;
2691                 len = rx_hdr->l2_fhdr_pkt_len;
2692
2693                 if ((status = rx_hdr->l2_fhdr_status) &
2694                         (L2_FHDR_ERRORS_BAD_CRC |
2695                         L2_FHDR_ERRORS_PHY_DECODE |
2696                         L2_FHDR_ERRORS_ALIGNMENT |
2697                         L2_FHDR_ERRORS_TOO_SHORT |
2698                         L2_FHDR_ERRORS_GIANT_FRAME)) {
2699
2700                         bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2701                                           sw_ring_prod);
2702                         goto next_rx;
2703                 }
2704                 hdr_len = 0;
2705                 if (status & L2_FHDR_STATUS_SPLIT) {
2706                         hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2707                         pg_ring_used = 1;
2708                 } else if (len > bp->rx_jumbo_thresh) {
2709                         hdr_len = bp->rx_jumbo_thresh;
2710                         pg_ring_used = 1;
2711                 }
2712
2713                 len -= 4;
2714
2715                 if (len <= bp->rx_copy_thresh) {
2716                         struct sk_buff *new_skb;
2717
2718                         new_skb = netdev_alloc_skb(bp->dev, len + 2);
2719                         if (new_skb == NULL) {
2720                                 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2721                                                   sw_ring_prod);
2722                                 goto next_rx;
2723                         }
2724
2725                         /* aligned copy */
2726                         skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2727                                       new_skb->data, len + 2);
2728                         skb_reserve(new_skb, 2);
2729                         skb_put(new_skb, len);
2730
2731                         bnx2_reuse_rx_skb(bp, bnapi, skb,
2732                                 sw_ring_cons, sw_ring_prod);
2733
2734                         skb = new_skb;
2735                 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2736                            dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2737                         goto next_rx;
2738
2739                 skb->protocol = eth_type_trans(skb, bp->dev);
2740
2741                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2742                         (ntohs(skb->protocol) != 0x8100)) {
2743
2744                         dev_kfree_skb(skb);
2745                         goto next_rx;
2746
2747                 }
2748
2749                 skb->ip_summed = CHECKSUM_NONE;
2750                 if (bp->rx_csum &&
2751                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2752                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
2753
2754                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2755                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2756                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2757                 }
2758
2759 #ifdef BCM_VLAN
2760                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2761                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2762                                 rx_hdr->l2_fhdr_vlan_tag);
2763                 }
2764                 else
2765 #endif
2766                         netif_receive_skb(skb);
2767
2768                 bp->dev->last_rx = jiffies;
2769                 rx_pkt++;
2770
2771 next_rx:
2772                 sw_cons = NEXT_RX_BD(sw_cons);
2773                 sw_prod = NEXT_RX_BD(sw_prod);
2774
2775                 if ((rx_pkt == budget))
2776                         break;
2777
2778                 /* Refresh hw_cons to see if there is new work */
2779                 if (sw_cons == hw_cons) {
2780                         hw_cons = bnx2_get_hw_rx_cons(bnapi);
2781                         rmb();
2782                 }
2783         }
2784         bnapi->rx_cons = sw_cons;
2785         bnapi->rx_prod = sw_prod;
2786
2787         if (pg_ring_used)
2788                 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2789                          bnapi->rx_pg_prod);
2790
2791         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2792
2793         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2794
2795         mmiowb();
2796
2797         return rx_pkt;
2798
2799 }
2800
2801 /* MSI ISR - The only difference between this and the INTx ISR
2802  * is that the MSI interrupt is always serviced.
2803  */
2804 static irqreturn_t
2805 bnx2_msi(int irq, void *dev_instance)
2806 {
2807         struct net_device *dev = dev_instance;
2808         struct bnx2 *bp = netdev_priv(dev);
2809         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2810
2811         prefetch(bnapi->status_blk);
2812         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2813                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2814                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2815
2816         /* Return here if interrupt is disabled. */
2817         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2818                 return IRQ_HANDLED;
2819
2820         netif_rx_schedule(dev, &bnapi->napi);
2821
2822         return IRQ_HANDLED;
2823 }
2824
2825 static irqreturn_t
2826 bnx2_msi_1shot(int irq, void *dev_instance)
2827 {
2828         struct net_device *dev = dev_instance;
2829         struct bnx2 *bp = netdev_priv(dev);
2830         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2831
2832         prefetch(bnapi->status_blk);
2833
2834         /* Return here if interrupt is disabled. */
2835         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2836                 return IRQ_HANDLED;
2837
2838         netif_rx_schedule(dev, &bnapi->napi);
2839
2840         return IRQ_HANDLED;
2841 }
2842
/* INTx (possibly shared) interrupt handler. */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct status_block *sblk = bnapi->status_blk;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Ack and mask the interrupt while NAPI polling runs. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
                /* Record the status index before polling begins so
                 * later events re-trigger work detection.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bnapi->napi);
        }

        return IRQ_HANDLED;
}
2882
2883 static irqreturn_t
2884 bnx2_tx_msix(int irq, void *dev_instance)
2885 {
2886         struct net_device *dev = dev_instance;
2887         struct bnx2 *bp = netdev_priv(dev);
2888         struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2889
2890         prefetch(bnapi->status_blk_msix);
2891
2892         /* Return here if interrupt is disabled. */
2893         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2894                 return IRQ_HANDLED;
2895
2896         netif_rx_schedule(dev, &bnapi->napi);
2897         return IRQ_HANDLED;
2898 }
2899
/* Attention bits the driver services (compared between
 * status_attn_bits and status_attn_bits_ack in bnx2_has_work()
 * and bnx2_poll_work()).
 */
#define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
                                 STATUS_ATTN_BITS_TIMER_ABORT)
2902
2903 static inline int
2904 bnx2_has_work(struct bnx2_napi *bnapi)
2905 {
2906         struct status_block *sblk = bnapi->status_blk;
2907
2908         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2909             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2910                 return 1;
2911
2912         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2913             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2914                 return 1;
2915
2916         return 0;
2917 }
2918
/* NAPI poll handler for the dedicated tx MSI-X vector: reap tx
 * completions until either the budget is exhausted or no more
 * completions are pending, then re-enable the interrupt.
 */
static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk_msix;

        do {
                work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
                if (unlikely(work_done >= budget))
                        return work_done;

                /* Latch the status index before re-checking for new
                 * completions; it is written back to the chip below
                 * to acknowledge the work processed so far.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
        } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);

        /* All tx work done: exit polling and re-enable the interrupt. */
        netif_rx_complete(bp->dev, napi);
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               bnapi->last_status_idx);
        return work_done;
}
2941
/* One round of NAPI work: service attention (link) events, reap tx
 * completions, then process up to the remaining rx budget.
 *
 * Returns the updated rx work count; tx completions do not count
 * against the budget (bnx2_tx_int is called with budget 0, which its
 * post-increment comparison never matches, so it drains everything).
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                          int work_done, int budget)
{
        struct status_block *sblk = bnapi->status_blk;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
                bnx2_tx_int(bp, bnapi, 0);

        if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
                work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

        return work_done;
}
2970
/* Main NAPI poll handler (rx + tx + link attention events). */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

                if (unlikely(work_done >= budget))
                        break;

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        netif_rx_complete(bp->dev, napi);
                        if (likely(bp->flags & USING_MSI_OR_MSIX_FLAG)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: first write acks the index with the
                         * interrupt still masked, the second write
                         * unmasks it.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3012
3013 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3014  * from set_multicast.
3015  */
3016 static void
3017 bnx2_set_rx_mode(struct net_device *dev)
3018 {
3019         struct bnx2 *bp = netdev_priv(dev);
3020         u32 rx_mode, sort_mode;
3021         int i;
3022
3023         spin_lock_bh(&bp->phy_lock);
3024
3025         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3026                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3027         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3028 #ifdef BCM_VLAN
3029         if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
3030                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3031 #else
3032         if (!(bp->flags & ASF_ENABLE_FLAG))
3033                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3034 #endif
3035         if (dev->flags & IFF_PROMISC) {
3036                 /* Promiscuous mode. */
3037                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3038                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3039                              BNX2_RPM_SORT_USER0_PROM_VLAN;
3040         }
3041         else if (dev->flags & IFF_ALLMULTI) {
3042                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3043                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3044                                0xffffffff);
3045                 }
3046                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3047         }
3048         else {
3049                 /* Accept one or more multicast(s). */
3050                 struct dev_mc_list *mclist;
3051                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3052                 u32 regidx;
3053                 u32 bit;
3054                 u32 crc;
3055
3056                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3057
3058                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3059                      i++, mclist = mclist->next) {
3060
3061                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3062                         bit = crc & 0xff;
3063                         regidx = (bit & 0xe0) >> 5;
3064                         bit &= 0x1f;
3065                         mc_filter[regidx] |= (1 << bit);
3066                 }
3067
3068                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3069                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3070                                mc_filter[i]);
3071                 }
3072
3073                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3074         }
3075
3076         if (rx_mode != bp->rx_mode) {
3077                 bp->rx_mode = rx_mode;
3078                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3079         }
3080
3081         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3082         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3083         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3084
3085         spin_unlock_bh(&bp->phy_lock);
3086 }
3087
3088 static void
3089 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
3090         u32 rv2p_proc)
3091 {
3092         int i;
3093         u32 val;
3094
3095
3096         for (i = 0; i < rv2p_code_len; i += 8) {
3097                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
3098                 rv2p_code++;
3099                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
3100                 rv2p_code++;
3101
3102                 if (rv2p_proc == RV2P_PROC1) {
3103                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3104                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3105                 }
3106                 else {
3107                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3108                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3109                 }
3110         }
3111
3112         /* Reset the processor, un-stall is done later. */
3113         if (rv2p_proc == RV2P_PROC1) {
3114                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3115         }
3116         else {
3117                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3118         }
3119 }
3120
/* Load firmware into one of the on-chip CPUs and start it.
 *
 * The CPU is halted, each firmware section (text, data, sbss, bss,
 * rodata) is written into the CPU's scratchpad through its MIPS view
 * window, the program counter is set to the entry point, and the CPU
 * is released from halt.
 *
 * Returns 0 on success or a negative errno if the compressed text
 * section fails to decompress.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
        u32 offset;
        u32 val;
        int rc;

        /* Halt the CPU. */
        val = REG_RD_IND(bp, cpu_reg->mode);
        val |= cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->mode, val);
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

        /* Load the Text area. */
        offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
        if (fw->gz_text) {
                int j;

                /* Text ships compressed; inflate into fw->text, a
                 * FW_BUF_SIZE scratch buffer supplied by the caller.
                 */
                rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
                                       fw->gz_text_len);
                if (rc < 0)
                        return rc;

                for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
                }
        }

        /* Load the Data area. */
        offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
        if (fw->data) {
                int j;

                for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->data[j]);
                }
        }

        /* Load the SBSS area (zero-filled). */
        offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
        if (fw->sbss_len) {
                int j;

                for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, 0);
                }
        }

        /* Load the BSS area (zero-filled). */
        offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
        if (fw->bss_len) {
                int j;

                for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, 0);
                }
        }

        /* Load the Read-Only area. */
        offset = cpu_reg->spad_base +
                (fw->rodata_addr - cpu_reg->mips_view_base);
        if (fw->rodata) {
                int j;

                for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->rodata[j]);
                }
        }

        /* Clear the pre-fetch instruction. */
        REG_WR_IND(bp, cpu_reg->inst, 0);
        REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

        /* Start the CPU. */
        val = REG_RD_IND(bp, cpu_reg->mode);
        val &= ~cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
        REG_WR_IND(bp, cpu_reg->mode, val);

        return 0;
}
3202
3203 static int
3204 bnx2_init_cpus(struct bnx2 *bp)
3205 {
3206         struct cpu_reg cpu_reg;
3207         struct fw_info *fw;
3208         int rc, rv2p_len;
3209         void *text, *rv2p;
3210
3211         /* Initialize the RV2P processor. */
3212         text = vmalloc(FW_BUF_SIZE);
3213         if (!text)
3214                 return -ENOMEM;
3215         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3216                 rv2p = bnx2_xi_rv2p_proc1;
3217                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3218         } else {
3219                 rv2p = bnx2_rv2p_proc1;
3220                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3221         }
3222         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3223         if (rc < 0)
3224                 goto init_cpu_err;
3225
3226         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3227
3228         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3229                 rv2p = bnx2_xi_rv2p_proc2;
3230                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3231         } else {
3232                 rv2p = bnx2_rv2p_proc2;
3233                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3234         }
3235         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3236         if (rc < 0)
3237                 goto init_cpu_err;
3238
3239         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3240
3241         /* Initialize the RX Processor. */
3242         cpu_reg.mode = BNX2_RXP_CPU_MODE;
3243         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3244         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3245         cpu_reg.state = BNX2_RXP_CPU_STATE;
3246         cpu_reg.state_value_clear = 0xffffff;
3247         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3248         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3249         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3250         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3251         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3252         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3253         cpu_reg.mips_view_base = 0x8000000;
3254
3255         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3256                 fw = &bnx2_rxp_fw_09;
3257         else
3258                 fw = &bnx2_rxp_fw_06;
3259
3260         fw->text = text;
3261         rc = load_cpu_fw(bp, &cpu_reg, fw);
3262         if (rc)
3263                 goto init_cpu_err;
3264
3265         /* Initialize the TX Processor. */
3266         cpu_reg.mode = BNX2_TXP_CPU_MODE;
3267         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3268         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3269         cpu_reg.state = BNX2_TXP_CPU_STATE;
3270         cpu_reg.state_value_clear = 0xffffff;
3271         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3272         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3273         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3274         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3275         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3276         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3277         cpu_reg.mips_view_base = 0x8000000;
3278
3279         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3280                 fw = &bnx2_txp_fw_09;
3281         else
3282                 fw = &bnx2_txp_fw_06;
3283
3284         fw->text = text;
3285         rc = load_cpu_fw(bp, &cpu_reg, fw);
3286         if (rc)
3287                 goto init_cpu_err;
3288
3289         /* Initialize the TX Patch-up Processor. */
3290         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3291         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3292         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3293         cpu_reg.state = BNX2_TPAT_CPU_STATE;
3294         cpu_reg.state_value_clear = 0xffffff;
3295         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3296         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3297         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3298         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3299         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3300         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3301         cpu_reg.mips_view_base = 0x8000000;
3302
3303         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3304                 fw = &bnx2_tpat_fw_09;
3305         else
3306                 fw = &bnx2_tpat_fw_06;
3307
3308         fw->text = text;
3309         rc = load_cpu_fw(bp, &cpu_reg, fw);
3310         if (rc)
3311                 goto init_cpu_err;
3312
3313         /* Initialize the Completion Processor. */
3314         cpu_reg.mode = BNX2_COM_CPU_MODE;
3315         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3316         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3317         cpu_reg.state = BNX2_COM_CPU_STATE;
3318         cpu_reg.state_value_clear = 0xffffff;
3319         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3320         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3321         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3322         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3323         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3324         cpu_reg.spad_base = BNX2_COM_SCRATCH;
3325         cpu_reg.mips_view_base = 0x8000000;
3326
3327         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3328                 fw = &bnx2_com_fw_09;
3329         else
3330                 fw = &bnx2_com_fw_06;
3331
3332         fw->text = text;
3333         rc = load_cpu_fw(bp, &cpu_reg, fw);
3334         if (rc)
3335                 goto init_cpu_err;
3336
3337         /* Initialize the Command Processor. */
3338         cpu_reg.mode = BNX2_CP_CPU_MODE;
3339         cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3340         cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3341         cpu_reg.state = BNX2_CP_CPU_STATE;
3342         cpu_reg.state_value_clear = 0xffffff;
3343         cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3344         cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3345         cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3346         cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3347         cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3348         cpu_reg.spad_base = BNX2_CP_SCRATCH;
3349         cpu_reg.mips_view_base = 0x8000000;
3350
3351         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3352                 fw = &bnx2_cp_fw_09;
3353         else
3354                 fw = &bnx2_cp_fw_06;
3355
3356         fw->text = text;
3357         rc = load_cpu_fw(bp, &cpu_reg, fw);
3358
3359 init_cpu_err:
3360         vfree(text);
3361         return rc;
3362 }
3363
/* bnx2_set_power_state - transition the device between PCI power states.
 *
 * Only D0 (fully operational) and D3hot (suspended, optionally armed for
 * Wake-on-LAN) are supported; any other state returns -EINVAL.  The current
 * PMCSR is read up front so the existing power-state field and PME bits can
 * be modified rather than clobbered.
 *
 * Returns 0 on success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (-> D0) and write 1 to the
		 * PME status bit to clear any latched wake event. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI wake packets and turn off
		 * magic-packet detection while the device is running. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save the user's link settings; WOL renegotiates
			 * the link at 10/100 on copper to save power, then
			 * the saved settings are restored below. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			/* Reprogram the MAC address so magic packets
			 * addressed to us are recognized. */
			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program the RX sorter to accept broadcast and
			 * multicast, toggling through disabled -> value ->
			 * value|ENA as the hardware requires. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			/* NOTE(review): ACPI pattern matching is cleared here
			 * even on the WOL path — presumably magic-packet wake
			 * alone is used; confirm against firmware docs. */
			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode firmware whether WOL is armed. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1 only enter D3hot when WOL is armed —
			 * presumably an early-revision erratum; otherwise the
			 * state field is left at D0. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3500
3501 static int
3502 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3503 {
3504         u32 val;
3505         int j;
3506
3507         /* Request access to the flash interface. */
3508         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3509         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3510                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3511                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3512                         break;
3513
3514                 udelay(5);
3515         }
3516
3517         if (j >= NVRAM_TIMEOUT_COUNT)
3518                 return -EBUSY;
3519
3520         return 0;
3521 }
3522
3523 static int
3524 bnx2_release_nvram_lock(struct bnx2 *bp)
3525 {
3526         int j;
3527         u32 val;
3528
3529         /* Relinquish nvram interface. */
3530         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3531
3532         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3533                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3534                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3535                         break;
3536
3537                 udelay(5);
3538         }
3539
3540         if (j >= NVRAM_TIMEOUT_COUNT)
3541                 return -EBUSY;
3542
3543         return 0;
3544 }
3545
3546
3547 static int
3548 bnx2_enable_nvram_write(struct bnx2 *bp)
3549 {
3550         u32 val;
3551
3552         val = REG_RD(bp, BNX2_MISC_CFG);
3553         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3554
3555         if (bp->flash_info->flags & BNX2_NV_WREN) {
3556                 int j;
3557
3558                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3559                 REG_WR(bp, BNX2_NVM_COMMAND,
3560                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3561
3562                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3563                         udelay(5);
3564
3565                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3566                         if (val & BNX2_NVM_COMMAND_DONE)
3567                                 break;
3568                 }
3569
3570                 if (j >= NVRAM_TIMEOUT_COUNT)
3571                         return -EBUSY;
3572         }
3573         return 0;
3574 }
3575
3576 static void
3577 bnx2_disable_nvram_write(struct bnx2 *bp)
3578 {
3579         u32 val;
3580
3581         val = REG_RD(bp, BNX2_MISC_CFG);
3582         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3583 }
3584
3585
3586 static void
3587 bnx2_enable_nvram_access(struct bnx2 *bp)
3588 {
3589         u32 val;
3590
3591         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3592         /* Enable both bits, even on read. */
3593         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3594                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3595 }
3596
3597 static void
3598 bnx2_disable_nvram_access(struct bnx2 *bp)
3599 {
3600         u32 val;
3601
3602         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3603         /* Disable both bits, even after read. */
3604         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3605                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3606                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3607 }
3608
3609 static int
3610 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3611 {
3612         u32 cmd;
3613         int j;
3614
3615         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3616                 /* Buffered flash, no erase needed */
3617                 return 0;
3618
3619         /* Build an erase command */
3620         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3621               BNX2_NVM_COMMAND_DOIT;
3622
3623         /* Need to clear DONE bit separately. */
3624         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3625
3626         /* Address of the NVRAM to read from. */
3627         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3628
3629         /* Issue an erase command. */
3630         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3631
3632         /* Wait for completion. */
3633         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3634                 u32 val;
3635
3636                 udelay(5);
3637
3638                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3639                 if (val & BNX2_NVM_COMMAND_DONE)
3640                         break;
3641         }
3642
3643         if (j >= NVRAM_TIMEOUT_COUNT)
3644                 return -EBUSY;
3645
3646         return 0;
3647 }
3648
3649 static int
3650 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3651 {
3652         u32 cmd;
3653         int j;
3654
3655         /* Build the command word. */
3656         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3657
3658         /* Calculate an offset of a buffered flash, not needed for 5709. */
3659         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3660                 offset = ((offset / bp->flash_info->page_size) <<
3661                            bp->flash_info->page_bits) +
3662                           (offset % bp->flash_info->page_size);
3663         }
3664
3665         /* Need to clear DONE bit separately. */
3666         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3667
3668         /* Address of the NVRAM to read from. */
3669         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3670
3671         /* Issue a read command. */
3672         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3673
3674         /* Wait for completion. */
3675         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3676                 u32 val;
3677
3678                 udelay(5);
3679
3680                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3681                 if (val & BNX2_NVM_COMMAND_DONE) {
3682                         val = REG_RD(bp, BNX2_NVM_READ);
3683
3684                         val = be32_to_cpu(val);
3685                         memcpy(ret_val, &val, 4);
3686                         break;
3687                 }
3688         }
3689         if (j >= NVRAM_TIMEOUT_COUNT)
3690                 return -EBUSY;
3691
3692         return 0;
3693 }
3694
3695
3696 static int
3697 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3698 {
3699         u32 cmd, val32;
3700         int j;
3701
3702         /* Build the command word. */
3703         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3704
3705         /* Calculate an offset of a buffered flash, not needed for 5709. */
3706         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3707                 offset = ((offset / bp->flash_info->page_size) <<
3708                           bp->flash_info->page_bits) +
3709                          (offset % bp->flash_info->page_size);
3710         }
3711
3712         /* Need to clear DONE bit separately. */
3713         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3714
3715         memcpy(&val32, val, 4);
3716         val32 = cpu_to_be32(val32);
3717
3718         /* Write the data. */
3719         REG_WR(bp, BNX2_NVM_WRITE, val32);
3720
3721         /* Address of the NVRAM to write to. */
3722         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3723
3724         /* Issue the write command. */
3725         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3726
3727         /* Wait for completion. */
3728         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3729                 udelay(5);
3730
3731                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3732                         break;
3733         }
3734         if (j >= NVRAM_TIMEOUT_COUNT)
3735                 return -EBUSY;
3736
3737         return 0;
3738 }
3739
/* bnx2_init_nvram - identify the attached flash/EEPROM and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * On the 5709 a single fixed descriptor (flash_5709) is used.  On older
 * chips the NVM_CFG1 strapping is matched against flash_table[]; if the
 * interface has not yet been reconfigured by a previous driver load, the
 * matching entry's config registers are programmed into the hardware
 * (under the NVRAM arbitration lock).
 *
 * Returns 0 on success, -ENODEV if the strapping matches no known part,
 * or the error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set => a previous driver load already reconfigured the
	 * flash interface; match on the backup strap bits only. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop matched: no supported part detected. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by shared hardware config; fall back to
	 * the table entry's total size when the field is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3822
3823 static int
3824 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3825                 int buf_size)
3826 {
3827         int rc = 0;
3828         u32 cmd_flags, offset32, len32, extra;
3829
3830         if (buf_size == 0)
3831                 return 0;
3832
3833         /* Request access to the flash interface. */
3834         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3835                 return rc;
3836
3837         /* Enable access to flash interface */
3838         bnx2_enable_nvram_access(bp);
3839
3840         len32 = buf_size;
3841         offset32 = offset;
3842         extra = 0;
3843
3844         cmd_flags = 0;
3845
3846         if (offset32 & 3) {
3847                 u8 buf[4];
3848                 u32 pre_len;
3849
3850                 offset32 &= ~3;
3851                 pre_len = 4 - (offset & 3);
3852
3853                 if (pre_len >= len32) {
3854                         pre_len = len32;
3855                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3856                                     BNX2_NVM_COMMAND_LAST;
3857                 }
3858                 else {
3859                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3860                 }
3861
3862                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3863
3864                 if (rc)
3865                         return rc;
3866
3867                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3868
3869                 offset32 += 4;
3870                 ret_buf += pre_len;
3871                 len32 -= pre_len;
3872         }
3873         if (len32 & 3) {
3874                 extra = 4 - (len32 & 3);
3875                 len32 = (len32 + 4) & ~3;
3876         }
3877
3878         if (len32 == 4) {
3879                 u8 buf[4];
3880
3881                 if (cmd_flags)
3882                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3883                 else
3884                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3885                                     BNX2_NVM_COMMAND_LAST;
3886
3887                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3888
3889                 memcpy(ret_buf, buf, 4 - extra);
3890         }
3891         else if (len32 > 0) {
3892                 u8 buf[4];
3893
3894                 /* Read the first word. */
3895                 if (cmd_flags)
3896                         cmd_flags = 0;
3897                 else
3898                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3899
3900                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3901
3902                 /* Advance to the next dword. */
3903                 offset32 += 4;
3904                 ret_buf += 4;
3905                 len32 -= 4;
3906
3907                 while (len32 > 4 && rc == 0) {
3908                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3909
3910                         /* Advance to the next dword. */
3911                         offset32 += 4;
3912                         ret_buf += 4;
3913                         len32 -= 4;
3914                 }
3915
3916                 if (rc)
3917                         return rc;
3918
3919                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3920                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3921
3922                 memcpy(ret_buf, buf, 4 - extra);
3923         }
3924
3925         /* Disable access to flash interface */
3926         bnx2_disable_nvram_access(bp);
3927
3928         bnx2_release_nvram_lock(bp);
3929
3930         return rc;
3931 }
3932
3933 static int
3934 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3935                 int buf_size)
3936 {
3937         u32 written, offset32, len32;
3938         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3939         int rc = 0;
3940         int align_start, align_end;
3941
3942         buf = data_buf;
3943         offset32 = offset;
3944         len32 = buf_size;
3945         align_start = align_end = 0;
3946
3947         if ((align_start = (offset32 & 3))) {
3948                 offset32 &= ~3;
3949                 len32 += align_start;
3950                 if (len32 < 4)
3951                         len32 = 4;
3952                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3953                         return rc;
3954         }
3955
3956         if (len32 & 3) {
3957                 align_end = 4 - (len32 & 3);
3958                 len32 += align_end;
3959                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3960                         return rc;
3961         }
3962
3963         if (align_start || align_end) {
3964                 align_buf = kmalloc(len32, GFP_KERNEL);
3965                 if (align_buf == NULL)
3966                         return -ENOMEM;
3967                 if (align_start) {
3968                         memcpy(align_buf, start, 4);
3969                 }
3970                 if (align_end) {
3971                         memcpy(align_buf + len32 - 4, end, 4);
3972                 }
3973                 memcpy(align_buf + align_start, data_buf, buf_size);
3974                 buf = align_buf;
3975         }
3976
3977         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3978                 flash_buffer = kmalloc(264, GFP_KERNEL);
3979                 if (flash_buffer == NULL) {
3980                         rc = -ENOMEM;
3981                         goto nvram_write_end;
3982                 }
3983         }
3984
3985         written = 0;
3986         while ((written < len32) && (rc == 0)) {
3987                 u32 page_start, page_end, data_start, data_end;
3988                 u32 addr, cmd_flags;
3989                 int i;
3990
3991                 /* Find the page_start addr */
3992                 page_start = offset32 + written;
3993                 page_start -= (page_start % bp->flash_info->page_size);
3994                 /* Find the page_end addr */
3995                 page_end = page_start + bp->flash_info->page_size;
3996                 /* Find the data_start addr */
3997                 data_start = (written == 0) ? offset32 : page_start;
3998                 /* Find the data_end addr */
3999                 data_end = (page_end > offset32 + len32) ?
4000                         (offset32 + len32) : page_end;
4001
4002                 /* Request access to the flash interface. */
4003                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4004                         goto nvram_write_end;
4005
4006                 /* Enable access to flash interface */
4007                 bnx2_enable_nvram_access(bp);
4008
4009                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4010                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4011                         int j;
4012
4013                         /* Read the whole page into the buffer
4014                          * (non-buffer flash only) */
4015                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4016                                 if (j == (bp->flash_info->page_size - 4)) {
4017                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4018                                 }
4019                                 rc = bnx2_nvram_read_dword(bp,
4020                                         page_start + j,
4021                                         &flash_buffer[j],
4022                                         cmd_flags);
4023
4024                                 if (rc)
4025                                         goto nvram_write_end;
4026
4027                                 cmd_flags = 0;
4028                         }
4029                 }
4030
4031                 /* Enable writes to flash interface (unlock write-protect) */
4032                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4033                         goto nvram_write_end;
4034
4035                 /* Loop to write back the buffer data from page_start to
4036                  * data_start */
4037                 i = 0;
4038                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4039                         /* Erase the page */
4040                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4041                                 goto nvram_write_end;
4042
4043                         /* Re-enable the write again for the actual write */
4044                         bnx2_enable_nvram_write(bp);
4045
4046                         for (addr = page_start; addr < data_start;
4047                                 addr += 4, i += 4) {
4048
4049                                 rc = bnx2_nvram_write_dword(bp, addr,
4050                                         &flash_buffer[i], cmd_flags);
4051
4052                                 if (rc != 0)
4053                                         goto nvram_write_end;
4054
4055                                 cmd_flags = 0;
4056                         }
4057                 }
4058
4059                 /* Loop to write the new data from data_start to data_end */
4060                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4061                         if ((addr == page_end - 4) ||
4062                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4063                                  (addr == data_end - 4))) {
4064
4065                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4066                         }
4067                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4068                                 cmd_flags);
4069
4070                         if (rc != 0)
4071                                 goto nvram_write_end;
4072
4073                         cmd_flags = 0;
4074                         buf += 4;
4075                 }
4076
4077                 /* Loop to write back the buffer data from data_end
4078                  * to page_end */
4079                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4080                         for (addr = data_end; addr < page_end;
4081                                 addr += 4, i += 4) {
4082
4083                                 if (addr == page_end-4) {
4084                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4085                                 }
4086                                 rc = bnx2_nvram_write_dword(bp, addr,
4087                                         &flash_buffer[i], cmd_flags);
4088
4089                                 if (rc != 0)
4090                                         goto nvram_write_end;
4091
4092                                 cmd_flags = 0;
4093                         }
4094                 }
4095
4096                 /* Disable writes to flash interface (lock write-protect) */
4097                 bnx2_disable_nvram_write(bp);
4098
4099                 /* Disable access to flash interface */
4100                 bnx2_disable_nvram_access(bp);
4101                 bnx2_release_nvram_lock(bp);
4102
4103                 /* Increment written */
4104                 written += data_end - data_start;
4105         }
4106
4107 nvram_write_end:
4108         kfree(flash_buffer);
4109         kfree(align_buf);
4110         return rc;
4111 }
4112
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
        u32 val;

        /* Probe whether the bootcode firmware manages the SerDes PHY on the
         * driver's behalf ("remote PHY").  Assume it does not until proven
         * otherwise; only SerDes-based ports can use this feature. */
        bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
        if (!(bp->phy_flags & PHY_SERDES_FLAG))
                return;

        /* Firmware advertises its capabilities in shared memory; the
         * signature must match before the capability bits are trusted. */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
        if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
                return;

        if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
                bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

                /* Firmware reports the active media type via the link
                 * status word. */
                val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
                if (val & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (netif_running(bp->dev)) {
                        u32 sig;

                        /* Mirror the firmware-reported link state into the
                         * net device while the interface is up. */
                        if (val & BNX2_LINK_STATUS_LINK_UP) {
                                bp->link_up = 1;
                                netif_carrier_on(bp->dev);
                        } else {
                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                        }
                        /* Acknowledge to the firmware that the driver will
                         * use the remote PHY capability. */
                        sig = BNX2_DRV_ACK_CAP_SIGNATURE |
                              BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
                        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
                                   sig);
                }
        }
}
4152
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        /* Switch the GRC windows into separate-window mode, then point
         * window 2 at the MSI-X vector table and window 3 at the MSI-X
         * pending-bit array so both are reachable through BAR0. */
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4161
/* Soft-reset the chip and re-synchronize with the bootcode firmware.
 * reset_code is the BNX2_DRV_MSG_CODE_* reason passed to the firmware.
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV if
 * the chip comes back in the wrong endian mode, or a firmware-sync error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
                   BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 uses a dedicated reset command register; the read
                 * back flushes the posted write before the settle delay. */
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Restore swap/window config via config space, since the
                 * memory-mapped registers were just reset. */
                pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                /* Older chips reset through the MISC_CONFIG core-reset bit. */
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        printk(KERN_ERR PFX "Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                printk(KERN_ERR PFX "Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
        if (rc)
                return rc;

        /* Re-detect remote PHY support; if the media type changed across
         * the reset, reprogram the default link settings. */
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
        bnx2_init_remote_phy(bp);
        if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        /* The GRC windows for MSI-X were reset along with the chip. */
        if (bp->flags & USING_MSIX_FLAG)
                bnx2_setup_msix_tbl(bp);

        return rc;
}
4266
/* Program the freshly-reset chip: DMA configuration, context memory,
 * on-chip CPUs, MAC address, MTU, host-coalescing parameters and the
 * receive filter, then hand off to the firmware and enable all blocks.
 * Returns 0 on success or a negative errno from context/CPU init or
 * the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val;
        int rc, i;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* DMA byte/word swapping plus read/write channel counts. */
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        val |= (0x2 << 20) | (1 << 11);

        /* Extra DMA tuning bit for 133 MHz PCI-X buses. */
        if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        /* Ping-pong DMA workaround for 5706 (except A0) on plain PCI. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        /* 5706 A0 erratum: restrict the TX DMA engine to one channel. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        /* On PCI-X, disable relaxed ordering so completions stay ordered. */
        if (bp->flags & PCIX_FLAG) {
                u16 val16;

                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                rc = bnx2_init_5709_context(bp);
                if (rc)
                        return rc;
        } else
                bnx2_init_context(bp);

        /* Load firmware into the on-chip RISC processors. */
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp);

        /* Kernel-bypass block size and a 5709 A0/A1 halt workaround. */
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
                val |= BNX2_MQ_CONFIG_HALT_DIS;

        REG_WR(bp, BNX2_MQ_CONFIG, val);

        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        /* Tell the RV2P processor the host page size. */
        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Derive the Ethernet backoff seed from the MAC address. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        val = bp->dev->mtu + ETH_HLEN + 4;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        /* Start every NAPI instance from status-block index 0. */
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->bnx2_napi[i].last_status_idx = 0;

        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* DMA addresses of the host status and statistics blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Interrupt coalescing thresholds: packet-count trip points and
         * timer ticks, each with the "during interrupt" value in the
         * upper half-word. */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        if (CHIP_NUM(bp) == CHIP_NUM_5708)
                REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
        else
                REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        /* 5706 A1 erratum: timer-based coalescing modes are unusable. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                val = BNX2_HC_CONFIG_COLLECT_STATS;
        else {
                val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
                      BNX2_HC_CONFIG_COLLECT_STATS;
        }

        /* With MSI-X, the TX vector gets its own status block and
         * coalescing parameters. */
        if (bp->flags & USING_MSIX_FLAG) {
                REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
                       BNX2_HC_MSIX_BIT_VECTOR_VAL);

                REG_WR(bp, BNX2_HC_SB_CONFIG_1,
                        BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_ONE_SHOT);

                REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP_1,
                        (bp->tx_quick_cons_trip_int << 16) |
                         bp->tx_quick_cons_trip);

                REG_WR(bp, BNX2_HC_TX_TICKS_1,
                        (bp->tx_ticks_int << 16) | bp->tx_ticks);

                val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
        }

        if (bp->flags & ONE_SHOT_MSI_FLAG)
                val |= BNX2_HC_CONFIG_ONE_SHOT;

        REG_WR(bp, BNX2_HC_CONFIG, val);

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
        }
        /* Final handshake with the firmware before enabling everything. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          0);

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        /* Cache the host-coalescing command word for later use. */
        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
4464
4465 static void
4466 bnx2_clear_ring_states(struct bnx2 *bp)
4467 {
4468         struct bnx2_napi *bnapi;
4469         int i;
4470
4471         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4472                 bnapi = &bp->bnx2_napi[i];
4473
4474                 bnapi->tx_cons = 0;
4475                 bnapi->hw_tx_cons = 0;
4476                 bnapi->rx_prod_bseq = 0;
4477                 bnapi->rx_prod = 0;
4478                 bnapi->rx_cons = 0;
4479                 bnapi->rx_pg_prod = 0;
4480                 bnapi->rx_pg_cons = 0;
4481         }
4482 }
4483
4484 static void
4485 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4486 {
4487         u32 val, offset0, offset1, offset2, offset3;
4488
4489         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4490                 offset0 = BNX2_L2CTX_TYPE_XI;
4491                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4492                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4493                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4494         } else {
4495                 offset0 = BNX2_L2CTX_TYPE;
4496                 offset1 = BNX2_L2CTX_CMD_TYPE;
4497                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4498                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4499         }
4500         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4501         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4502
4503         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4504         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4505
4506         val = (u64) bp->tx_desc_mapping >> 32;
4507         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4508
4509         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4510         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4511 }
4512
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
        struct tx_bd *txbd;
        u32 cid = TX_CID;
        struct bnx2_napi *bnapi;

        /* With MSI-X, TX completions are steered to a dedicated vector
         * and a separate TSS context ID. */
        bp->tx_vec = 0;
        if (bp->flags & USING_MSIX_FLAG) {
                cid = TX_TSS_CID;
                bp->tx_vec = BNX2_TX_VEC;
                REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
                       (TX_TSS_CID << 7));
        }
        bnapi = &bp->bnx2_napi[bp->tx_vec];

        /* Wake the TX queue once at least half the ring has drained. */
        bp->tx_wake_thresh = bp->tx_ring_size / 2;

        /* The last BD of the ring chains back to the start of the ring. */
        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

        bp->tx_prod = 0;
        bp->tx_prod_bseq = 0;

        /* Mailbox addresses used by the hot path to post new BDs. */
        bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
        bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

        bnx2_init_tx_context(bp, cid);
}
4544
4545 static void
4546 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4547                      int num_rings)
4548 {
4549         int i;
4550         struct rx_bd *rxbd;
4551
4552         for (i = 0; i < num_rings; i++) {
4553                 int j;
4554
4555                 rxbd = &rx_ring[i][0];
4556                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4557                         rxbd->rx_bd_len = buf_size;
4558                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4559                 }
4560                 if (i == (num_rings - 1))
4561                         j = 0;
4562                 else
4563                         j = i + 1;
4564                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4565                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4566         }
4567 }
4568
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        int i;
        u16 prod, ring_prod;
        u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        /* Chain together the normal RX BD ring pages. */
        bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
                             bp->rx_buf_use_size, bp->rx_max_ring);

        CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
        if (bp->rx_pg_ring_size) {
                /* Jumbo frames: set up the page ring used for the part of
                 * a frame beyond the first (header) buffer. */
                bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
                                     bp->rx_pg_desc_mapping,
                                     PAGE_SIZE, bp->rx_max_pg_ring);
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
                       BNX2_L2CTX_RBDC_JUMBO_KEY);

                /* Physical address of the first page-ring page. */
                val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
                CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

                val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
                CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

                if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
        }

        /* Context type/size for the RX connection. */
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;
        CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

        /* Physical address of the first normal RX ring page. */
        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-fill the page ring; stop early (best effort) if an
         * allocation fails. */
        ring_prod = prod = bnapi->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
                if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_PG_RING_IDX(prod);
        }
        bnapi->rx_pg_prod = prod;

        /* Pre-fill the normal RX ring with SKBs, same best-effort policy. */
        ring_prod = prod = bnapi->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bnapi->rx_prod = prod;

        /* Publish the producer indices and byte sequence to the chip. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
                 bnapi->rx_pg_prod);
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
4636
4637 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4638 {
4639         u32 max, num_rings = 1;
4640
4641         while (ring_size > MAX_RX_DESC_CNT) {
4642                 ring_size -= MAX_RX_DESC_CNT;
4643                 num_rings++;
4644         }
4645         /* round to next power of 2 */
4646         max = max_size;
4647         while ((max & num_rings) == 0)
4648                 max >>= 1;
4649
4650         if (num_rings != max)
4651                 max <<= 1;
4652
4653         return max;
4654 }
4655
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
        u32 rx_size, rx_space, jumbo_size;

        /* 8 for CRC and VLAN */
        rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;

        /* Total per-buffer footprint, including SKB alignment padding and
         * the shared-info trailer, used to decide if one page suffices. */
        rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
                sizeof(struct skb_shared_info);

        bp->rx_copy_thresh = RX_COPY_THRESH;
        bp->rx_pg_ring_size = 0;
        bp->rx_max_pg_ring = 0;
        bp->rx_max_pg_ring_idx = 0;
        /* When a full frame does not fit in one page, split reception
         * across a small header buffer plus page-ring pages (unless the
         * chip's jumbo support is broken). */
        if ((rx_space > PAGE_SIZE) && !(bp->flags & JUMBO_BROKEN_FLAG)) {
                int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

                jumbo_size = size * pages;
                if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
                        jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

                bp->rx_pg_ring_size = jumbo_size;
                bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
                                                        MAX_RX_PG_RINGS);
                bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
                /* First buffer only holds the header portion. */
                rx_size = RX_COPY_THRESH + bp->rx_offset;
                bp->rx_copy_thresh = 0;
        }

        bp->rx_buf_use_size = rx_size;
        /* hw alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
        bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
        bp->rx_ring_size = size;
        bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
        bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4694
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->tx_buf_ring == NULL)
                return;

        /* Walk the TX ring freeing any SKB still queued; each SKB owns
         * one head BD plus one BD per page fragment, so i advances by
         * the whole BD group at a time. */
        for (i = 0; i < TX_DESC_CNT; ) {
                struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
                struct sk_buff *skb = tx_buf->skb;
                int j, last;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                /* Unmap the linear (head) portion of the SKB. */
                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;

                /* Unmap each page fragment from the following BDs. */
                last = skb_shinfo(skb)->nr_frags;
                for (j = 0; j < last; j++) {
                        tx_buf = &bp->tx_buf_ring[i + j + 1];
                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(tx_buf, mapping),
                                skb_shinfo(skb)->frags[j].size,
                                PCI_DMA_TODEVICE);
                }
                dev_kfree_skb(skb);
                /* j == last here, so this skips past all BDs of this SKB. */
                i += j + 1;
        }

}
4731
4732 static void
4733 bnx2_free_rx_skbs(struct bnx2 *bp)
4734 {
4735         int i;
4736
4737         if (bp->rx_buf_ring == NULL)
4738                 return;
4739
4740         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4741                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4742                 struct sk_buff *skb = rx_buf->skb;
4743
4744                 if (skb == NULL)
4745                         continue;
4746
4747                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4748                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4749
4750                 rx_buf->skb = NULL;
4751
4752                 dev_kfree_skb(skb);
4753         }
4754         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4755                 bnx2_free_rx_page(bp, i);
4756 }
4757
/* Free all SKBs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
4764
4765 static int
4766 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4767 {
4768         int rc;
4769
4770         rc = bnx2_reset_chip(bp, reset_code);
4771         bnx2_free_skbs(bp);
4772         if (rc)
4773                 return rc;
4774
4775         if ((rc = bnx2_init_chip(bp)) != 0)
4776                 return rc;
4777
4778         bnx2_clear_ring_states(bp);
4779         bnx2_init_tx_ring(bp);
4780         bnx2_init_rx_ring(bp);
4781         return 0;
4782 }
4783
4784 static int
4785 bnx2_init_nic(struct bnx2 *bp)
4786 {
4787         int rc;
4788
4789         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4790                 return rc;
4791
4792         spin_lock_bh(&bp->phy_lock);
4793         bnx2_init_phy(bp);
4794         bnx2_set_link(bp);
4795         spin_unlock_bh(&bp->phy_lock);
4796         return 0;
4797 }
4798
4799 static int
4800 bnx2_test_registers(struct bnx2 *bp)
4801 {
4802         int ret;
4803         int i, is_5709;
4804         static const struct {
4805                 u16   offset;
4806                 u16   flags;
4807 #define BNX2_FL_NOT_5709        1
4808                 u32   rw_mask;
4809                 u32   ro_mask;
4810         } reg_tbl[] = {
4811                 { 0x006c, 0, 0x00000000, 0x0000003f },
4812                 { 0x0090, 0, 0xffffffff, 0x00000000 },
4813                 { 0x0094, 0, 0x00000000, 0x00000000 },
4814
4815                 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4816                 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4817                 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4818                 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4819                 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4820                 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4821                 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4822                 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4823                 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4824
4825                 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4826                 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4827                 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4828                 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4829                 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4830                 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4831
4832                 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4833                 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4834                 { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
4835
4836                 { 0x1000, 0, 0x00000000, 0x00000001 },
4837                 { 0x1004, 0, 0x00000000, 0x000f0001 },
4838
4839                 { 0x1408, 0, 0x01c00800, 0x00000000 },
4840                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4841                 { 0x14a8, 0, 0x00000000, 0x000001ff },
4842                 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4843                 { 0x14b0, 0, 0x00000002, 0x00000001 },
4844                 { 0x14b8, 0, 0x00000000, 0x00000000 },
4845                 { 0x14c0, 0, 0x00000000, 0x00000009 },
4846                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4847                 { 0x14cc, 0, 0x00000000, 0x00000001 },
4848                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4849
4850                 { 0x1800, 0, 0x00000000, 0x00000001 },
4851                 { 0x1804, 0, 0x00000000, 0x00000003 },
4852
4853                 { 0x2800, 0, 0x00000000, 0x00000001 },
4854                 { 0x2804, 0, 0x00000000, 0x00003f01 },
4855                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4856                 { 0x2810, 0, 0xffff0000, 0x00000000 },
4857                 { 0x2814, 0, 0xffff0000, 0x00000000 },
4858                 { 0x2818, 0, 0xffff0000, 0x00000000 },
4859                 { 0x281c, 0, 0xffff0000, 0x00000000 },
4860                 { 0x2834, 0, 0xffffffff, 0x00000000 },
4861                 { 0x2840, 0, 0x00000000, 0xffffffff },
4862                 { 0x2844, 0, 0x00000000, 0xffffffff },
4863                 { 0x2848, 0, 0xffffffff, 0x00000000 },
4864                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4865
4866                 { 0x2c00, 0, 0x00000000, 0x00000011 },
4867                 { 0x2c04, 0, 0x00000000, 0x00030007 },
4868
4869                 { 0x3c00, 0, 0x00000000, 0x00000001 },
4870                 { 0x3c04, 0, 0x00000000, 0x00070000 },
4871                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4872                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4873                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4874                 { 0x3c14, 0, 0x00000000, 0xffffffff },
4875                 { 0x3c18, 0, 0x00000000, 0xffffffff },
4876                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4877                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4878
4879                 { 0x5004, 0, 0x00000000, 0x0000007f },
4880                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4881
4882                 { 0x5c00, 0, 0x00000000, 0x00000001 },
4883                 { 0x5c04, 0, 0x00000000, 0x0003000f },
4884                 { 0x5c08, 0, 0x00000003, 0x00000000 },
4885                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4886                 { 0x5c10, 0, 0x00000000, 0xffffffff },
4887                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4888                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4889                 { 0x5c88, 0, 0x00000000, 0x00077373 },
4890                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4891
4892                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4893                 { 0x680c, 0, 0xffffffff, 0x00000000 },
4894                 { 0x6810, 0, 0xffffffff, 0x00000000 },
4895                 { 0x6814, 0, 0xffffffff, 0x00000000 },
4896                 { 0x6818, 0, 0xffffffff, 0x00000000 },
4897                 { 0x681c, 0, 0xffffffff, 0x00000000 },
4898                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4899                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4900                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4901                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4902                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4903                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4904                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4905                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4906                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4907                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4908                 { 0x684c, 0, 0xffffffff, 0x00000000 },
4909                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4910                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4911                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4912                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4913                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4914                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4915
4916                 { 0xffff, 0, 0x00000000, 0x00000000 },
4917         };
4918
4919         ret = 0;
4920         is_5709 = 0;
4921         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4922                 is_5709 = 1;
4923
4924         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4925                 u32 offset, rw_mask, ro_mask, save_val, val;
4926                 u16 flags = reg_tbl[i].flags;
4927
4928                 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4929                         continue;
4930
4931                 offset = (u32) reg_tbl[i].offset;
4932                 rw_mask = reg_tbl[i].rw_mask;
4933                 ro_mask = reg_tbl[i].ro_mask;
4934
4935                 save_val = readl(bp->regview + offset);
4936
4937                 writel(0, bp->regview + offset);
4938
4939                 val = readl(bp->regview + offset);
4940                 if ((val & rw_mask) != 0) {
4941                         goto reg_test_err;
4942                 }
4943
4944                 if ((val & ro_mask) != (save_val & ro_mask)) {
4945                         goto reg_test_err;
4946                 }
4947
4948                 writel(0xffffffff, bp->regview + offset);
4949
4950                 val = readl(bp->regview + offset);
4951                 if ((val & rw_mask) != rw_mask) {
4952                         goto reg_test_err;
4953                 }
4954
4955                 if ((val & ro_mask) != (save_val & ro_mask)) {
4956                         goto reg_test_err;
4957                 }
4958
4959                 writel(save_val, bp->regview + offset);
4960                 continue;
4961
4962 reg_test_err:
4963                 writel(save_val, bp->regview + offset);
4964                 ret = -ENODEV;
4965                 break;
4966         }
4967         return ret;
4968 }
4969
4970 static int
4971 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4972 {
4973         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4974                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4975         int i;
4976
4977         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4978                 u32 offset;
4979
4980                 for (offset = 0; offset < size; offset += 4) {
4981
4982                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4983
4984                         if (REG_RD_IND(bp, start + offset) !=
4985                                 test_pattern[i]) {
4986                                 return -ENODEV;
4987                         }
4988                 }
4989         }
4990         return 0;
4991 }
4992
4993 static int
4994 bnx2_test_memory(struct bnx2 *bp)
4995 {
4996         int ret = 0;
4997         int i;
4998         static struct mem_entry {
4999                 u32   offset;
5000                 u32   len;
5001         } mem_tbl_5706[] = {
5002                 { 0x60000,  0x4000 },
5003                 { 0xa0000,  0x3000 },
5004                 { 0xe0000,  0x4000 },
5005                 { 0x120000, 0x4000 },
5006                 { 0x1a0000, 0x4000 },
5007                 { 0x160000, 0x4000 },
5008                 { 0xffffffff, 0    },
5009         },
5010         mem_tbl_5709[] = {
5011                 { 0x60000,  0x4000 },
5012                 { 0xa0000,  0x3000 },
5013                 { 0xe0000,  0x4000 },
5014                 { 0x120000, 0x4000 },
5015                 { 0x1a0000, 0x4000 },
5016                 { 0xffffffff, 0    },
5017         };
5018         struct mem_entry *mem_tbl;
5019
5020         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5021                 mem_tbl = mem_tbl_5709;
5022         else
5023                 mem_tbl = mem_tbl_5706;
5024
5025         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5026                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5027                         mem_tbl[i].len)) != 0) {
5028                         return ret;
5029                 }
5030         }
5031
5032         return ret;
5033 }
5034
5035 #define BNX2_MAC_LOOPBACK       0
5036 #define BNX2_PHY_LOOPBACK       1
5037
/* Send one self-addressed frame through the chip in MAC or PHY loopback
 * and verify it arrives intact on the RX ring.
 *
 * Returns 0 if the frame was received with a matching payload, -EINVAL
 * for an unknown mode, -ENOMEM if no skb could be allocated, and
 * -ENODEV on any TX/RX/content failure.  bp->loopback is cleared again
 * before returning.  PHY loopback is skipped (success) when the PHY is
 * managed by remote firmware.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;

        /* TX completions land on a separate vector when MSI-X is used. */
        tx_napi = bnapi;
        if (bp->flags & USING_MSIX_FLAG)
                tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];

        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                        return 0;

                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build a test frame: our own MAC as destination, zeroed
         * src/ethertype bytes, then an incrementing byte pattern that is
         * re-checked on receive. */
        pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a coalesce event (no interrupt) so the status block is
         * current before sampling the RX consumer index. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

        num_pkts = 0;

        /* Post a single TX descriptor covering the whole frame. */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell: producer index plus byte sequence. */
        REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        /* Give the frame time to loop back, then refresh the status
         * block again before checking the indices. */
        udelay(100);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The TX descriptor must have been consumed ... */
        if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
                goto loopback_test_done;

        /* ... and exactly num_pkts frames received. */
        rx_idx = bnx2_get_hw_rx_cons(bnapi);
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The hardware prepends an l2_fhdr status header to the frame
         * data in the RX buffer. */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject the frame if any receive-error flag is set. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Hardware length includes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload pattern byte for byte. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
5163
5164 #define BNX2_MAC_LOOPBACK_FAILED        1
5165 #define BNX2_PHY_LOOPBACK_FAILED        2
5166 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5167                                          BNX2_PHY_LOOPBACK_FAILED)
5168
5169 static int
5170 bnx2_test_loopback(struct bnx2 *bp)
5171 {
5172         int rc = 0;
5173
5174         if (!netif_running(bp->dev))
5175                 return BNX2_LOOPBACK_FAILED;
5176
5177         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5178         spin_lock_bh(&bp->phy_lock);
5179         bnx2_init_phy(bp);
5180         spin_unlock_bh(&bp->phy_lock);
5181         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5182                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5183         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5184                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5185         return rc;
5186 }
5187
5188 #define NVRAM_SIZE 0x200
5189 #define CRC32_RESIDUAL 0xdebb20e3
5190
5191 static int
5192 bnx2_test_nvram(struct bnx2 *bp)
5193 {
5194         u32 buf[NVRAM_SIZE / 4];
5195         u8 *data = (u8 *) buf;
5196         int rc = 0;
5197         u32 magic, csum;
5198
5199         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5200                 goto test_nvram_done;
5201
5202         magic = be32_to_cpu(buf[0]);
5203         if (magic != 0x669955aa) {
5204                 rc = -ENODEV;
5205                 goto test_nvram_done;
5206         }
5207
5208         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5209                 goto test_nvram_done;
5210
5211         csum = ether_crc_le(0x100, data);
5212         if (csum != CRC32_RESIDUAL) {
5213                 rc = -ENODEV;
5214                 goto test_nvram_done;
5215         }
5216
5217         csum = ether_crc_le(0x100, data + 0x100);
5218         if (csum != CRC32_RESIDUAL) {
5219                 rc = -ENODEV;
5220         }
5221
5222 test_nvram_done:
5223         return rc;
5224 }
5225
5226 static int
5227 bnx2_test_link(struct bnx2 *bp)
5228 {
5229         u32 bmsr;
5230
5231         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5232                 if (bp->link_up)
5233                         return 0;
5234                 return -ENODEV;
5235         }
5236         spin_lock_bh(&bp->phy_lock);
5237         bnx2_enable_bmsr1(bp);
5238         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5239         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5240         bnx2_disable_bmsr1(bp);
5241         spin_unlock_bh(&bp->phy_lock);
5242
5243         if (bmsr & BMSR_LSTATUS) {
5244                 return 0;
5245         }
5246         return -ENODEV;
5247 }
5248
5249 static int
5250 bnx2_test_intr(struct bnx2 *bp)
5251 {
5252         int i;
5253         u16 status_idx;
5254
5255         if (!netif_running(bp->dev))
5256                 return -ENODEV;
5257
5258         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5259
5260         /* This register is not touched during run-time. */
5261         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5262         REG_RD(bp, BNX2_HC_COMMAND);
5263
5264         for (i = 0; i < 10; i++) {
5265                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5266                         status_idx) {
5267
5268                         break;
5269                 }
5270
5271                 msleep_interruptible(10);
5272         }
5273         if (i < 10)
5274                 return 0;
5275
5276         return -ENODEV;
5277 }
5278
/* Periodic link maintenance for the 5706 SerDes PHY, run from the
 * driver timer.  While autoneg has not produced a link, fall back to a
 * forced 1Gb full-duplex link if the partner shows signal but sends no
 * autoneg CONFIG codewords ("parallel detect").  Once linked via
 * parallel detect, re-enable autoneg if the partner later starts
 * sending CONFIG.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Vendor-specific PHY accesses; the magic
                         * register values are undocumented here —
                         * semantics per the inline comments below.
                         * NOTE(review): phy2 is read twice, presumably
                         * because the value latches — confirm. */
                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Partner is not autonegotiating: force
                                 * 1Gb full duplex and remember we are in
                                 * parallel-detect mode. */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* Linked via parallel detect; if the partner now sends
                 * CONFIG codewords, switch back to autoneg. */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
5333
/* Periodic link maintenance for the 5708 SerDes PHY.  Only relevant on
 * 2.5G-capable parts: while autoneg has not produced a link, alternate
 * between forced 2.5G and autoneg so a link can be made with either
 * kind of partner.  Skipped entirely when the PHY is managed by remote
 * firmware.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg got nowhere: try forced 2.5G for the
                         * (shorter) forced-timeout interval. */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G got nowhere: return to autoneg
                         * and give it two timer ticks before retrying. */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
5366
5367 static void
5368 bnx2_timer(unsigned long data)
5369 {
5370         struct bnx2 *bp = (struct bnx2 *) data;
5371
5372         if (!netif_running(bp->dev))
5373                 return;
5374
5375         if (atomic_read(&bp->intr_sem) != 0)
5376                 goto bnx2_restart_timer;
5377
5378         bnx2_send_heart_beat(bp);
5379
5380         bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5381
5382         /* workaround occasional corrupted counters */
5383         if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5384                 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5385                                             BNX2_HC_COMMAND_STATS_NOW);
5386
5387         if (bp->phy_flags & PHY_SERDES_FLAG) {
5388                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5389                         bnx2_5706_serdes_timer(bp);
5390                 else
5391                         bnx2_5708_serdes_timer(bp);
5392         }
5393
5394 bnx2_restart_timer:
5395         mod_timer(&bp->timer, jiffies + bp->current_interval);
5396 }
5397
5398 static int
5399 bnx2_request_irq(struct bnx2 *bp)
5400 {
5401         struct net_device *dev = bp->dev;
5402         unsigned long flags;
5403         struct bnx2_irq *irq;
5404         int rc = 0, i;
5405
5406         if (bp->flags & USING_MSI_OR_MSIX_FLAG)
5407                 flags = 0;
5408         else
5409                 flags = IRQF_SHARED;
5410
5411         for (i = 0; i < bp->irq_nvecs; i++) {
5412                 irq = &bp->irq_tbl[i];
5413                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5414                                  dev);
5415                 if (rc)
5416                         break;
5417                 irq->requested = 1;
5418         }
5419         return rc;
5420 }
5421
5422 static void
5423 bnx2_free_irq(struct bnx2 *bp)
5424 {
5425         struct net_device *dev = bp->dev;
5426         struct bnx2_irq *irq;
5427         int i;
5428
5429         for (i = 0; i < bp->irq_nvecs; i++) {
5430                 irq = &bp->irq_tbl[i];
5431                 if (irq->requested)
5432                         free_irq(irq->vector, dev);
5433                 irq->requested = 0;
5434         }
5435         if (bp->flags & USING_MSI_FLAG)
5436                 pci_disable_msi(bp->pdev);
5437         else if (bp->flags & USING_MSIX_FLAG)
5438                 pci_disable_msix(bp->pdev);
5439
5440         bp->flags &= ~(USING_MSI_OR_MSIX_FLAG | ONE_SHOT_MSI_FLAG);
5441 }
5442
/* Try to switch the device to MSI-X (one base vector plus one TX
 * vector).  Programs the chip-side MSI-X table/PBA locations, requests
 * BNX2_MAX_MSIX_VEC vectors from the PCI core, and on success fills in
 * bp->irq_tbl and sets USING_MSIX_FLAG | ONE_SHOT_MSI_FLAG.  On any
 * failure it returns silently and the caller stays in INTx/MSI mode.
 */
static void
bnx2_enable_msix(struct bnx2 *bp)
{
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];

        /* Point the chip's MSI-X table and PBA at the GRC windows
         * before asking the PCI core for vectors. */
        bnx2_setup_msix_tbl(bp);
        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                msix_ent[i].entry = i;
                msix_ent[i].vector = 0;
        }

        /* All-or-nothing: any failure leaves us in INTx/MSI mode. */
        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;

        bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
        bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;

        /* Vector names: "<ifname>-base" and "<ifname>-tx". */
        strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
        strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
        strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
        strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");

        bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
        bp->flags |= USING_MSIX_FLAG | ONE_SHOT_MSI_FLAG;
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5476
/* Select the interrupt mode for the device.  Starts from legacy INTx
 * defaults, then upgrades to MSI-X (when the chip advertises it) or
 * plain MSI.  On the 5709, MSI uses the one-shot handler.
 *
 * @dis_msi: forces INTx (used after an MSI self-test failure in
 *           bnx2_open()).
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        /* INTx defaults; the MSI/MSI-X paths below override them. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        if ((bp->flags & MSIX_CAP_FLAG) && !dis_msi)
                bnx2_enable_msix(bp);

        if ((bp->flags & MSI_CAP_FLAG) && !dis_msi &&
            !(bp->flags & USING_MSIX_FLAG)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 supports one-shot MSI. */
                                bp->flags |= ONE_SHOT_MSI_FLAG;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }
}
5502
/* net_device open hook: power up the chip, allocate rings/memory, pick
 * an interrupt mode, request vectors, initialize the NIC, and start the
 * timer and TX queue.  If MSI is in use, an interrupt self-test is run
 * and on failure everything is torn down and re-done in INTx mode.
 * Returns 0 or a negative errno with all resources released.
 *
 * Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        /* Pick INTx/MSI/MSI-X, enable NAPI, then grab the vectors. */
        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_request_irq(bp);

        if (rc) {
                bnx2_napi_disable(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                bnx2_napi_disable(bp);
                bnx2_free_irq(bp);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* Force INTx and re-initialize from scratch. */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                bnx2_napi_disable(bp);
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & USING_MSI_FLAG)
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        else if (bp->flags & USING_MSIX_FLAG)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

        netif_start_queue(dev);

        return 0;
}
5585
/* Workqueue handler scheduled by bnx2_tx_timeout(): quiesce the netif
 * path, re-initialize the NIC, and restart.  in_reset_task is polled by
 * bnx2_close() so it can wait for us without flush_scheduled_work()
 * (which can deadlock under rtnl_lock).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        if (!netif_running(bp->dev))
                return;

        bp->in_reset_task = 1;
        bnx2_netif_stop(bp);

        bnx2_init_nic(bp);

        /* Nonzero intr_sem makes bnx2_timer() skip its work; presumably
         * cleared again inside bnx2_netif_start() — not visible here. */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
        bp->in_reset_task = 0;
}
5603
/* net_device tx_timeout hook: defer the chip reset to process context
 * via the reset_task work item (see bnx2_reset_task()). */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
5612
#ifdef BCM_VLAN
/* VLAN acceleration hook: install (or clear, vlgrp == NULL) the VLAN
 * group.  The datapath is quiesced around the swap and the RX mode is
 * reprogrammed — presumably VLAN tag handling in the chip depends on
 * whether a group is registered; confirm in bnx2_set_rx_mode().
 *
 * Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
#endif
5628
/* Hard-start transmit: map the skb (linear part plus page fragments)
 * onto consecutive TX BDs, set checksum/VLAN/LSO flags, and ring the TX
 * doorbell.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring is
 * unexpectedly full.
 *
 * Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];

        /* The queue should have been stopped before the ring filled;
         * reaching here means the flow control below failed. */
        if (unlikely(bnx2_tx_avail(bp, bnapi) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = bp->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                /* LSO: hand the MSS and header-length info to the chip. */
                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* TCPv6 LSO: encode the TCP header offset
                         * (relative to a basic IPv6 header) into the BD
                         * flag/mss bits. */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* Headers are rewritten below; make sure they
                         * are not shared with a clone first. */
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Pre-compute the pseudo-header checksum; the
                         * hardware completes it per segment. */
                        iph = ip_hdr(skb);
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* NOTE(review): the pci_map_single()/pci_map_page() results are
         * not checked for mapping errors — consider
         * pci_dma_mapping_error() handling. */
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &bp->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &bp->tx_desc_ring[ring_prod];

        /* First BD covers the linear part of the skb. */
        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &bp->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        bp->tx_prod_bseq += skb->len;

        /* Ring the doorbell: producer index plus byte sequence. */
        REG_WR16(bp, bp->tx_bidx_addr, prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        mmiowb();

        bp->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop the queue when the ring is nearly full; re-wake at once
         * if completions freed enough space in the meantime (closes a
         * race with bnx2_tx_int()). */
        if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
5769
/* net_device stop hook: wait out any in-flight reset_task, stop
 * interrupts/NAPI/timer, reset the chip with a Wake-on-LAN-appropriate
 * firmware code, release IRQs and memory, and drop to D3hot.  Always
 * returns 0.
 *
 * Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        /* Tell the firmware whether to keep Wake-on-LAN armed. */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
5802
/* Assemble a 64-bit hardware counter from its two 32-bit halves
 * (ctr_hi / ctr_lo).  On 32-bit hosts only the low half fits in an
 * unsigned long, so GET_NET_STATS32 is used there instead.
 *
 * Note: the whole expansion is parenthesized so the macro composes
 * safely inside larger expressions (the previous form expanded to an
 * unparenthesized "a + b").
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	  (unsigned long) (ctr##_lo)))

#define GET_NET_STATS32(ctr)		\
	((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5815
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* If the statistics block was never set up, return the cached
	 * counters unchanged.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* rx/tx packet totals are the sum of the unicast, multicast and
	 * broadcast hardware counters.
	 */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors aggregates the individual error classes above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 and 5708 A0;
	 * that counter is also skipped in the stats length tables
	 * because of errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Frames dropped by hardware (MBUF discards) or firmware. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5891
5892 /* All ethtool functions called with rtnl_lock */
5893
5894 static int
5895 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5896 {
5897         struct bnx2 *bp = netdev_priv(dev);
5898         int support_serdes = 0, support_copper = 0;
5899
5900         cmd->supported = SUPPORTED_Autoneg;
5901         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5902                 support_serdes = 1;
5903                 support_copper = 1;
5904         } else if (bp->phy_port == PORT_FIBRE)
5905                 support_serdes = 1;
5906         else
5907                 support_copper = 1;
5908
5909         if (support_serdes) {
5910                 cmd->supported |= SUPPORTED_1000baseT_Full |
5911                         SUPPORTED_FIBRE;
5912                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5913                         cmd->supported |= SUPPORTED_2500baseX_Full;
5914
5915         }
5916         if (support_copper) {
5917                 cmd->supported |= SUPPORTED_10baseT_Half |
5918                         SUPPORTED_10baseT_Full |
5919                         SUPPORTED_100baseT_Half |
5920                         SUPPORTED_100baseT_Full |
5921                         SUPPORTED_1000baseT_Full |
5922                         SUPPORTED_TP;
5923
5924         }
5925
5926         spin_lock_bh(&bp->phy_lock);
5927         cmd->port = bp->phy_port;
5928         cmd->advertising = bp->advertising;
5929
5930         if (bp->autoneg & AUTONEG_SPEED) {
5931                 cmd->autoneg = AUTONEG_ENABLE;
5932         }
5933         else {
5934                 cmd->autoneg = AUTONEG_DISABLE;
5935         }
5936
5937         if (netif_carrier_ok(dev)) {
5938                 cmd->speed = bp->line_speed;
5939                 cmd->duplex = bp->duplex;
5940         }
5941         else {
5942                 cmd->speed = -1;
5943                 cmd->duplex = -1;
5944         }
5945         spin_unlock_bh(&bp->phy_lock);
5946
5947         cmd->transceiver = XCVR_INTERNAL;
5948         cmd->phy_address = bp->phy_addr;
5949
5950         return 0;
5951 }
5952
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	/* Validate into local copies first; bp is only updated once the
	 * whole request has been accepted.
	 */
	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching between TP and fibre requires remote-PHY support. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are rejected on fibre. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G needs a capable PHY and is not TP. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			/* 1000 half duplex is not supported. */
			goto err_out_unlock;
		else {
			/* Anything else: advertise every speed the chosen
			 * medium supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex (autoneg off). */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only runs 1000 or 2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			/* Forcing gigabit speeds is rejected on copper. */
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Request validated; commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6035
6036 static void
6037 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6038 {
6039         struct bnx2 *bp = netdev_priv(dev);
6040
6041         strcpy(info->driver, DRV_MODULE_NAME);
6042         strcpy(info->version, DRV_MODULE_VERSION);
6043         strcpy(info->bus_info, pci_name(bp->pdev));
6044         strcpy(info->fw_version, bp->fw_version);
6045 }
6046
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool hook: report the register dump buffer size. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6054
6055 static void
6056 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6057 {
6058         u32 *p = _p, i, offset;
6059         u8 *orig_p = _p;
6060         struct bnx2 *bp = netdev_priv(dev);
6061         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6062                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6063                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6064                                  0x1040, 0x1048, 0x1080, 0x10a4,
6065                                  0x1400, 0x1490, 0x1498, 0x14f0,
6066                                  0x1500, 0x155c, 0x1580, 0x15dc,
6067                                  0x1600, 0x1658, 0x1680, 0x16d8,
6068                                  0x1800, 0x1820, 0x1840, 0x1854,
6069                                  0x1880, 0x1894, 0x1900, 0x1984,
6070                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6071                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6072                                  0x2000, 0x2030, 0x23c0, 0x2400,
6073                                  0x2800, 0x2820, 0x2830, 0x2850,
6074                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6075                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6076                                  0x4080, 0x4090, 0x43c0, 0x4458,
6077                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6078                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6079                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6080                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6081                                  0x6800, 0x6848, 0x684c, 0x6860,
6082                                  0x6888, 0x6910, 0x8000 };
6083
6084         regs->version = 0;
6085
6086         memset(p, 0, BNX2_REGDUMP_LEN);
6087
6088         if (!netif_running(bp->dev))
6089                 return;
6090
6091         i = 0;
6092         offset = reg_boundaries[0];
6093         p += offset;
6094         while (offset < BNX2_REGDUMP_LEN) {
6095                 *p++ = REG_RD(bp, offset);
6096                 offset += 4;
6097                 if (offset == reg_boundaries[i + 1]) {
6098                         offset = reg_boundaries[i + 2];
6099                         p = (u32 *) (orig_p + offset);
6100                         i += 2;
6101                 }
6102         }
6103 }
6104
6105 static void
6106 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6107 {
6108         struct bnx2 *bp = netdev_priv(dev);
6109
6110         if (bp->flags & NO_WOL_FLAG) {
6111                 wol->supported = 0;
6112                 wol->wolopts = 0;
6113         }
6114         else {
6115                 wol->supported = WAKE_MAGIC;
6116                 if (bp->wol)
6117                         wol->wolopts = WAKE_MAGIC;
6118                 else
6119                         wol->wolopts = 0;
6120         }
6121         memset(&wol->sopass, 0, sizeof(wol->sopass));
6122 }
6123
6124 static int
6125 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6126 {
6127         struct bnx2 *bp = netdev_priv(dev);
6128
6129         if (wol->wolopts & ~WAKE_MAGIC)
6130                 return -EINVAL;
6131
6132         if (wol->wolopts & WAKE_MAGIC) {
6133                 if (bp->flags & NO_WOL_FLAG)
6134                         return -EINVAL;
6135
6136                 bp->wol = 1;
6137         }
6138         else {
6139                 bp->wol = 0;
6140         }
6141         return 0;
6142 }
6143
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	/* Restarting autoneg only makes sense when autoneg is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY devices renegotiate through the remote setup path. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; reacquire before touching
		 * the PHY again.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout serviced by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6186
6187 static int
6188 bnx2_get_eeprom_len(struct net_device *dev)
6189 {
6190         struct bnx2 *bp = netdev_priv(dev);
6191
6192         if (bp->flash_info == NULL)
6193                 return 0;
6194
6195         return (int) bp->flash_size;
6196 }
6197
6198 static int
6199 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6200                 u8 *eebuf)
6201 {
6202         struct bnx2 *bp = netdev_priv(dev);
6203         int rc;
6204
6205         /* parameters already validated in ethtool_get_eeprom */
6206
6207         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6208
6209         return rc;
6210 }
6211
6212 static int
6213 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6214                 u8 *eebuf)
6215 {
6216         struct bnx2 *bp = netdev_priv(dev);
6217         int rc;
6218
6219         /* parameters already validated in ethtool_set_eeprom */
6220
6221         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6222
6223         return rc;
6224 }
6225
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Report the driver's cached coalescing parameters; every other
	 * ethtool_coalesce field reads back as zero.
	 */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
6247
6248 static int
6249 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6250 {
6251         struct bnx2 *bp = netdev_priv(dev);
6252
6253         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6254         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6255
6256         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6257         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6258
6259         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6260         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6261
6262         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6263         if (bp->rx_quick_cons_trip_int > 0xff)
6264                 bp->rx_quick_cons_trip_int = 0xff;
6265
6266         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6267         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6268
6269         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6270         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6271
6272         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6273         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6274
6275         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6276         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6277                 0xff;
6278
6279         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6280         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6281                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6282                         bp->stats_ticks = USEC_PER_SEC;
6283         }
6284         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6285                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6286         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6287
6288         if (netif_running(bp->dev)) {
6289                 bnx2_netif_stop(bp);
6290                 bnx2_init_nic(bp);
6291                 bnx2_netif_start(bp);
6292         }
6293
6294         return 0;
6295 }
6296
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Hardware maxima first, then the currently configured sizes.
	 * The "jumbo" ring reports the RX page ring; no mini ring.
	 */
	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
6313
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	/* Resizing needs a full reset: quiesce the NIC and release all
	 * rings before installing the new sizes.
	 */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with no rings allocated; only the error code is
		 * returned.  Confirm whether the interface should be
		 * closed here instead.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
6338
6339 static int
6340 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6341 {
6342         struct bnx2 *bp = netdev_priv(dev);
6343         int rc;
6344
6345         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6346                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6347                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6348
6349                 return -EINVAL;
6350         }
6351         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6352         return rc;
6353 }
6354
6355 static void
6356 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6357 {
6358         struct bnx2 *bp = netdev_priv(dev);
6359
6360         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6361         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6362         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6363 }
6364
6365 static int
6366 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6367 {
6368         struct bnx2 *bp = netdev_priv(dev);
6369
6370         bp->req_flow_ctrl = 0;
6371         if (epause->rx_pause)
6372                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6373         if (epause->tx_pause)
6374                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6375
6376         if (epause->autoneg) {
6377                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6378         }
6379         else {
6380                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6381         }
6382
6383         spin_lock_bh(&bp->phy_lock);
6384
6385         bnx2_setup_phy(bp, bp->phy_port);
6386
6387         spin_unlock_bh(&bp->phy_lock);
6388
6389         return 0;
6390 }
6391
/* ethtool hook: nonzero when RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
6399
/* ethtool hook: cache the requested RX checksum setting.  Always
 * succeeds; the value is presumably consulted by the RX path — verify.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
6408
6409 static int
6410 bnx2_set_tso(struct net_device *dev, u32 data)
6411 {
6412         struct bnx2 *bp = netdev_priv(dev);
6413
6414         if (data) {
6415                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6416                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6417                         dev->features |= NETIF_F_TSO6;
6418         } else
6419                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6420                                    NETIF_F_TSO_ECN);
6421         return 0;
6422 }
6423
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  Entry order must match
 * bnx2_stats_offset_arr below; both are indexed by the same counter
 * number in bnx2_get_ethtool_stats().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6476
/* 32-bit-word offset of a field within the statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Statistics-block offsets, parallel to bnx2_stats_str_arr.  64-bit
 * counters point at the _hi word; bnx2_get_ethtool_stats() reads the
 * _lo word from the next offset.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6527
/* Per-counter byte widths used by bnx2_get_ethtool_stats(): 8 and 4
 * select 64-bit and 32-bit reads; 0 means the counter is skipped and
 * reported as zero.
 *
 * On 5706-class chips, stat_IfHCInBadOctets and
 * stat_Dot3StatsCarrierSenseErrors are skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6546
#define BNX2_NUM_TESTS 6

/* Self-test names; index order matches the buf[] slots filled by
 * bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6559
6560 static int
6561 bnx2_get_sset_count(struct net_device *dev, int sset)
6562 {
6563         switch (sset) {
6564         case ETH_SS_TEST:
6565                 return BNX2_NUM_TESTS;
6566         case ETH_SS_STATS:
6567                 return BNX2_NUM_STATS;
6568         default:
6569                 return -EOPNOTSUPP;
6570         }
6571 }
6572
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* buf[] indices match bnx2_tests_str_arr: 0=register, 1=memory,
	 * 2=loopback, 3=nvram, 4=interrupt, 5=link.
	 */
	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need the NIC quiesced and in DIAG mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback reports a bitmask of failed sub-tests. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or leave the chip reset if
		 * the interface is down.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests run regardless of the offline flag. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6628
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	/* Copy the fixed name tables; sizes are advertised separately by
	 * bnx2_get_sset_count().  Unknown sets are silently ignored.
	 */
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
6643
/* ethtool get_ethtool_stats: translate the chip's statistics block into
 * the u64 array expected by userspace.  Counter widths differ between
 * chip revisions, so a per-counter length table selects how each
 * hardware value is read (0 = not implemented, 4 = 32-bit, else 64-bit
 * stored as two 32-bit words, high word first).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block yet (e.g. device has never been opened):
	 * report all-zero counters rather than dereferencing NULL.
	 */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early 5706/5708 revisions use a different counter-width table. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: high 32 bits at the offset, low 32 bits
		 * in the following word.
		 */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
6684
6685 static int
6686 bnx2_phys_id(struct net_device *dev, u32 data)
6687 {
6688         struct bnx2 *bp = netdev_priv(dev);
6689         int i;
6690         u32 save;
6691
6692         if (data == 0)
6693                 data = 2;
6694
6695         save = REG_RD(bp, BNX2_MISC_CFG);
6696         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6697
6698         for (i = 0; i < (data * 2); i++) {
6699                 if ((i % 2) == 0) {
6700                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6701                 }
6702                 else {
6703                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6704                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6705                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6706                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6707                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6708                                 BNX2_EMAC_LED_TRAFFIC);
6709                 }
6710                 msleep_interruptible(500);
6711                 if (signal_pending(current))
6712                         break;
6713         }
6714         REG_WR(bp, BNX2_EMAC_LED, 0);
6715         REG_WR(bp, BNX2_MISC_CFG, save);
6716         return 0;
6717 }
6718
6719 static int
6720 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6721 {
6722         struct bnx2 *bp = netdev_priv(dev);
6723
6724         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6725                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6726         else
6727                 return (ethtool_op_set_tx_csum(dev, data));
6728 }
6729
/* ethtool operations table exported via dev->ethtool_ops. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6760
/* Called with rtnl_lock.  Implements the MII ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG); any other command returns -EOPNOTSUPP.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* Local MDIO access is unavailable when the PHY is owned
		 * by remote (management firmware) control.
		 */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes all PHY register accesses. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6814
6815 /* Called with rtnl_lock */
6816 static int
6817 bnx2_change_mac_addr(struct net_device *dev, void *p)
6818 {
6819         struct sockaddr *addr = p;
6820         struct bnx2 *bp = netdev_priv(dev);
6821
6822         if (!is_valid_ether_addr(addr->sa_data))
6823                 return -EINVAL;
6824
6825         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6826         if (netif_running(dev))
6827                 bnx2_set_mac_addr(bp);
6828
6829         return 0;
6830 }
6831
6832 /* Called with rtnl_lock */
6833 static int
6834 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6835 {
6836         struct bnx2 *bp = netdev_priv(dev);
6837
6838         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6839                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6840                 return -EINVAL;
6841
6842         dev->mtu = new_mtu;
6843         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6844 }
6845
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the device IRQ masked,
 * for contexts that cannot take normal interrupts (netconsole etc.).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6857
/* Determine the 5709 port media type and set PHY_SERDES_FLAG when the
 * port is SerDes.  The media is decided first by the dual-media bond
 * id, then by the PHY strap value, whose meaning differs between PCI
 * function 0 and function 1.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id "C": copper (leave flags alone); "S": SerDes. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* A software strap override takes precedence over the pin strap. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* Strap values that indicate SerdDes media differ per function;
	 * presumably fixed by board design — see the 5709 data sheet.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
6895
/* Probe-time helper: derive the bus type (PCI vs PCI-X), bus speed in
 * MHz, and bus width from the PCICFG status/clock registers, recording
 * the results in bp->flags and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		/* PCI-X: the detected clock speed is encoded in the
		 * clock-control register.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: M66EN strap selects 66 vs 33 MHz. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6947
6948 static int __devinit
6949 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6950 {
6951         struct bnx2 *bp;
6952         unsigned long mem_len;
6953         int rc, i, j;
6954         u32 reg;
6955         u64 dma_mask, persist_dma_mask;
6956
6957         SET_NETDEV_DEV(dev, &pdev->dev);
6958         bp = netdev_priv(dev);
6959
6960         bp->flags = 0;
6961         bp->phy_flags = 0;
6962
6963         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6964         rc = pci_enable_device(pdev);
6965         if (rc) {
6966                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6967                 goto err_out;
6968         }
6969
6970         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6971                 dev_err(&pdev->dev,
6972                         "Cannot find PCI device base address, aborting.\n");
6973                 rc = -ENODEV;
6974                 goto err_out_disable;
6975         }
6976
6977         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6978         if (rc) {
6979                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6980                 goto err_out_disable;
6981         }
6982
6983         pci_set_master(pdev);
6984
6985         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6986         if (bp->pm_cap == 0) {
6987                 dev_err(&pdev->dev,
6988                         "Cannot find power management capability, aborting.\n");
6989                 rc = -EIO;
6990                 goto err_out_release;
6991         }
6992
6993         bp->dev = dev;
6994         bp->pdev = pdev;
6995
6996         spin_lock_init(&bp->phy_lock);
6997         spin_lock_init(&bp->indirect_lock);
6998         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6999
7000         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7001         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7002         dev->mem_end = dev->mem_start + mem_len;
7003         dev->irq = pdev->irq;
7004
7005         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7006
7007         if (!bp->regview) {
7008                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7009                 rc = -ENOMEM;
7010                 goto err_out_release;
7011         }
7012
7013         /* Configure byte swap and enable write to the reg_window registers.
7014          * Rely on CPU to do target byte swapping on big endian systems
7015          * The chip's target access swapping will not swap all accesses
7016          */
7017         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7018                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7019                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7020
7021         bnx2_set_power_state(bp, PCI_D0);
7022
7023         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7024
7025         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7026                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7027                         dev_err(&pdev->dev,
7028                                 "Cannot find PCIE capability, aborting.\n");
7029                         rc = -EIO;
7030                         goto err_out_unmap;
7031                 }
7032                 bp->flags |= PCIE_FLAG;
7033                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7034                         bp->flags |= JUMBO_BROKEN_FLAG;
7035         } else {
7036                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7037                 if (bp->pcix_cap == 0) {
7038                         dev_err(&pdev->dev,
7039                                 "Cannot find PCIX capability, aborting.\n");
7040                         rc = -EIO;
7041                         goto err_out_unmap;
7042                 }
7043         }
7044
7045         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7046                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7047                         bp->flags |= MSIX_CAP_FLAG;
7048         }
7049
7050         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7051                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7052                         bp->flags |= MSI_CAP_FLAG;
7053         }
7054
7055         /* 5708 cannot support DMA addresses > 40-bit.  */
7056         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7057                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7058         else
7059                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7060
7061         /* Configure DMA attributes. */
7062         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7063                 dev->features |= NETIF_F_HIGHDMA;
7064                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7065                 if (rc) {
7066                         dev_err(&pdev->dev,
7067                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7068                         goto err_out_unmap;
7069                 }
7070         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7071                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7072                 goto err_out_unmap;
7073         }
7074
7075         if (!(bp->flags & PCIE_FLAG))
7076                 bnx2_get_pci_speed(bp);
7077
7078         /* 5706A0 may falsely detect SERR and PERR. */
7079         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7080                 reg = REG_RD(bp, PCI_COMMAND);
7081                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7082                 REG_WR(bp, PCI_COMMAND, reg);
7083         }
7084         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7085                 !(bp->flags & PCIX_FLAG)) {
7086
7087                 dev_err(&pdev->dev,
7088                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7089                 goto err_out_unmap;
7090         }
7091
7092         bnx2_init_nvram(bp);
7093
7094         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
7095
7096         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7097             BNX2_SHM_HDR_SIGNATURE_SIG) {
7098                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7099
7100                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
7101         } else
7102                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7103
7104         /* Get the permanent MAC address.  First we need to make sure the
7105          * firmware is actually running.
7106          */
7107         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
7108
7109         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7110             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7111                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7112                 rc = -ENODEV;
7113                 goto err_out_unmap;
7114         }
7115
7116         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
7117         for (i = 0, j = 0; i < 3; i++) {
7118                 u8 num, k, skip0;
7119
7120                 num = (u8) (reg >> (24 - (i * 8)));
7121                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7122                         if (num >= k || !skip0 || k == 1) {
7123                                 bp->fw_version[j++] = (num / k) + '0';
7124                                 skip0 = 0;
7125                         }
7126                 }
7127                 if (i != 2)
7128                         bp->fw_version[j++] = '.';
7129         }
7130         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
7131         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7132                 bp->wol = 1;
7133
7134         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7135                 bp->flags |= ASF_ENABLE_FLAG;
7136
7137                 for (i = 0; i < 30; i++) {
7138                         reg = REG_RD_IND(bp, bp->shmem_base +
7139                                              BNX2_BC_STATE_CONDITION);
7140                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7141                                 break;
7142                         msleep(10);
7143                 }
7144         }
7145         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
7146         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7147         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7148             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7149                 int i;
7150                 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
7151
7152                 bp->fw_version[j++] = ' ';
7153                 for (i = 0; i < 3; i++) {
7154                         reg = REG_RD_IND(bp, addr + i * 4);
7155                         reg = swab32(reg);
7156                         memcpy(&bp->fw_version[j], &reg, 4);
7157                         j += 4;
7158                 }
7159         }
7160
7161         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
7162         bp->mac_addr[0] = (u8) (reg >> 8);
7163         bp->mac_addr[1] = (u8) reg;
7164
7165         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
7166         bp->mac_addr[2] = (u8) (reg >> 24);
7167         bp->mac_addr[3] = (u8) (reg >> 16);
7168         bp->mac_addr[4] = (u8) (reg >> 8);
7169         bp->mac_addr[5] = (u8) reg;
7170
7171         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7172
7173         bp->tx_ring_size = MAX_TX_DESC_CNT;
7174         bnx2_set_rx_ring_size(bp, 255);
7175
7176         bp->rx_csum = 1;
7177
7178         bp->tx_quick_cons_trip_int = 20;
7179         bp->tx_quick_cons_trip = 20;
7180         bp->tx_ticks_int = 80;
7181         bp->tx_ticks = 80;
7182
7183         bp->rx_quick_cons_trip_int = 6;
7184         bp->rx_quick_cons_trip = 6;
7185         bp->rx_ticks_int = 18;
7186         bp->rx_ticks = 18;
7187
7188         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7189
7190         bp->timer_interval =  HZ;
7191         bp->current_interval =  HZ;
7192
7193         bp->phy_addr = 1;
7194
7195         /* Disable WOL support if we are running on a SERDES chip. */
7196         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7197                 bnx2_get_5709_media(bp);
7198         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7199                 bp->phy_flags |= PHY_SERDES_FLAG;
7200
7201         bp->phy_port = PORT_TP;
7202         if (bp->phy_flags & PHY_SERDES_FLAG) {
7203                 bp->phy_port = PORT_FIBRE;
7204                 reg = REG_RD_IND(bp, bp->shmem_base +
7205                                      BNX2_SHARED_HW_CFG_CONFIG);
7206                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7207                         bp->flags |= NO_WOL_FLAG;
7208                         bp->wol = 0;
7209                 }
7210                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7211                         bp->phy_addr = 2;
7212                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7213                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
7214                 }
7215                 bnx2_init_remote_phy(bp);
7216
7217         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7218                    CHIP_NUM(bp) == CHIP_NUM_5708)
7219                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
7220         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7221                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7222                   CHIP_REV(bp) == CHIP_REV_Bx))
7223                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
7224
7225         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7226             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7227             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7228                 bp->flags |= NO_WOL_FLAG;
7229                 bp->wol = 0;
7230         }
7231
7232         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7233                 bp->tx_quick_cons_trip_int =
7234                         bp->tx_quick_cons_trip;
7235                 bp->tx_ticks_int = bp->tx_ticks;
7236                 bp->rx_quick_cons_trip_int =
7237                         bp->rx_quick_cons_trip;
7238                 bp->rx_ticks_int = bp->rx_ticks;
7239                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7240                 bp->com_ticks_int = bp->com_ticks;
7241                 bp->cmd_ticks_int = bp->cmd_ticks;
7242         }
7243
7244         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7245          *
7246          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7247          * with byte enables disabled on the unused 32-bit word.  This is legal
7248          * but causes problems on the AMD 8132 which will eventually stop
7249          * responding after a while.
7250          *
7251          * AMD believes this incompatibility is unique to the 5706, and
7252          * prefers to locally disable MSI rather than globally disabling it.
7253          */
7254         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7255                 struct pci_dev *amd_8132 = NULL;
7256
7257                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7258                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7259                                                   amd_8132))) {
7260
7261                         if (amd_8132->revision >= 0x10 &&
7262                             amd_8132->revision <= 0x13) {
7263                                 disable_msi = 1;
7264                                 pci_dev_put(amd_8132);
7265                                 break;
7266                         }
7267                 }
7268         }
7269
7270         bnx2_set_default_link(bp);
7271         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7272
7273         init_timer(&bp->timer);
7274         bp->timer.expires = RUN_AT(bp->timer_interval);
7275         bp->timer.data = (unsigned long) bp;
7276         bp->timer.function = bnx2_timer;
7277
7278         return 0;
7279
7280 err_out_unmap:
7281         if (bp->regview) {
7282                 iounmap(bp->regview);
7283                 bp->regview = NULL;
7284         }
7285
7286 err_out_release:
7287         pci_release_regions(pdev);
7288
7289 err_out_disable:
7290         pci_disable_device(pdev);
7291         pci_set_drvdata(pdev, NULL);
7292
7293 err_out:
7294         return rc;
7295 }
7296
7297 static char * __devinit
7298 bnx2_bus_string(struct bnx2 *bp, char *str)
7299 {
7300         char *s = str;
7301
7302         if (bp->flags & PCIE_FLAG) {
7303                 s += sprintf(s, "PCI Express");
7304         } else {
7305                 s += sprintf(s, "PCI");
7306                 if (bp->flags & PCIX_FLAG)
7307                         s += sprintf(s, "-X");
7308                 if (bp->flags & PCI_32BIT_FLAG)
7309                         s += sprintf(s, " 32-bit");
7310                 else
7311                         s += sprintf(s, " 64-bit");
7312                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7313         }
7314         return str;
7315 }
7316
/* Initialize the per-vector NAPI contexts: point each one back at the
 * device private, then register the poll handlers (bnx2_poll on vector
 * 0, bnx2_tx_poll on the TX vector), each with a budget of 64.
 */
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		bnapi->bp = bp;
	}
	netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
	netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
		       64);
}
7331
/* PCI probe entry point: allocate the net_device, initialize the board,
 * wire up the netdev/ethtool operations and feature flags, and register
 * the interface.  Returns 0 or a negative errno (all resources are
 * released on failure).
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner once, on the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from firmware shmem by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Offload features; the 5709 additionally handles IPv6. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7419
/* PCI remove entry point: undo bnx2_init_one() — flush any pending
 * reset work, unregister the netdev, unmap registers and release the
 * PCI resources.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure bp->reset_task is not running before teardown. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7438
/* PM suspend hook: save PCI state, quiesce the NIC, reset the chip with
 * a WOL-appropriate firmware reset code, and enter the requested PCI
 * power state.  A downed interface only needs its PCI state saved.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Tell the firmware how to bring the link down: no-WOL chips drop
	 * link entirely, otherwise honor the configured WOL setting.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7469
/* PM resume hook: restore PCI state and, if the interface was up,
 * return to D0, re-initialize the NIC and restart the data path.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
7486
/* PCI driver registration table: probe/remove and power-management
 * callbacks for all supported NetXtreme II device IDs.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
7495
/* Module entry point: register the PCI driver. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7500
/* Module exit point: unregister the PCI driver (removes all devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
7508
7509
7510