bnx2: clean up unnecessary MSI/MSI-X capability find
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / net / ethernet / broadcom / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/init.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define DRV_MODULE_VERSION      "2.2.4"
62 #define DRV_MODULE_RELDATE      "Aug 05, 2013"
63 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.3.fw"
64 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
65 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1b.fw"
66 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
67 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73
74 static char version[] =
75         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
/* Module parameter: set disable_msi=1 to force legacy INTx interrupts
 * instead of MSI.
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Supported board variants.  Each value doubles as an index into
 * board_info[] below and is carried in the driver_data field of
 * bnx2_pci_tbl entries.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
105
/* Human-readable board names, indexed by board_t above.  The order of
 * these entries must match the board_t enumeration exactly.
 */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122
/* PCI ID table.  The HP OEM entries (specific subsystem vendor/device)
 * are listed before the corresponding generic PCI_ANY_ID entries so they
 * match first.  The final field is the board_t driver_data.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c are the BCM5716/5716S device IDs */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
148
/* NVRAM part descriptor table.  Each entry describes one supported
 * flash/EEPROM device: five raw configuration words (presumably the
 * strap value and NVM config registers — see struct flash_spec in
 * bnx2.h for the exact field names), followed by flags, page geometry,
 * byte-address mask, total size, and a name string.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
237
/* The 5709 family uses a single fixed NVRAM description instead of
 * probing flash_table[] entries.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
251
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254         u32 diff;
255
256         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
257         barrier();
258
259         /* The ring uses 256 indices for 255 entries, one of them
260          * needs to be skipped.
261          */
262         diff = txr->tx_prod - txr->tx_cons;
263         if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
264                 diff &= 0xffff;
265                 if (diff == BNX2_TX_DESC_CNT)
266                         diff = BNX2_MAX_TX_DESC_CNT;
267         }
268         return bp->tx_ring_size - diff;
269 }
270
/* Read a device register indirectly through the PCI config window.
 * The indirect_lock serializes use of the shared window registers;
 * BH-disabling lock because these helpers are also called from
 * softirq context.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	/* Select the target register, then read it back through the window. */
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
282
/* Write @val to a device register indirectly through the PCI config
 * window, under indirect_lock (see bnx2_reg_rd_ind).
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
291
/* Write a word into driver/firmware shared memory at @offset relative
 * to shmem_base.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
297
/* Read a word from driver/firmware shared memory at @offset relative
 * to shmem_base.
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
303
/* Write @val into the on-chip context memory for context @cid_addr at
 * @offset.  The 5709 uses a request/ack style interface (data + control
 * register with a WRITE_REQ bit that the chip clears when done); older
 * chips take a plain address/data register pair.  Serialized by
 * indirect_lock because the data/address registers are shared.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll up to 5 times (5us apart) for the chip to accept
		 * the write; on timeout the write is silently dropped.
		 */
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
327
328 #ifdef BCM_CNIC
329 static int
330 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 {
332         struct bnx2 *bp = netdev_priv(dev);
333         struct drv_ctl_io *io = &info->data.io;
334
335         switch (info->cmd) {
336         case DRV_CTL_IO_WR_CMD:
337                 bnx2_reg_wr_ind(bp, io->offset, io->data);
338                 break;
339         case DRV_CTL_IO_RD_CMD:
340                 io->data = bnx2_reg_rd_ind(bp, io->offset);
341                 break;
342         case DRV_CTL_CTX_WR_CMD:
343                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344                 break;
345         default:
346                 return -EINVAL;
347         }
348         return 0;
349 }
350
/* Fill in the CNIC IRQ description in cnic_eth_dev.
 *
 * With MSI-X, CNIC gets its own vector: sb_id is set to irq_nvecs,
 * i.e. the entry just past the ones used by the ethernet driver, and
 * the matching per-vector status block.  Without MSI-X, CNIC shares
 * vector/status block 0 and cnic_present/cnic_tag let the shared ISR
 * path track it.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out contiguously at MSIX-align stride. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
377
/* Register the CNIC driver's ops with this device.
 *
 * Fails with -EBUSY if already registered and -ENODEV if the firmware
 * reports no iSCSI connection support.  The ops pointer is published
 * with rcu_assign_pointer so that RCU readers see fully-initialized
 * state (cnic_data is set before the pointer is published).
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	/* Firmware advertises the max iSCSI connection count; zero means
	 * no CNIC support on this NIC.
	 */
	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
403
/* Unregister the CNIC driver: clear state under cnic_lock, null the
 * RCU-protected ops pointer, then synchronize_rcu() so no reader can
 * still be using the old ops when we return.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
418
419 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420 {
421         struct bnx2 *bp = netdev_priv(dev);
422         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
423
424         if (!cp->max_iscsi_conn)
425                 return NULL;
426
427         cp->drv_owner = THIS_MODULE;
428         cp->chip_id = bp->chip_id;
429         cp->pdev = bp->pdev;
430         cp->io_base = bp->regview;
431         cp->drv_ctl = bnx2_drv_ctl;
432         cp->drv_register_cnic = bnx2_register_cnic;
433         cp->drv_unregister_cnic = bnx2_unregister_cnic;
434
435         return cp;
436 }
437
438 static void
439 bnx2_cnic_stop(struct bnx2 *bp)
440 {
441         struct cnic_ops *c_ops;
442         struct cnic_ctl_info info;
443
444         mutex_lock(&bp->cnic_lock);
445         c_ops = rcu_dereference_protected(bp->cnic_ops,
446                                           lockdep_is_held(&bp->cnic_lock));
447         if (c_ops) {
448                 info.cmd = CNIC_CTL_STOP_CMD;
449                 c_ops->cnic_ctl(bp->cnic_data, &info);
450         }
451         mutex_unlock(&bp->cnic_lock);
452 }
453
454 static void
455 bnx2_cnic_start(struct bnx2 *bp)
456 {
457         struct cnic_ops *c_ops;
458         struct cnic_ctl_info info;
459
460         mutex_lock(&bp->cnic_lock);
461         c_ops = rcu_dereference_protected(bp->cnic_ops,
462                                           lockdep_is_held(&bp->cnic_lock));
463         if (c_ops) {
464                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
465                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
466
467                         bnapi->cnic_tag = bnapi->last_status_idx;
468                 }
469                 info.cmd = CNIC_CTL_START_CMD;
470                 c_ops->cnic_ctl(bp->cnic_data, &info);
471         }
472         mutex_unlock(&bp->cnic_lock);
473 }
474
475 #else
476
/* No-op stub when CNIC support is not built in. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
481
/* No-op stub when CNIC support is not built in. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
486
487 #endif
488
/* Read PHY register @reg over MDIO into *@val.
 *
 * If the chip is auto-polling the PHY, polling is suspended around the
 * manual access (and restored afterwards) so the two don't collide on
 * the MDIO bus.  The access itself is started by writing COMM with
 * START_BUSY set, then polling up to 50 times at 10us intervals for
 * the chip to clear the busy bit.  Returns 0 on success or -EBUSY on
 * timeout (with *val zeroed).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* read back to post the write, then let the bus settle */
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* re-read to fetch the data bits once idle */
			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
545
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy: auto-polling is suspended around the manual
 * access, the write is kicked off with START_BUSY, and completion is
 * polled up to 50 times at 10us intervals.  Returns 0 on success or
 * -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
594
595 static void
596 bnx2_disable_int(struct bnx2 *bp)
597 {
598         int i;
599         struct bnx2_napi *bnapi;
600
601         for (i = 0; i < bp->irq_nvecs; i++) {
602                 bnapi = &bp->bnx2_napi[i];
603                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
605         }
606         BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
607 }
608
/* Unmask the interrupt on every vector.
 *
 * Each vector gets two writes: the first publishes last_status_idx
 * with the interrupt still masked, the second repeats the index with
 * the mask bit cleared.  The final COAL_NOW write forces a coalescing
 * pass so any event that arrived while masked raises an interrupt
 * immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
629
/* Mask all interrupts and wait for in-flight handlers to complete.
 *
 * intr_sem is incremented *before* anything else so the interrupt
 * path sees the device as quiescing (NOTE(review): intr_sem appears
 * to gate the ISR/bnx2_netif_start — confirm against the handler).
 * If the netdev is not running there are no handlers to sync with.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
643
644 static void
645 bnx2_napi_disable(struct bnx2 *bp)
646 {
647         int i;
648
649         for (i = 0; i < bp->irq_nvecs; i++)
650                 napi_disable(&bp->bnx2_napi[i].napi);
651 }
652
653 static void
654 bnx2_napi_enable(struct bnx2 *bp)
655 {
656         int i;
657
658         for (i = 0; i < bp->irq_nvecs; i++)
659                 napi_enable(&bp->bnx2_napi[i].napi);
660 }
661
/* Quiesce the interface: optionally stop CNIC first, then NAPI and the
 * TX queues, then mask/sync interrupts.  Carrier is forced off so the
 * stack does not declare a TX timeout while we are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
674
/* Undo bnx2_netif_stop.  intr_sem balances the atomic_inc done in
 * bnx2_disable_int_sync; the interface is only restarted when the last
 * outstanding stop is released (dec-and-test reaches zero).
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			/* phy_lock protects link_up vs. the link-state paths */
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
692
693 static void
694 bnx2_free_tx_mem(struct bnx2 *bp)
695 {
696         int i;
697
698         for (i = 0; i < bp->num_tx_rings; i++) {
699                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
701
702                 if (txr->tx_desc_ring) {
703                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
704                                           txr->tx_desc_ring,
705                                           txr->tx_desc_mapping);
706                         txr->tx_desc_ring = NULL;
707                 }
708                 kfree(txr->tx_buf_ring);
709                 txr->tx_buf_ring = NULL;
710         }
711 }
712
713 static void
714 bnx2_free_rx_mem(struct bnx2 *bp)
715 {
716         int i;
717
718         for (i = 0; i < bp->num_rx_rings; i++) {
719                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
720                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
721                 int j;
722
723                 for (j = 0; j < bp->rx_max_ring; j++) {
724                         if (rxr->rx_desc_ring[j])
725                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
726                                                   rxr->rx_desc_ring[j],
727                                                   rxr->rx_desc_mapping[j]);
728                         rxr->rx_desc_ring[j] = NULL;
729                 }
730                 vfree(rxr->rx_buf_ring);
731                 rxr->rx_buf_ring = NULL;
732
733                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
734                         if (rxr->rx_pg_desc_ring[j])
735                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
736                                                   rxr->rx_pg_desc_ring[j],
737                                                   rxr->rx_pg_desc_mapping[j]);
738                         rxr->rx_pg_desc_ring[j] = NULL;
739                 }
740                 vfree(rxr->rx_pg_ring);
741                 rxr->rx_pg_ring = NULL;
742         }
743 }
744
745 static int
746 bnx2_alloc_tx_mem(struct bnx2 *bp)
747 {
748         int i;
749
750         for (i = 0; i < bp->num_tx_rings; i++) {
751                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
753
754                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755                 if (txr->tx_buf_ring == NULL)
756                         return -ENOMEM;
757
758                 txr->tx_desc_ring =
759                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760                                            &txr->tx_desc_mapping, GFP_KERNEL);
761                 if (txr->tx_desc_ring == NULL)
762                         return -ENOMEM;
763         }
764         return 0;
765 }
766
/* Allocate all RX memory for every ring: the vmalloc'ed software
 * buffer ring, one DMA-coherent descriptor page per rx_max_ring, and
 * (when rx_pg_ring_size is nonzero) the page-ring equivalents.
 * Returns 0 or -ENOMEM; partial allocations are left for the caller
 * to unwind with bnx2_free_rx_mem.
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		/* Page ring is only needed when jumbo/page mode is in use. */
		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
814
/* Free everything bnx2_alloc_mem allocated: TX/RX rings, the 5709
 * context-block pages, and the combined status+statistics block.
 * stats_blk points into the same allocation as status_blk.msi, so it
 * is only nulled (not freed separately).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
840
/* Allocate all host DMA memory for the device.
 *
 * The status block(s) and the statistics block share one coherent
 * allocation: either a single cache-aligned status block, or
 * BNX2_MAX_MSIX_HW_VEC aligned per-vector blocks when the chip is
 * MSI-X capable, followed by the statistics block.  On 5709 chips a
 * set of context pages is also allocated.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping,
					GFP_KERNEL | __GFP_ZERO);
	if (status_blk == NULL)
		goto alloc_mem_err;

	/* Vector 0 always uses the first (MSI-layout) status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Per-vector status blocks are laid out back to
			 * back at BNX2_SBLK_MSIX_ALIGN_SIZE intervals.
			 */
			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector number shifted into its field position. */
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block sits directly after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 0x2000 bytes of context memory, in host pages. */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
916
917 static void
918 bnx2_report_fw_link(struct bnx2 *bp)
919 {
920         u32 fw_link_status = 0;
921
922         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
923                 return;
924
925         if (bp->link_up) {
926                 u32 bmsr;
927
928                 switch (bp->line_speed) {
929                 case SPEED_10:
930                         if (bp->duplex == DUPLEX_HALF)
931                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
932                         else
933                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
934                         break;
935                 case SPEED_100:
936                         if (bp->duplex == DUPLEX_HALF)
937                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
938                         else
939                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
940                         break;
941                 case SPEED_1000:
942                         if (bp->duplex == DUPLEX_HALF)
943                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
944                         else
945                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
946                         break;
947                 case SPEED_2500:
948                         if (bp->duplex == DUPLEX_HALF)
949                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
950                         else
951                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
952                         break;
953                 }
954
955                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
956
957                 if (bp->autoneg) {
958                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
959
960                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
961                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
962
963                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
964                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
965                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
966                         else
967                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
968                 }
969         }
970         else
971                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
972
973         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
974 }
975
976 static char *
977 bnx2_xceiver_str(struct bnx2 *bp)
978 {
979         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
980                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
981                  "Copper");
982 }
983
984 static void
985 bnx2_report_link(struct bnx2 *bp)
986 {
987         if (bp->link_up) {
988                 netif_carrier_on(bp->dev);
989                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
990                             bnx2_xceiver_str(bp),
991                             bp->line_speed,
992                             bp->duplex == DUPLEX_FULL ? "full" : "half");
993
994                 if (bp->flow_ctrl) {
995                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
996                                 pr_cont(", receive ");
997                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
998                                         pr_cont("& transmit ");
999                         }
1000                         else {
1001                                 pr_cont(", transmit ");
1002                         }
1003                         pr_cont("flow control ON");
1004                 }
1005                 pr_cont("\n");
1006         } else {
1007                 netif_carrier_off(bp->dev);
1008                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1009                            bnx2_xceiver_str(bp));
1010         }
1011
1012         bnx2_report_fw_link(bp);
1013 }
1014
/* Resolve the effective RX/TX flow control into bp->flow_ctrl after a
 * link change, from either the forced configuration or the pause
 * advertisement exchanged during autonegotiation.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Pause is only negotiated when both speed and flow control
	 * autoneg are on; otherwise apply the requested setting (full
	 * duplex only -- pause frames are not used at half duplex).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the resolved pause state directly
	 * in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		/* Translate the 1000BASE-X pause bits into the copper
		 * (802.3 clause 28) bits so the resolution logic below
		 * can be shared by both PHY types.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1090
1091 static int
1092 bnx2_5709s_linkup(struct bnx2 *bp)
1093 {
1094         u32 val, speed;
1095
1096         bp->link_up = 1;
1097
1098         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1099         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1100         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1101
1102         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1103                 bp->line_speed = bp->req_line_speed;
1104                 bp->duplex = bp->req_duplex;
1105                 return 0;
1106         }
1107         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1108         switch (speed) {
1109                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1110                         bp->line_speed = SPEED_10;
1111                         break;
1112                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1113                         bp->line_speed = SPEED_100;
1114                         break;
1115                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1116                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1117                         bp->line_speed = SPEED_1000;
1118                         break;
1119                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1120                         bp->line_speed = SPEED_2500;
1121                         break;
1122         }
1123         if (val & MII_BNX2_GP_TOP_AN_FD)
1124                 bp->duplex = DUPLEX_FULL;
1125         else
1126                 bp->duplex = DUPLEX_HALF;
1127         return 0;
1128 }
1129
1130 static int
1131 bnx2_5708s_linkup(struct bnx2 *bp)
1132 {
1133         u32 val;
1134
1135         bp->link_up = 1;
1136         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1137         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1138                 case BCM5708S_1000X_STAT1_SPEED_10:
1139                         bp->line_speed = SPEED_10;
1140                         break;
1141                 case BCM5708S_1000X_STAT1_SPEED_100:
1142                         bp->line_speed = SPEED_100;
1143                         break;
1144                 case BCM5708S_1000X_STAT1_SPEED_1G:
1145                         bp->line_speed = SPEED_1000;
1146                         break;
1147                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1148                         bp->line_speed = SPEED_2500;
1149                         break;
1150         }
1151         if (val & BCM5708S_1000X_STAT1_FD)
1152                 bp->duplex = DUPLEX_FULL;
1153         else
1154                 bp->duplex = DUPLEX_HALF;
1155
1156         return 0;
1157 }
1158
1159 static int
1160 bnx2_5706s_linkup(struct bnx2 *bp)
1161 {
1162         u32 bmcr, local_adv, remote_adv, common;
1163
1164         bp->link_up = 1;
1165         bp->line_speed = SPEED_1000;
1166
1167         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1168         if (bmcr & BMCR_FULLDPLX) {
1169                 bp->duplex = DUPLEX_FULL;
1170         }
1171         else {
1172                 bp->duplex = DUPLEX_HALF;
1173         }
1174
1175         if (!(bmcr & BMCR_ANENABLE)) {
1176                 return 0;
1177         }
1178
1179         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1180         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1181
1182         common = local_adv & remote_adv;
1183         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1184
1185                 if (common & ADVERTISE_1000XFULL) {
1186                         bp->duplex = DUPLEX_FULL;
1187                 }
1188                 else {
1189                         bp->duplex = DUPLEX_HALF;
1190                 }
1191         }
1192
1193         return 0;
1194 }
1195
1196 static int
1197 bnx2_copper_linkup(struct bnx2 *bp)
1198 {
1199         u32 bmcr;
1200
1201         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1202         if (bmcr & BMCR_ANENABLE) {
1203                 u32 local_adv, remote_adv, common;
1204
1205                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1206                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1207
1208                 common = local_adv & (remote_adv >> 2);
1209                 if (common & ADVERTISE_1000FULL) {
1210                         bp->line_speed = SPEED_1000;
1211                         bp->duplex = DUPLEX_FULL;
1212                 }
1213                 else if (common & ADVERTISE_1000HALF) {
1214                         bp->line_speed = SPEED_1000;
1215                         bp->duplex = DUPLEX_HALF;
1216                 }
1217                 else {
1218                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1219                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1220
1221                         common = local_adv & remote_adv;
1222                         if (common & ADVERTISE_100FULL) {
1223                                 bp->line_speed = SPEED_100;
1224                                 bp->duplex = DUPLEX_FULL;
1225                         }
1226                         else if (common & ADVERTISE_100HALF) {
1227                                 bp->line_speed = SPEED_100;
1228                                 bp->duplex = DUPLEX_HALF;
1229                         }
1230                         else if (common & ADVERTISE_10FULL) {
1231                                 bp->line_speed = SPEED_10;
1232                                 bp->duplex = DUPLEX_FULL;
1233                         }
1234                         else if (common & ADVERTISE_10HALF) {
1235                                 bp->line_speed = SPEED_10;
1236                                 bp->duplex = DUPLEX_HALF;
1237                         }
1238                         else {
1239                                 bp->line_speed = 0;
1240                                 bp->link_up = 0;
1241                         }
1242                 }
1243         }
1244         else {
1245                 if (bmcr & BMCR_SPEED100) {
1246                         bp->line_speed = SPEED_100;
1247                 }
1248                 else {
1249                         bp->line_speed = SPEED_10;
1250                 }
1251                 if (bmcr & BMCR_FULLDPLX) {
1252                         bp->duplex = DUPLEX_FULL;
1253                 }
1254                 else {
1255                         bp->duplex = DUPLEX_HALF;
1256                 }
1257         }
1258
1259         return 0;
1260 }
1261
1262 static void
1263 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1264 {
1265         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1266
1267         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1268         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1269         val |= 0x02 << 8;
1270
1271         if (bp->flow_ctrl & FLOW_CTRL_TX)
1272                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1273
1274         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1275 }
1276
1277 static void
1278 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1279 {
1280         int i;
1281         u32 cid;
1282
1283         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1284                 if (i == 1)
1285                         cid = RX_RSS_CID;
1286                 bnx2_init_rx_context(bp, cid);
1287         }
1288 }
1289
1290 static void
1291 bnx2_set_mac_link(struct bnx2 *bp)
1292 {
1293         u32 val;
1294
1295         BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1296         if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1297                 (bp->duplex == DUPLEX_HALF)) {
1298                 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1299         }
1300
1301         /* Configure the EMAC mode register. */
1302         val = BNX2_RD(bp, BNX2_EMAC_MODE);
1303
1304         val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1305                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1306                 BNX2_EMAC_MODE_25G_MODE);
1307
1308         if (bp->link_up) {
1309                 switch (bp->line_speed) {
1310                         case SPEED_10:
1311                                 if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1312                                         val |= BNX2_EMAC_MODE_PORT_MII_10M;
1313                                         break;
1314                                 }
1315                                 /* fall through */
1316                         case SPEED_100:
1317                                 val |= BNX2_EMAC_MODE_PORT_MII;
1318                                 break;
1319                         case SPEED_2500:
1320                                 val |= BNX2_EMAC_MODE_25G_MODE;
1321                                 /* fall through */
1322                         case SPEED_1000:
1323                                 val |= BNX2_EMAC_MODE_PORT_GMII;
1324                                 break;
1325                 }
1326         }
1327         else {
1328                 val |= BNX2_EMAC_MODE_PORT_GMII;
1329         }
1330
1331         /* Set the MAC to operate in the appropriate duplex mode. */
1332         if (bp->duplex == DUPLEX_HALF)
1333                 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1334         BNX2_WR(bp, BNX2_EMAC_MODE, val);
1335
1336         /* Enable/disable rx PAUSE. */
1337         bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1338
1339         if (bp->flow_ctrl & FLOW_CTRL_RX)
1340                 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1341         BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1342
1343         /* Enable/disable tx PAUSE. */
1344         val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1345         val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1346
1347         if (bp->flow_ctrl & FLOW_CTRL_TX)
1348                 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1349         BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1350
1351         /* Acknowledge the interrupt. */
1352         BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1353
1354         bnx2_init_all_rx_contexts(bp);
1355 }
1356
1357 static void
1358 bnx2_enable_bmsr1(struct bnx2 *bp)
1359 {
1360         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1361             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1362                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1363                                MII_BNX2_BLK_ADDR_GP_STATUS);
1364 }
1365
1366 static void
1367 bnx2_disable_bmsr1(struct bnx2 *bp)
1368 {
1369         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1370             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1371                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1372                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1373 }
1374
1375 static int
1376 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1377 {
1378         u32 up1;
1379         int ret = 1;
1380
1381         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1382                 return 0;
1383
1384         if (bp->autoneg & AUTONEG_SPEED)
1385                 bp->advertising |= ADVERTISED_2500baseX_Full;
1386
1387         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1388                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1389
1390         bnx2_read_phy(bp, bp->mii_up1, &up1);
1391         if (!(up1 & BCM5708S_UP1_2G5)) {
1392                 up1 |= BCM5708S_UP1_2G5;
1393                 bnx2_write_phy(bp, bp->mii_up1, up1);
1394                 ret = 0;
1395         }
1396
1397         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1398                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1399                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1400
1401         return ret;
1402 }
1403
1404 static int
1405 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1406 {
1407         u32 up1;
1408         int ret = 0;
1409
1410         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1411                 return 0;
1412
1413         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1414                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1415
1416         bnx2_read_phy(bp, bp->mii_up1, &up1);
1417         if (up1 & BCM5708S_UP1_2G5) {
1418                 up1 &= ~BCM5708S_UP1_2G5;
1419                 bnx2_write_phy(bp, bp->mii_up1, up1);
1420                 ret = 1;
1421         }
1422
1423         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1424                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1425                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1426
1427         return ret;
1428 }
1429
/* Force the SerDes PHY to 2.5 Gbps using the chip-specific sequence:
 * 5709 sets FORCE/FORCE_2_5G in the SERDES_DIG MISC1 register, 5708
 * sets FORCE_2500 in BMCR.  No-op on PHYs without 2.5G capability and
 * on other chip types.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is always written before the final write on every path
	 * that reaches it; uninitialized_var() silences the false
	 * compiler warning.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Select the SERDES_DIG block and set the force bits. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore the default block before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* A forced speed is incompatible with autoneg: turn autoneg
	 * off and apply the requested duplex.
	 */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1473
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific force-2.5G
 * bits (SERDES_DIG MISC1 on 5709, BMCR on 5708) and, when speed
 * autoneg is configured, restart autonegotiation at 1G.  No-op on
 * PHYs without 2.5G capability and on other chip types.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is always written before the final write on every path
	 * that reaches it; uninitialized_var() silences the false
	 * compiler warning.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Select the SERDES_DIG block and clear the force bit. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore the default block before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Back to autoneg: re-enable and restart it at 1G. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1512
1513 static void
1514 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1515 {
1516         u32 val;
1517
1518         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1519         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1520         if (start)
1521                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1522         else
1523                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1524 }
1525
/* Poll the PHY, update bp->link_up / line_speed / duplex / flow_ctrl,
 * report any state change, and reprogram the MAC.  Always returns 0.
 *
 * NOTE(review): appears to require bp->phy_lock held by the caller,
 * as with the other PHY helpers here -- confirm at the call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* A remote (firmware-managed) PHY is not polled here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* bmsr1 is latched; read it twice to get the current state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, qualify the BMSR link bit with the EMAC link
	 * status and the autoneg sync state from the MISC shadow
	 * register (also read twice, latched).
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced-down link first. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: stop forcing 2.5G so autoneg can retry. */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leave parallel-detect mode and re-enable autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/report when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1609
1610 static int
1611 bnx2_reset_phy(struct bnx2 *bp)
1612 {
1613         int i;
1614         u32 reg;
1615
1616         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1617
1618 #define PHY_RESET_MAX_WAIT 100
1619         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1620                 udelay(10);
1621
1622                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1623                 if (!(reg & BMCR_RESET)) {
1624                         udelay(20);
1625                         break;
1626                 }
1627         }
1628         if (i == PHY_RESET_MAX_WAIT) {
1629                 return -EBUSY;
1630         }
1631         return 0;
1632 }
1633
1634 static u32
1635 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1636 {
1637         u32 adv = 0;
1638
1639         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1640                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1641
1642                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1643                         adv = ADVERTISE_1000XPAUSE;
1644                 }
1645                 else {
1646                         adv = ADVERTISE_PAUSE_CAP;
1647                 }
1648         }
1649         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1650                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1651                         adv = ADVERTISE_1000XPSE_ASYM;
1652                 }
1653                 else {
1654                         adv = ADVERTISE_PAUSE_ASYM;
1655                 }
1656         }
1657         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1658                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1660                 }
1661                 else {
1662                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1663                 }
1664         }
1665         return adv;
1666 }
1667
1668 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1669
1670 static int
1671 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1672 __releases(&bp->phy_lock)
1673 __acquires(&bp->phy_lock)
1674 {
1675         u32 speed_arg = 0, pause_adv;
1676
1677         pause_adv = bnx2_phy_get_pause_adv(bp);
1678
1679         if (bp->autoneg & AUTONEG_SPEED) {
1680                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1681                 if (bp->advertising & ADVERTISED_10baseT_Half)
1682                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1683                 if (bp->advertising & ADVERTISED_10baseT_Full)
1684                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1685                 if (bp->advertising & ADVERTISED_100baseT_Half)
1686                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1687                 if (bp->advertising & ADVERTISED_100baseT_Full)
1688                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1689                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1690                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1691                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1692                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1693         } else {
1694                 if (bp->req_line_speed == SPEED_2500)
1695                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1696                 else if (bp->req_line_speed == SPEED_1000)
1697                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1698                 else if (bp->req_line_speed == SPEED_100) {
1699                         if (bp->req_duplex == DUPLEX_FULL)
1700                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1701                         else
1702                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1703                 } else if (bp->req_line_speed == SPEED_10) {
1704                         if (bp->req_duplex == DUPLEX_FULL)
1705                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1706                         else
1707                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1708                 }
1709         }
1710
1711         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1712                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1713         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1714                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1715
1716         if (port == PORT_TP)
1717                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1718                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1719
1720         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1721
1722         spin_unlock_bh(&bp->phy_lock);
1723         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1724         spin_lock_bh(&bp->phy_lock);
1725
1726         return 0;
1727 }
1728
/* Configure the SerDes (fibre) PHY according to bp->autoneg and the
 * requested speed/duplex.  On the remote-PHY path this delegates to the
 * firmware; otherwise it programs BMCR/advertisement registers directly.
 * May drop and re-take bp->phy_lock (via bnx2_setup_remote_phy or the
 * msleep below), hence the sparse annotations.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* When the management firmware owns the PHY, hand the request off. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability may require bouncing the
		 * link so the partner sees the change.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of forced 2.5G mode. */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 appears to be a
				 * 5709-specific speed-select BMCR bit —
				 * confirm against the SerDes register map.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed in BMCR; just refresh MAC state. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep may sleep; drop the BH spinlock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1845
/* Ethtool advertisement mask covering every fibre speed this PHY
 * supports; includes 2.5G only when the 2_5G_CAPABLE flag is set.
 * Expands against a local "bp" in the surrounding function.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask for all copper speeds (10/100 HD+FD, 1G FD). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 modes plus the
 * 802.3 CSMA selector field.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both gigabit duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1860
/* Derive the driver's default link settings (autoneg, advertising,
 * requested speed/duplex) from the remote-PHY link word the firmware
 * leaves in shared memory for the current port type.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg enabled: translate each speed bit into its
		 * ethtool advertisement flag.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: checks run lowest-to-highest speed so a
		 * higher speed bit overrides a lower one.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1907
1908 static void
1909 bnx2_set_default_link(struct bnx2 *bp)
1910 {
1911         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1912                 bnx2_set_default_remote_link(bp);
1913                 return;
1914         }
1915
1916         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1917         bp->req_line_speed = 0;
1918         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1919                 u32 reg;
1920
1921                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1922
1923                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1924                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1925                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1926                         bp->autoneg = 0;
1927                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1928                         bp->req_duplex = DUPLEX_FULL;
1929                 }
1930         } else
1931                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1932 }
1933
1934 static void
1935 bnx2_send_heart_beat(struct bnx2 *bp)
1936 {
1937         u32 msg;
1938         u32 addr;
1939
1940         spin_lock(&bp->indirect_lock);
1941         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1942         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1943         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1944         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1945         spin_unlock(&bp->indirect_lock);
1946 }
1947
/* Handle a link event reported by the management firmware (remote PHY):
 * decode the BNX2_LINK_STATUS shared-memory word into link state,
 * speed, duplex, flow control and port type, then update the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect changes */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware flags a missed heartbeat in the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case sets half duplex then falls through to
		 * the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the forced setting unless both
		 * speed and flow-control autoneg are enabled, in which
		 * case take the negotiated result from the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change (TP <-> fibre) resets link defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2028
2029 static int
2030 bnx2_set_remote_link(struct bnx2 *bp)
2031 {
2032         u32 evt_code;
2033
2034         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2035         switch (evt_code) {
2036                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2037                         bnx2_remote_phy_event(bp);
2038                         break;
2039                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2040                 default:
2041                         bnx2_send_heart_beat(bp);
2042                         break;
2043         }
2044         return 0;
2045 }
2046
/* Configure the copper PHY: restart autonegotiation with the requested
 * advertisement, or force speed/duplex via BMCR.  May drop and re-take
 * bp->phy_lock around the msleep on the forced-link-down path.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv = 0;
		u32 new_adv1000 = 0;

		/* Current 10/100 advertisement plus pause bits. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current gigabit advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
		new_adv |= ADVERTISE_CSMA;
		new_adv |= bnx2_phy_get_pause_adv(bp);

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);

		/* Only restart autoneg if the advertisement changed or
		 * autoneg is not currently enabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path: build the target BMCR value. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low per MII convention, so
		 * read twice to get the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2136
2137 static int
2138 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2139 __releases(&bp->phy_lock)
2140 __acquires(&bp->phy_lock)
2141 {
2142         if (bp->loopback == MAC_LOOPBACK)
2143                 return 0;
2144
2145         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2146                 return bnx2_setup_serdes_phy(bp, port);
2147         }
2148         else {
2149                 return bnx2_setup_copper_phy(bp);
2150         }
2151 }
2152
/* Initialize the 5709 SerDes PHY.  The 5709 exposes the IEEE MII
 * registers at a 0x10 offset within its banked register space, so the
 * mii_* shadow offsets are redirected first, then the SerDes digital,
 * over-1G, next-page and clause-73 blocks are programmed.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* IEEE registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fibre mode, disable auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise (or not) 2.5G in the over-1G UP1 register. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Clause-73 BAM configuration. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2202
/* Initialize the 5708 SerDes PHY: fibre mode with auto-detect, PLL
 * early link detect, optional 2.5G advertisement, plus TX amplitude
 * tuning for early chip revisions and backplane configurations.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE-compliant signaling in the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fibre mode with signal auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the hardware supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from NVRAM config,
	 * but only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2260
/* Initialize the 5706 SerDes PHY.  The raw 0x18/0x1c register writes
 * are vendor expansion/shadow registers; the two branches program
 * jumbo-frame (extended packet length) support on or off depending on
 * the configured MTU.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 is an undocumented GP_HW_CTL0 value for the
	 * 5706 — confirm against the chip errata/programming guide.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2298
/* Initialize the copper PHY: apply CRC-fix and early-DAC workarounds
 * when flagged, set jumbo-frame (extended packet length) support from
 * the MTU, and enable ethernet@wirespeed.  The 0x10/0x15/0x17/0x18
 * registers are vendor shadow/expansion registers.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Board-specific DSP workaround sequence for a CRC errata;
	 * values come from the hardware vendor.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via DSP expansion register 8, bit 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2350
2351
2352 static int
2353 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2354 __releases(&bp->phy_lock)
2355 __acquires(&bp->phy_lock)
2356 {
2357         u32 val;
2358         int rc = 0;
2359
2360         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2361         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2362
2363         bp->mii_bmcr = MII_BMCR;
2364         bp->mii_bmsr = MII_BMSR;
2365         bp->mii_bmsr1 = MII_BMSR;
2366         bp->mii_adv = MII_ADVERTISE;
2367         bp->mii_lpa = MII_LPA;
2368
2369         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2370
2371         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2372                 goto setup_phy;
2373
2374         bnx2_read_phy(bp, MII_PHYSID1, &val);
2375         bp->phy_id = val << 16;
2376         bnx2_read_phy(bp, MII_PHYSID2, &val);
2377         bp->phy_id |= val & 0xffff;
2378
2379         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2380                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2381                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2382                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2383                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2384                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2385                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2386         }
2387         else {
2388                 rc = bnx2_init_copper_phy(bp, reset_phy);
2389         }
2390
2391 setup_phy:
2392         if (!rc)
2393                 rc = bnx2_setup_phy(bp, bp->phy_port);
2394
2395         return rc;
2396 }
2397
2398 static int
2399 bnx2_set_mac_loopback(struct bnx2 *bp)
2400 {
2401         u32 mac_mode;
2402
2403         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2404         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2405         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2406         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407         bp->link_up = 1;
2408         return 0;
2409 }
2410
2411 static int bnx2_test_link(struct bnx2 *);
2412
2413 static int
2414 bnx2_set_phy_loopback(struct bnx2 *bp)
2415 {
2416         u32 mac_mode;
2417         int rc, i;
2418
2419         spin_lock_bh(&bp->phy_lock);
2420         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2421                             BMCR_SPEED1000);
2422         spin_unlock_bh(&bp->phy_lock);
2423         if (rc)
2424                 return rc;
2425
2426         for (i = 0; i < 10; i++) {
2427                 if (bnx2_test_link(bp) == 0)
2428                         break;
2429                 msleep(100);
2430         }
2431
2432         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2433         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2434                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2435                       BNX2_EMAC_MODE_25G_MODE);
2436
2437         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2438         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2439         bp->link_up = 1;
2440         return 0;
2441 }
2442
/* Dump management CPU (MCP) and shared-memory state to the kernel log
 * for firmware debugging, e.g. after a firmware-sync timeout.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The MCP state registers moved on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is read twice on purpose — presumably so
	 * two samples show whether the MCP is advancing or stuck.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2485
/* Post a message to the bootcode via the driver mailbox and, when @ack
 * is set, poll for the firmware's acknowledgement.
 *
 * @msg_data: message code/data; a rolling sequence number from
 *	bp->fw_wr_seq is OR'ed in so the firmware's ack can be matched
 *	to this particular request.
 * @ack: when zero, fire-and-forget — return immediately after posting.
 * @silent: suppress the error log and MCP state dump on timeout.
 *
 * Returns 0 on success (or when no ack was requested), -EBUSY when the
 * firmware fails to ack within BNX2_FW_ACK_TIME_OUT_MS, -EIO when it
 * acks with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are best-effort: no error reporting for them. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2531
/* Initialize the 5709's host-backed context memory.
 *
 * Kicks off the hardware MEM_INIT sequence (self-clearing bit), then
 * programs the host page table with the DMA address of each context
 * block previously allocated into bp->ctx_blk[], polling for every
 * WRITE_REQ bit to self-clear.
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete an
 * operation in time, -ENOMEM if a context block is missing.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): (1 << 12) is an undocumented command bit here —
	 * kept as-is; see chip documentation before changing.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT is self-clearing; poll briefly for completion. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit page address (low word carries the
		 * VALID bit), then request the page-table write.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ is self-clearing; poll for completion. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2579
2580 static void
2581 bnx2_init_context(struct bnx2 *bp)
2582 {
2583         u32 vcid;
2584
2585         vcid = 96;
2586         while (vcid) {
2587                 u32 vcid_addr, pcid_addr, offset;
2588                 int i;
2589
2590                 vcid--;
2591
2592                 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2593                         u32 new_vcid;
2594
2595                         vcid_addr = GET_PCID_ADDR(vcid);
2596                         if (vcid & 0x8) {
2597                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2598                         }
2599                         else {
2600                                 new_vcid = vcid;
2601                         }
2602                         pcid_addr = GET_PCID_ADDR(new_vcid);
2603                 }
2604                 else {
2605                         vcid_addr = GET_CID_ADDR(vcid);
2606                         pcid_addr = vcid_addr;
2607                 }
2608
2609                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2610                         vcid_addr += (i << PHY_CTX_SHIFT);
2611                         pcid_addr += (i << PHY_CTX_SHIFT);
2612
2613                         BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2614                         BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2615
2616                         /* Zero out the context. */
2617                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2618                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2619                 }
2620         }
2621 }
2622
2623 static int
2624 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2625 {
2626         u16 *good_mbuf;
2627         u32 good_mbuf_cnt;
2628         u32 val;
2629
2630         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2631         if (good_mbuf == NULL)
2632                 return -ENOMEM;
2633
2634         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2635                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2636
2637         good_mbuf_cnt = 0;
2638
2639         /* Allocate a bunch of mbufs and save the good ones in an array. */
2640         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2641         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2642                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2643                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2644
2645                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2646
2647                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2648
2649                 /* The addresses with Bit 9 set are bad memory blocks. */
2650                 if (!(val & (1 << 9))) {
2651                         good_mbuf[good_mbuf_cnt] = (u16) val;
2652                         good_mbuf_cnt++;
2653                 }
2654
2655                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2656         }
2657
2658         /* Free the good ones back to the mbuf pool thus discarding
2659          * all the bad ones. */
2660         while (good_mbuf_cnt) {
2661                 good_mbuf_cnt--;
2662
2663                 val = good_mbuf[good_mbuf_cnt];
2664                 val = (val << 9) | val | 1;
2665
2666                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2667         }
2668         kfree(good_mbuf);
2669         return 0;
2670 }
2671
2672 static void
2673 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2674 {
2675         u32 val;
2676
2677         val = (mac_addr[0] << 8) | mac_addr[1];
2678
2679         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2680
2681         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2682                 (mac_addr[4] << 8) | mac_addr[5];
2683
2684         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2685 }
2686
2687 static inline int
2688 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2689 {
2690         dma_addr_t mapping;
2691         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2692         struct bnx2_rx_bd *rxbd =
2693                 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2694         struct page *page = alloc_page(gfp);
2695
2696         if (!page)
2697                 return -ENOMEM;
2698         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2699                                PCI_DMA_FROMDEVICE);
2700         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2701                 __free_page(page);
2702                 return -EIO;
2703         }
2704
2705         rx_pg->page = page;
2706         dma_unmap_addr_set(rx_pg, mapping, mapping);
2707         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2708         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2709         return 0;
2710 }
2711
2712 static void
2713 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2714 {
2715         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2716         struct page *page = rx_pg->page;
2717
2718         if (!page)
2719                 return;
2720
2721         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2722                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2723
2724         __free_page(page);
2725         rx_pg->page = NULL;
2726 }
2727
2728 static inline int
2729 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2730 {
2731         u8 *data;
2732         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2733         dma_addr_t mapping;
2734         struct bnx2_rx_bd *rxbd =
2735                 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2736
2737         data = kmalloc(bp->rx_buf_size, gfp);
2738         if (!data)
2739                 return -ENOMEM;
2740
2741         mapping = dma_map_single(&bp->pdev->dev,
2742                                  get_l2_fhdr(data),
2743                                  bp->rx_buf_use_size,
2744                                  PCI_DMA_FROMDEVICE);
2745         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2746                 kfree(data);
2747                 return -EIO;
2748         }
2749
2750         rx_buf->data = data;
2751         dma_unmap_addr_set(rx_buf, mapping, mapping);
2752
2753         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2754         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2755
2756         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2757
2758         return 0;
2759 }
2760
2761 static int
2762 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2763 {
2764         struct status_block *sblk = bnapi->status_blk.msi;
2765         u32 new_link_state, old_link_state;
2766         int is_set = 1;
2767
2768         new_link_state = sblk->status_attn_bits & event;
2769         old_link_state = sblk->status_attn_bits_ack & event;
2770         if (new_link_state != old_link_state) {
2771                 if (new_link_state)
2772                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2773                 else
2774                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2775         } else
2776                 is_set = 0;
2777
2778         return is_set;
2779 }
2780
/* Handle PHY/link attention events reported in the status block.
 *
 * phy_lock serializes link handling with the driver's other users of
 * that lock.  A LINK_STATE change triggers bnx2_set_link(); a
 * TIMER_ABORT event triggers bnx2_set_remote_link().
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2794
/* Read the latest TX consumer index from the shared status block.
 *
 * The barrier()s prevent the compiler from caching status block
 * fields, which the chip updates asynchronously via DMA.  When the
 * index lands exactly on BNX2_MAX_TX_DESC_CNT it is bumped past that
 * slot — apparently the last entry of a ring page is not a normal BD
 * (TODO confirm: next-page pointer convention).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2808
/* Service completed TX descriptors on one ring, up to @budget packets.
 *
 * Unmaps and frees each completed skb, advances the software consumer
 * index, and — if the queue was stopped for lack of descriptors —
 * wakes it once enough space is available again.  Returns the number
 * of packets completed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance maps 1:1 to a TX queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Compute the index of the packet's final BD
			 * (adjusting when it would fall on the skipped
			 * last slot of a ring page).  If the hardware
			 * has not consumed past it yet, the packet is
			 * only partially completed — stop here.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit difference handles index wrap. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap every fragment BD belonging to this packet. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index in case more packets
		 * completed while we were working.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent bnx2_start_xmit() stopping the queue.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2903
/* Recycle @count RX page-ring entries from the consumer side back to
 * the producer side without allocating new pages.
 *
 * If @skb is non-NULL, the caller failed to refill the page backing
 * the skb's last fragment: that page is detached from the skb and
 * returned to the ring, and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Move the page and its DMA address from the consumer
		 * slot to the producer slot when they differ; when they
		 * coincide, the slot already holds the right page.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2961
2962 static inline void
2963 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2964                    u8 *data, u16 cons, u16 prod)
2965 {
2966         struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2967         struct bnx2_rx_bd *cons_bd, *prod_bd;
2968
2969         cons_rx_buf = &rxr->rx_buf_ring[cons];
2970         prod_rx_buf = &rxr->rx_buf_ring[prod];
2971
2972         dma_sync_single_for_device(&bp->pdev->dev,
2973                 dma_unmap_addr(cons_rx_buf, mapping),
2974                 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2975
2976         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2977
2978         prod_rx_buf->data = data;
2979
2980         if (cons == prod)
2981                 return;
2982
2983         dma_unmap_addr_set(prod_rx_buf, mapping,
2984                         dma_unmap_addr(cons_rx_buf, mapping));
2985
2986         cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
2987         prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
2988         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2989         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2990 }
2991
/* Build an skb around a completed RX buffer that exceeded the copy
 * threshold.
 *
 * The ring slot is refilled first so the ring never runs empty; on
 * refill failure the old buffer (and any page-ring pages belonging to
 * the packet) are recycled and NULL is returned.  For split/jumbo
 * packets (@hdr_len != 0) the head lives in @data and the remainder is
 * gathered from page-ring pages as skb fragments.
 *
 * @len: packet length as passed by bnx2_rx_int() (4-byte trailer
 *	already subtracted; it is re-added as raw_len below).
 * @ring_idx: consumer index in the high 16 bits, producer index in the
 *	low 16 bits.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Replenish the ring slot before consuming this buffer. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* Shared failure path (also reached from the build_skb
		 * failure below): recycle the page-ring pages that
		 * belong to this packet, if any.
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Point skb->data at the packet payload, past the l2_fhdr. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* +4 restores the trailer (presumably the FCS) that the
		 * caller subtracted; it spills into the page data.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only trailer bytes remain: trim them
				 * from the skb and recycle the unused
				 * pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3096
/* Read the latest RX consumer index from the shared status block.
 *
 * Mirrors bnx2_get_hw_tx_cons(): barrier()s keep the compiler from
 * caching the DMA-updated status block, and an index landing on
 * BNX2_MAX_RX_DESC_CNT is bumped past that slot (apparently not a
 * normal BD — see the TX variant).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3110
/* NAPI RX handler: process up to @budget completed RX descriptors.
 *
 * Small packets (<= rx_copy_thresh) are copied into a fresh skb and
 * the original buffer recycled in place; larger ones are wrapped via
 * bnx2_rx_skb(), possibly gathering page-ring fragments.  VLAN tag,
 * checksum and RX-hash offload results are propagated before handing
 * each skb to GRO.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; small packets
		 * are copied straight out of it below.
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Prefetch the next buffer's header to hide latency. */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/data split frame: the header length is
			 * carried in the ip_xsum field (overloaded for
			 * split packets).
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			/* Errored frame: recycle the buffer (and any
			 * page-ring pages) without passing it up.
			 */
			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte trailer (presumably the FCS) that
		 * hardware includes in l2_fhdr_pkt_len.
		 */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			/* The original buffer goes straight back into
			 * the ring.
			 */
			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they are 802.1Q tagged
		 * (ethertype 0x8100), which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Propagate hardware checksum verification, if enabled
		 * and the frame was recognized as TCP or UDP.
		 */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer indices and the running
	 * byte-sequence count.
	 */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3272
3273 /* MSI ISR - The only difference between this and the INTx ISR
3274  * is that the MSI interrupt is always serviced.
3275  */
3276 static irqreturn_t
3277 bnx2_msi(int irq, void *dev_instance)
3278 {
3279         struct bnx2_napi *bnapi = dev_instance;
3280         struct bnx2 *bp = bnapi->bp;
3281
3282         prefetch(bnapi->status_blk.msi);
3283         BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3284                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3285                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3286
3287         /* Return here if interrupt is disabled. */
3288         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3289                 return IRQ_HANDLED;
3290
3291         napi_schedule(&bnapi->napi);
3292
3293         return IRQ_HANDLED;
3294 }
3295
3296 static irqreturn_t
3297 bnx2_msi_1shot(int irq, void *dev_instance)
3298 {
3299         struct bnx2_napi *bnapi = dev_instance;
3300         struct bnx2 *bp = bnapi->bp;
3301
3302         prefetch(bnapi->status_blk.msi);
3303
3304         /* Return here if interrupt is disabled. */
3305         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3306                 return IRQ_HANDLED;
3307
3308         napi_schedule(&bnapi->napi);
3309
3310         return IRQ_HANDLED;
3311 }
3312
/* INTx/shared ISR: verify the interrupt is really ours, ack and mask
 * it, then defer the remaining work to NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* shared line; not our interrupt */

	/* Ack and mask further interrupts until polling completes. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		/* Record the status index being serviced before
		 * the poll handler runs.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3351
3352 static inline int
3353 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3354 {
3355         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3356         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3357
3358         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3359             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3360                 return 1;
3361         return 0;
3362 }
3363
3364 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3365                                  STATUS_ATTN_BITS_TIMER_ABORT)
3366
3367 static inline int
3368 bnx2_has_work(struct bnx2_napi *bnapi)
3369 {
3370         struct status_block *sblk = bnapi->status_blk.msi;
3371
3372         if (bnx2_has_fast_work(bnapi))
3373                 return 1;
3374
3375 #ifdef BCM_CNIC
3376         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3377                 return 1;
3378 #endif
3379
3380         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3381             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3382                 return 1;
3383
3384         return 0;
3385 }
3386
/* Workaround for a missed MSI: if work is pending but the status index
 * has not advanced since the previous idle check, assume the MSI was
 * lost.  Pulse the MSI enable bit to rearm it and invoke the ISR by
 * hand to service the pending events.  Called from the driver timer.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not enabled in hardware. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off/on, then fake the
			 * interrupt by calling the MSI ISR directly.
			 */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3408
#ifdef BCM_CNIC
/* Hand CNIC (iSCSI/FCoE offload) events to the registered handler, if
 * any.  cnic_ops may be unregistered concurrently, so it must be
 * dereferenced under rcu_read_lock().
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	ops = rcu_dereference(bp->cnic_ops);
	if (ops)
		bnapi->cnic_tag = ops->cnic_handler(bp->cnic_data,
						    bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3425
/* Service link-state/attention events.  An event is pending when the
 * raised attention bits differ from the acknowledged bits for any of
 * the STATUS_ATTN_EVENTS.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}
}
3445
3446 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3447                           int work_done, int budget)
3448 {
3449         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3450         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3451
3452         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3453                 bnx2_tx_int(bp, bnapi, 0);
3454
3455         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3456                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3457
3458         return work_done;
3459 }
3460
/* NAPI poll handler for non-zero MSI-X vectors: fast-path TX/RX work
 * only.  Link and CNIC events are handled on vector 0 by bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;	/* budget spent; stay scheduled */

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {
			/* All caught up: complete NAPI, then one write
			 * acks the status index and re-enables this
			 * vector's interrupt.
			 */
			napi_complete(napi);
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3487
/* NAPI poll handler for INTx/MSI (vector 0): handles link attention,
 * fast-path TX/RX work, and CNIC events.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI: a single write acks the index and
				 * re-enables the interrupt.
				 */
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: first write updates the index while
			 * keeping the interrupt masked; the second
			 * write unmasks it.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3536
3537 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3538  * from set_multicast.
3539  */
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC RX mode (promiscuous/VLAN-keep), the multicast hash
 * registers, and the unicast perfect-match filters to reflect the
 * net_device's current flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode minus the bits recomputed below. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags in the frame only when HW VLAN stripping is off
	 * and the chip/firmware allows it.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one of 256 bits: the low CRC
		 * byte selects register (bits 7:5) and bit (bits 4:0).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for perfect filtering: fall back
	 * to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable the sorter, program it, then re-enable it. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3625
3626 static int
3627 check_fw_section(const struct firmware *fw,
3628                  const struct bnx2_fw_file_section *section,
3629                  u32 alignment, bool non_empty)
3630 {
3631         u32 offset = be32_to_cpu(section->offset);
3632         u32 len = be32_to_cpu(section->len);
3633
3634         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3635                 return -EINVAL;
3636         if ((non_empty && len == 0) || len > fw->size - offset ||
3637             len & (alignment - 1))
3638                 return -EINVAL;
3639         return 0;
3640 }
3641
3642 static int
3643 check_mips_fw_entry(const struct firmware *fw,
3644                     const struct bnx2_mips_fw_file_entry *entry)
3645 {
3646         if (check_fw_section(fw, &entry->text, 4, true) ||
3647             check_fw_section(fw, &entry->data, 4, false) ||
3648             check_fw_section(fw, &entry->rodata, 4, false))
3649                 return -EINVAL;
3650         return 0;
3651 }
3652
3653 static void bnx2_release_firmware(struct bnx2 *bp)
3654 {
3655         if (bp->rv2p_firmware) {
3656                 release_firmware(bp->mips_firmware);
3657                 release_firmware(bp->rv2p_firmware);
3658                 bp->rv2p_firmware = NULL;
3659         }
3660 }
3661
/* Load and sanity-check the MIPS and RV2P firmware images for this
 * chip (5709 vs 5706/5708, with an A0/A1 RV2P variant for 5709).
 * On success both images are cached in bp; on failure everything
 * acquired so far is released.  Returns 0 or a negative errno.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the firmware files matching the chip revision. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	/* Validate every CPU entry in the MIPS image. */
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	/* RV2P sections are 8-byte aligned and mandatory. */
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3721
3722 static int bnx2_request_firmware(struct bnx2 *bp)
3723 {
3724         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3725 }
3726
3727 static u32
3728 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3729 {
3730         switch (idx) {
3731         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3732                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3733                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3734                 break;
3735         }
3736         return rv2p_code;
3737 }
3738
/* Download one RV2P processor image.  Each 64-bit instruction is
 * written as a HIGH/LOW register pair followed by a write of the
 * instruction index plus the RDWR command; this write order is
 * required by the hardware.  Afterwards up to 8 fixup words are
 * patched in place, and the processor is reset (un-stall happens
 * later during chip init).
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for this processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image, one 8-byte instruction at a time. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Patch the fixup locations.  A fixup value is a 32-bit word
	 * index into the image; rewrite the instruction containing it
	 * with the patched LOW half.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3798
3799 static int
3800 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3801             const struct bnx2_mips_fw_file_entry *fw_entry)
3802 {
3803         u32 addr, len, file_offset;
3804         __be32 *data;
3805         u32 offset;
3806         u32 val;
3807
3808         /* Halt the CPU. */
3809         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3810         val |= cpu_reg->mode_value_halt;
3811         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3812         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3813
3814         /* Load the Text area. */
3815         addr = be32_to_cpu(fw_entry->text.addr);
3816         len = be32_to_cpu(fw_entry->text.len);
3817         file_offset = be32_to_cpu(fw_entry->text.offset);
3818         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3819
3820         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3821         if (len) {
3822                 int j;
3823
3824                 for (j = 0; j < (len / 4); j++, offset += 4)
3825                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3826         }
3827
3828         /* Load the Data area. */
3829         addr = be32_to_cpu(fw_entry->data.addr);
3830         len = be32_to_cpu(fw_entry->data.len);
3831         file_offset = be32_to_cpu(fw_entry->data.offset);
3832         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3833
3834         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3835         if (len) {
3836                 int j;
3837
3838                 for (j = 0; j < (len / 4); j++, offset += 4)
3839                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3840         }
3841
3842         /* Load the Read-Only area. */
3843         addr = be32_to_cpu(fw_entry->rodata.addr);
3844         len = be32_to_cpu(fw_entry->rodata.len);
3845         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3846         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3847
3848         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3849         if (len) {
3850                 int j;
3851
3852                 for (j = 0; j < (len / 4); j++, offset += 4)
3853                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3854         }
3855
3856         /* Clear the pre-fetch instruction. */
3857         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3858
3859         val = be32_to_cpu(fw_entry->start_addr);
3860         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3861
3862         /* Start the CPU. */
3863         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3864         val &= ~cpu_reg->mode_value_halt;
3865         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3866         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3867
3868         return 0;
3869 }
3870
3871 static int
3872 bnx2_init_cpus(struct bnx2 *bp)
3873 {
3874         const struct bnx2_mips_fw_file *mips_fw =
3875                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3876         const struct bnx2_rv2p_fw_file *rv2p_fw =
3877                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3878         int rc;
3879
3880         /* Initialize the RV2P processor. */
3881         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3882         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3883
3884         /* Initialize the RX Processor. */
3885         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3886         if (rc)
3887                 goto init_cpu_err;
3888
3889         /* Initialize the TX Processor. */
3890         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3891         if (rc)
3892                 goto init_cpu_err;
3893
3894         /* Initialize the TX Patch-up Processor. */
3895         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3896         if (rc)
3897                 goto init_cpu_err;
3898
3899         /* Initialize the Completion Processor. */
3900         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3901         if (rc)
3902                 goto init_cpu_err;
3903
3904         /* Initialize the Command Processor. */
3905         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3906
3907 init_cpu_err:
3908         return rc;
3909 }
3910
/* Configure the chip for Wake-on-LAN before suspend/shutdown: force a
 * low-speed link on copper ports, enable magic/ACPI packet reception,
 * open the multicast filters, keep the EMAC and RPM blocks powered,
 * and finally tell the firmware whether we are suspending with WOL.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		/* Save autoneg settings; they are temporarily overridden
		 * to advertise only 10/100 on copper for WOL, then
		 * restored below.
		 */
		autoneg = bp->autoneg;
		advertising = bp->advertising;

		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Disable the sorter, program broadcast + multicast,
		 * then re-enable it.
		 */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	/* Notify firmware unless WOL messaging is not supported. */
	if (!(bp->flags & BNX2_FLAG_NO_WOL))
		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);

}
3990
/* Transition the device between PCI power states.  Only PCI_D0 and
 * PCI_D3hot are supported; anything else returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Undo the WOL packet-reception setup done for D3hot. */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* 5706 A0/A1 chips only enter D3hot when WOL is on;
		 * without WOL they stay in D0 (hardware workaround).
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
		} else {
			pci_set_power_state(bp->pdev, PCI_D3hot);
		}

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4033
4034 static int
4035 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4036 {
4037         u32 val;
4038         int j;
4039
4040         /* Request access to the flash interface. */
4041         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4042         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4043                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4044                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4045                         break;
4046
4047                 udelay(5);
4048         }
4049
4050         if (j >= NVRAM_TIMEOUT_COUNT)
4051                 return -EBUSY;
4052
4053         return 0;
4054 }
4055
4056 static int
4057 bnx2_release_nvram_lock(struct bnx2 *bp)
4058 {
4059         int j;
4060         u32 val;
4061
4062         /* Relinquish nvram interface. */
4063         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4064
4065         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4066                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4067                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4068                         break;
4069
4070                 udelay(5);
4071         }
4072
4073         if (j >= NVRAM_TIMEOUT_COUNT)
4074                 return -EBUSY;
4075
4076         return 0;
4077 }
4078
4079
4080 static int
4081 bnx2_enable_nvram_write(struct bnx2 *bp)
4082 {
4083         u32 val;
4084
4085         val = BNX2_RD(bp, BNX2_MISC_CFG);
4086         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4087
4088         if (bp->flash_info->flags & BNX2_NV_WREN) {
4089                 int j;
4090
4091                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4092                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4093                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4094
4095                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4096                         udelay(5);
4097
4098                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4099                         if (val & BNX2_NVM_COMMAND_DONE)
4100                                 break;
4101                 }
4102
4103                 if (j >= NVRAM_TIMEOUT_COUNT)
4104                         return -EBUSY;
4105         }
4106         return 0;
4107 }
4108
4109 static void
4110 bnx2_disable_nvram_write(struct bnx2 *bp)
4111 {
4112         u32 val;
4113
4114         val = BNX2_RD(bp, BNX2_MISC_CFG);
4115         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4116 }
4117
4118
4119 static void
4120 bnx2_enable_nvram_access(struct bnx2 *bp)
4121 {
4122         u32 val;
4123
4124         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4125         /* Enable both bits, even on read. */
4126         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4127                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4128 }
4129
4130 static void
4131 bnx2_disable_nvram_access(struct bnx2 *bp)
4132 {
4133         u32 val;
4134
4135         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4136         /* Disable both bits, even after read. */
4137         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4138                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4139                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4140 }
4141
4142 static int
4143 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4144 {
4145         u32 cmd;
4146         int j;
4147
4148         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4149                 /* Buffered flash, no erase needed */
4150                 return 0;
4151
4152         /* Build an erase command */
4153         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4154               BNX2_NVM_COMMAND_DOIT;
4155
4156         /* Need to clear DONE bit separately. */
4157         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4158
4159         /* Address of the NVRAM to read from. */
4160         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4161
4162         /* Issue an erase command. */
4163         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4164
4165         /* Wait for completion. */
4166         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4167                 u32 val;
4168
4169                 udelay(5);
4170
4171                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4172                 if (val & BNX2_NVM_COMMAND_DONE)
4173                         break;
4174         }
4175
4176         if (j >= NVRAM_TIMEOUT_COUNT)
4177                 return -EBUSY;
4178
4179         return 0;
4180 }
4181
4182 static int
4183 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4184 {
4185         u32 cmd;
4186         int j;
4187
4188         /* Build the command word. */
4189         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4190
4191         /* Calculate an offset of a buffered flash, not needed for 5709. */
4192         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4193                 offset = ((offset / bp->flash_info->page_size) <<
4194                            bp->flash_info->page_bits) +
4195                           (offset % bp->flash_info->page_size);
4196         }
4197
4198         /* Need to clear DONE bit separately. */
4199         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4200
4201         /* Address of the NVRAM to read from. */
4202         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4203
4204         /* Issue a read command. */
4205         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4206
4207         /* Wait for completion. */
4208         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4209                 u32 val;
4210
4211                 udelay(5);
4212
4213                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4214                 if (val & BNX2_NVM_COMMAND_DONE) {
4215                         __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4216                         memcpy(ret_val, &v, 4);
4217                         break;
4218                 }
4219         }
4220         if (j >= NVRAM_TIMEOUT_COUNT)
4221                 return -EBUSY;
4222
4223         return 0;
4224 }
4225
4226
4227 static int
4228 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4229 {
4230         u32 cmd;
4231         __be32 val32;
4232         int j;
4233
4234         /* Build the command word. */
4235         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4236
4237         /* Calculate an offset of a buffered flash, not needed for 5709. */
4238         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4239                 offset = ((offset / bp->flash_info->page_size) <<
4240                           bp->flash_info->page_bits) +
4241                          (offset % bp->flash_info->page_size);
4242         }
4243
4244         /* Need to clear DONE bit separately. */
4245         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4246
4247         memcpy(&val32, val, 4);
4248
4249         /* Write the data. */
4250         BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4251
4252         /* Address of the NVRAM to write to. */
4253         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4254
4255         /* Issue the write command. */
4256         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4257
4258         /* Wait for completion. */
4259         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4260                 udelay(5);
4261
4262                 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4263                         break;
4264         }
4265         if (j >= NVRAM_TIMEOUT_COUNT)
4266                 return -EBUSY;
4267
4268         return 0;
4269 }
4270
/* Identify the attached flash/EEPROM device and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * The 5709 has a single fixed flash type.  On older chips the type is
 * decoded from the NVM_CFG1 strapping bits against flash_table[]; if
 * the flash interface has not yet been reconfigured, the matching
 * entry's config registers are programmed into the chip under the
 * NVRAM lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* Fixed flash type; skip strap decoding entirely. */
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* NOTE(review): bit 30 of NVM_CFG1 apparently indicates the flash
	 * interface was already reconfigured - confirm against chip docs. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* NOTE(review): bit 23 seems to select the backup strap
		 * set - verify against the NVM_CFG1 register layout. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a matching table entry. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by firmware in shared memory; fall
	 * back to the table's total size when it is absent. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4353
4354 static int
4355 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4356                 int buf_size)
4357 {
4358         int rc = 0;
4359         u32 cmd_flags, offset32, len32, extra;
4360
4361         if (buf_size == 0)
4362                 return 0;
4363
4364         /* Request access to the flash interface. */
4365         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4366                 return rc;
4367
4368         /* Enable access to flash interface */
4369         bnx2_enable_nvram_access(bp);
4370
4371         len32 = buf_size;
4372         offset32 = offset;
4373         extra = 0;
4374
4375         cmd_flags = 0;
4376
4377         if (offset32 & 3) {
4378                 u8 buf[4];
4379                 u32 pre_len;
4380
4381                 offset32 &= ~3;
4382                 pre_len = 4 - (offset & 3);
4383
4384                 if (pre_len >= len32) {
4385                         pre_len = len32;
4386                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4387                                     BNX2_NVM_COMMAND_LAST;
4388                 }
4389                 else {
4390                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4391                 }
4392
4393                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4394
4395                 if (rc)
4396                         return rc;
4397
4398                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4399
4400                 offset32 += 4;
4401                 ret_buf += pre_len;
4402                 len32 -= pre_len;
4403         }
4404         if (len32 & 3) {
4405                 extra = 4 - (len32 & 3);
4406                 len32 = (len32 + 4) & ~3;
4407         }
4408
4409         if (len32 == 4) {
4410                 u8 buf[4];
4411
4412                 if (cmd_flags)
4413                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4414                 else
4415                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4416                                     BNX2_NVM_COMMAND_LAST;
4417
4418                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4419
4420                 memcpy(ret_buf, buf, 4 - extra);
4421         }
4422         else if (len32 > 0) {
4423                 u8 buf[4];
4424
4425                 /* Read the first word. */
4426                 if (cmd_flags)
4427                         cmd_flags = 0;
4428                 else
4429                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4430
4431                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4432
4433                 /* Advance to the next dword. */
4434                 offset32 += 4;
4435                 ret_buf += 4;
4436                 len32 -= 4;
4437
4438                 while (len32 > 4 && rc == 0) {
4439                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4440
4441                         /* Advance to the next dword. */
4442                         offset32 += 4;
4443                         ret_buf += 4;
4444                         len32 -= 4;
4445                 }
4446
4447                 if (rc)
4448                         return rc;
4449
4450                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4451                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4452
4453                 memcpy(ret_buf, buf, 4 - extra);
4454         }
4455
4456         /* Disable access to flash interface */
4457         bnx2_disable_nvram_access(bp);
4458
4459         bnx2_release_nvram_lock(bp);
4460
4461         return rc;
4462 }
4463
4464 static int
4465 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4466                 int buf_size)
4467 {
4468         u32 written, offset32, len32;
4469         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4470         int rc = 0;
4471         int align_start, align_end;
4472
4473         buf = data_buf;
4474         offset32 = offset;
4475         len32 = buf_size;
4476         align_start = align_end = 0;
4477
4478         if ((align_start = (offset32 & 3))) {
4479                 offset32 &= ~3;
4480                 len32 += align_start;
4481                 if (len32 < 4)
4482                         len32 = 4;
4483                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4484                         return rc;
4485         }
4486
4487         if (len32 & 3) {
4488                 align_end = 4 - (len32 & 3);
4489                 len32 += align_end;
4490                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4491                         return rc;
4492         }
4493
4494         if (align_start || align_end) {
4495                 align_buf = kmalloc(len32, GFP_KERNEL);
4496                 if (align_buf == NULL)
4497                         return -ENOMEM;
4498                 if (align_start) {
4499                         memcpy(align_buf, start, 4);
4500                 }
4501                 if (align_end) {
4502                         memcpy(align_buf + len32 - 4, end, 4);
4503                 }
4504                 memcpy(align_buf + align_start, data_buf, buf_size);
4505                 buf = align_buf;
4506         }
4507
4508         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4509                 flash_buffer = kmalloc(264, GFP_KERNEL);
4510                 if (flash_buffer == NULL) {
4511                         rc = -ENOMEM;
4512                         goto nvram_write_end;
4513                 }
4514         }
4515
4516         written = 0;
4517         while ((written < len32) && (rc == 0)) {
4518                 u32 page_start, page_end, data_start, data_end;
4519                 u32 addr, cmd_flags;
4520                 int i;
4521
4522                 /* Find the page_start addr */
4523                 page_start = offset32 + written;
4524                 page_start -= (page_start % bp->flash_info->page_size);
4525                 /* Find the page_end addr */
4526                 page_end = page_start + bp->flash_info->page_size;
4527                 /* Find the data_start addr */
4528                 data_start = (written == 0) ? offset32 : page_start;
4529                 /* Find the data_end addr */
4530                 data_end = (page_end > offset32 + len32) ?
4531                         (offset32 + len32) : page_end;
4532
4533                 /* Request access to the flash interface. */
4534                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4535                         goto nvram_write_end;
4536
4537                 /* Enable access to flash interface */
4538                 bnx2_enable_nvram_access(bp);
4539
4540                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4541                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4542                         int j;
4543
4544                         /* Read the whole page into the buffer
4545                          * (non-buffer flash only) */
4546                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4547                                 if (j == (bp->flash_info->page_size - 4)) {
4548                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4549                                 }
4550                                 rc = bnx2_nvram_read_dword(bp,
4551                                         page_start + j,
4552                                         &flash_buffer[j],
4553                                         cmd_flags);
4554
4555                                 if (rc)
4556                                         goto nvram_write_end;
4557
4558                                 cmd_flags = 0;
4559                         }
4560                 }
4561
4562                 /* Enable writes to flash interface (unlock write-protect) */
4563                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4564                         goto nvram_write_end;
4565
4566                 /* Loop to write back the buffer data from page_start to
4567                  * data_start */
4568                 i = 0;
4569                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4570                         /* Erase the page */
4571                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4572                                 goto nvram_write_end;
4573
4574                         /* Re-enable the write again for the actual write */
4575                         bnx2_enable_nvram_write(bp);
4576
4577                         for (addr = page_start; addr < data_start;
4578                                 addr += 4, i += 4) {
4579
4580                                 rc = bnx2_nvram_write_dword(bp, addr,
4581                                         &flash_buffer[i], cmd_flags);
4582
4583                                 if (rc != 0)
4584                                         goto nvram_write_end;
4585
4586                                 cmd_flags = 0;
4587                         }
4588                 }
4589
4590                 /* Loop to write the new data from data_start to data_end */
4591                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4592                         if ((addr == page_end - 4) ||
4593                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4594                                  (addr == data_end - 4))) {
4595
4596                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4597                         }
4598                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4599                                 cmd_flags);
4600
4601                         if (rc != 0)
4602                                 goto nvram_write_end;
4603
4604                         cmd_flags = 0;
4605                         buf += 4;
4606                 }
4607
4608                 /* Loop to write back the buffer data from data_end
4609                  * to page_end */
4610                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4611                         for (addr = data_end; addr < page_end;
4612                                 addr += 4, i += 4) {
4613
4614                                 if (addr == page_end-4) {
4615                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4616                                 }
4617                                 rc = bnx2_nvram_write_dword(bp, addr,
4618                                         &flash_buffer[i], cmd_flags);
4619
4620                                 if (rc != 0)
4621                                         goto nvram_write_end;
4622
4623                                 cmd_flags = 0;
4624                         }
4625                 }
4626
4627                 /* Disable writes to flash interface (lock write-protect) */
4628                 bnx2_disable_nvram_write(bp);
4629
4630                 /* Disable access to flash interface */
4631                 bnx2_disable_nvram_access(bp);
4632                 bnx2_release_nvram_lock(bp);
4633
4634                 /* Increment written */
4635                 written += data_end - data_start;
4636         }
4637
4638 nvram_write_end:
4639         kfree(flash_buffer);
4640         kfree(align_buf);
4641         return rc;
4642 }
4643
4644 static void
4645 bnx2_init_fw_cap(struct bnx2 *bp)
4646 {
4647         u32 val, sig = 0;
4648
4649         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4650         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4651
4652         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4653                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4654
4655         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4656         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4657                 return;
4658
4659         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4660                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4661                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4662         }
4663
4664         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4665             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4666                 u32 link;
4667
4668                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4669
4670                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4671                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4672                         bp->phy_port = PORT_FIBRE;
4673                 else
4674                         bp->phy_port = PORT_TP;
4675
4676                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4677                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4678         }
4679
4680         if (netif_running(bp->dev) && sig)
4681                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4682 }
4683
/* Point the PCI GRC windows at the chip's internal MSI-X table and
 * PBA so they are reachable through the register BAR.  Separate
 * window mode must be selected first so windows 2 and 3 can map
 * different internal addresses.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4692
/* Perform a chip soft reset and re-synchronize with the bootcode.
 *
 * @reset_code: driver message code passed to the firmware before and
 *	after the reset so it knows the reason (unload, diag, etc.).
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * if the chip comes back in the wrong endian mode, or an error from
 * the firmware handshake / 5706 A0 rbuf workaround.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		/* Stop all DMA engines; the read-back flushes the
		 * posted write before the short settling delay. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* Disable DMA in the core control, then poll the PCI
		 * device status for up to 100 ms.
		 * NOTE(review): the loop exits when the NO_PEND bit is
		 * clear - confirm the polarity against the 5709 docs. */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* The 5709 resets through the MISC command register;
		 * the read-back flushes the write before the delay. */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Restore register window / word-swap configuration
		 * after the reset. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset through the PCICFG misc config
		 * register with the core-reset-request bit set. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, re-select the default link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4817
4818 static int
4819 bnx2_init_chip(struct bnx2 *bp)
4820 {
4821         u32 val, mtu;
4822         int rc, i;
4823
4824         /* Make sure the interrupt is not active. */
4825         BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4826
4827         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4828               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4829 #ifdef __BIG_ENDIAN
4830               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4831 #endif
4832               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4833               DMA_READ_CHANS << 12 |
4834               DMA_WRITE_CHANS << 16;
4835
4836         val |= (0x2 << 20) | (1 << 11);
4837
4838         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4839                 val |= (1 << 23);
4840
4841         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4842             (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4843             !(bp->flags & BNX2_FLAG_PCIX))
4844                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4845
4846         BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4847
4848         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4849                 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4850                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4851                 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4852         }
4853
4854         if (bp->flags & BNX2_FLAG_PCIX) {
4855                 u16 val16;
4856
4857                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4858                                      &val16);
4859                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4860                                       val16 & ~PCI_X_CMD_ERO);
4861         }
4862
4863         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4864                 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4865                 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4866                 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4867
4868         /* Initialize context mapping and zero out the quick contexts.  The
4869          * context block must have already been enabled. */
4870         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4871                 rc = bnx2_init_5709_context(bp);
4872                 if (rc)
4873                         return rc;
4874         } else
4875                 bnx2_init_context(bp);
4876
4877         if ((rc = bnx2_init_cpus(bp)) != 0)
4878                 return rc;
4879
4880         bnx2_init_nvram(bp);
4881
4882         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4883
4884         val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4885         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4886         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4887         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4888                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4889                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4890                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4891         }
4892
4893         BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4894
4895         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4896         BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4897         BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4898
4899         val = (BNX2_PAGE_BITS - 8) << 24;
4900         BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4901
4902         /* Configure page size. */
4903         val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4904         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4905         val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4906         BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4907
4908         val = bp->mac_addr[0] +
4909               (bp->mac_addr[1] << 8) +
4910               (bp->mac_addr[2] << 16) +
4911               bp->mac_addr[3] +
4912               (bp->mac_addr[4] << 8) +
4913               (bp->mac_addr[5] << 16);
4914         BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4915
4916         /* Program the MTU.  Also include 4 bytes for CRC32. */
4917         mtu = bp->dev->mtu;
4918         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4919         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4920                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4921         BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4922
4923         if (mtu < 1500)
4924                 mtu = 1500;
4925
4926         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4927         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4928         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4929
4930         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4931         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4932                 bp->bnx2_napi[i].last_status_idx = 0;
4933
4934         bp->idle_chk_status_idx = 0xffff;
4935
4936         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4937
4938         /* Set up how to generate a link change interrupt. */
4939         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4940
4941         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4942                 (u64) bp->status_blk_mapping & 0xffffffff);
4943         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4944
4945         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4946                 (u64) bp->stats_blk_mapping & 0xffffffff);
4947         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4948                 (u64) bp->stats_blk_mapping >> 32);
4949
4950         BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4951                 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4952
4953         BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4954                 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4955
4956         BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4957                 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4958
4959         BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4960
4961         BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4962
4963         BNX2_WR(bp, BNX2_HC_COM_TICKS,
4964                 (bp->com_ticks_int << 16) | bp->com_ticks);
4965
4966         BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4967                 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4968
4969         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4970                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4971         else
4972                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4973         BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4974
4975         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4976                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4977         else {
4978                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4979                       BNX2_HC_CONFIG_COLLECT_STATS;
4980         }
4981
4982         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4983                 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4984                         BNX2_HC_MSIX_BIT_VECTOR_VAL);
4985
4986                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4987         }
4988
4989         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4990                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4991
4992         BNX2_WR(bp, BNX2_HC_CONFIG, val);
4993
4994         if (bp->rx_ticks < 25)
4995                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4996         else
4997                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4998
4999         for (i = 1; i < bp->irq_nvecs; i++) {
5000                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5001                            BNX2_HC_SB_CONFIG_1;
5002
5003                 BNX2_WR(bp, base,
5004                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5005                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5006                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5007
5008                 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5009                         (bp->tx_quick_cons_trip_int << 16) |
5010                          bp->tx_quick_cons_trip);
5011
5012                 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5013                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5014
5015                 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5016                         (bp->rx_quick_cons_trip_int << 16) |
5017                         bp->rx_quick_cons_trip);
5018
5019                 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5020                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5021         }
5022
5023         /* Clear internal stats counters. */
5024         BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5025
5026         BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5027
5028         /* Initialize the receive filter. */
5029         bnx2_set_rx_mode(bp->dev);
5030
5031         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5032                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5033                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5034                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5035         }
5036         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5037                           1, 0);
5038
5039         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5040         BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5041
5042         udelay(20);
5043
5044         bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5045
5046         return rc;
5047 }
5048
5049 static void
5050 bnx2_clear_ring_states(struct bnx2 *bp)
5051 {
5052         struct bnx2_napi *bnapi;
5053         struct bnx2_tx_ring_info *txr;
5054         struct bnx2_rx_ring_info *rxr;
5055         int i;
5056
5057         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5058                 bnapi = &bp->bnx2_napi[i];
5059                 txr = &bnapi->tx_ring;
5060                 rxr = &bnapi->rx_ring;
5061
5062                 txr->tx_cons = 0;
5063                 txr->hw_tx_cons = 0;
5064                 rxr->rx_prod_bseq = 0;
5065                 rxr->rx_prod = 0;
5066                 rxr->rx_cons = 0;
5067                 rxr->rx_pg_prod = 0;
5068                 rxr->rx_pg_cons = 0;
5069         }
5070 }
5071
5072 static void
5073 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5074 {
5075         u32 val, offset0, offset1, offset2, offset3;
5076         u32 cid_addr = GET_CID_ADDR(cid);
5077
5078         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5079                 offset0 = BNX2_L2CTX_TYPE_XI;
5080                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5081                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5082                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5083         } else {
5084                 offset0 = BNX2_L2CTX_TYPE;
5085                 offset1 = BNX2_L2CTX_CMD_TYPE;
5086                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5087                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5088         }
5089         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5090         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5091
5092         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5093         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5094
5095         val = (u64) txr->tx_desc_mapping >> 32;
5096         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5097
5098         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5099         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5100 }
5101
5102 static void
5103 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5104 {
5105         struct bnx2_tx_bd *txbd;
5106         u32 cid = TX_CID;
5107         struct bnx2_napi *bnapi;
5108         struct bnx2_tx_ring_info *txr;
5109
5110         bnapi = &bp->bnx2_napi[ring_num];
5111         txr = &bnapi->tx_ring;
5112
5113         if (ring_num == 0)
5114                 cid = TX_CID;
5115         else
5116                 cid = TX_TSS_CID + ring_num - 1;
5117
5118         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5119
5120         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5121
5122         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5123         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5124
5125         txr->tx_prod = 0;
5126         txr->tx_prod_bseq = 0;
5127
5128         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5129         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5130
5131         bnx2_init_tx_context(bp, cid, txr);
5132 }
5133
5134 static void
5135 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5136                      u32 buf_size, int num_rings)
5137 {
5138         int i;
5139         struct bnx2_rx_bd *rxbd;
5140
5141         for (i = 0; i < num_rings; i++) {
5142                 int j;
5143
5144                 rxbd = &rx_ring[i][0];
5145                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5146                         rxbd->rx_bd_len = buf_size;
5147                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5148                 }
5149                 if (i == (num_rings - 1))
5150                         j = 0;
5151                 else
5152                         j = i + 1;
5153                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5154                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5155         }
5156 }
5157
/* Initialize one RX ring: set up its BD chain and hardware context,
 * optionally configure the page (jumbo) ring, pre-fill both rings with
 * buffers, and publish the initial producer indices to the hardware.
 * The statement order mirrors the chip's expected programming sequence;
 * do not reorder.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
        int i;
        u16 prod, ring_prod;
        u32 cid, rx_cid_addr, val;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        /* Ring 0 uses the default RX CID; RSS rings follow RX_RSS_CID */
        if (ring_num == 0)
                cid = RX_CID;
        else
                cid = RX_RSS_CID + ring_num - 1;

        rx_cid_addr = GET_CID_ADDR(cid);

        bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
                             bp->rx_buf_use_size, bp->rx_max_ring);

        bnx2_init_rx_context(bp, cid);

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
                val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
                BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
        }

        /* Clear page-buffer size first; re-programmed below if the page
         * ring is in use.
         */
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
        if (bp->rx_pg_ring_size) {
                /* Jumbo support: set up the secondary page ring */
                bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
                                     rxr->rx_pg_desc_mapping,
                                     PAGE_SIZE, bp->rx_max_pg_ring);
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
                       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

                val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

                val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

                if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                        BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
        }

        /* Host address of the first BD page for the normal RX ring */
        val = (u64) rxr->rx_desc_mapping[0] >> 32;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-fill the page ring; stop early (with a warning) on
         * allocation failure and run with however many pages succeeded.
         */
        ring_prod = prod = rxr->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
                if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
                        netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
                                    ring_num, i, bp->rx_pg_ring_size);
                        break;
                }
                prod = BNX2_NEXT_RX_BD(prod);
                ring_prod = BNX2_RX_PG_RING_IDX(prod);
        }
        rxr->rx_pg_prod = prod;

        /* Pre-fill the normal RX ring the same way */
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
                        netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
                                    ring_num, i, bp->rx_ring_size);
                        break;
                }
                prod = BNX2_NEXT_RX_BD(prod);
                ring_prod = BNX2_RX_RING_IDX(prod);
        }
        rxr->rx_prod = prod;

        /* Mailbox doorbell addresses for this ring's producer indices */
        rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
        rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
        rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

        /* Tell the hardware about the buffers just posted */
        BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
        BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

        BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5243
/* (Re)initialize every TX and RX ring and, when multiple rings are in
 * use, program the TSS configuration and the RSS indirection/hash setup.
 * Register write order follows the hardware programming sequence.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
        int i;
        u32 val;

        bnx2_clear_ring_states(bp);

        /* Disable TSS while the TX rings are being set up */
        BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
        for (i = 0; i < bp->num_tx_rings; i++)
                bnx2_init_tx_ring(bp, i);

        if (bp->num_tx_rings > 1)
                BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
                        (TX_TSS_CID << 7));

        /* Disable RSS while the RX rings are being set up */
        BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
        bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

        for (i = 0; i < bp->num_rx_rings; i++)
                bnx2_init_rx_ring(bp, i);

        if (bp->num_rx_rings > 1) {
                u32 tbl_32 = 0;

                /* Build the RSS indirection table, 8 nibble-wide entries
                 * per 32-bit word, spreading entries round-robin over the
                 * non-default RX rings; flush each full word to hardware.
                 */
                for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
                        int shift = (i % 8) << 2;

                        tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
                        if ((i % 8) == 7) {
                                BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
                                BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
                                        BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
                                        BNX2_RLUP_RSS_COMMAND_WRITE |
                                        BNX2_RLUP_RSS_COMMAND_HASH_MASK);
                                tbl_32 = 0;
                        }
                }

                /* Enable RSS hashing for all IPv4 and IPv6 packet types */
                val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
                      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

                BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

        }
}
5290
5291 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5292 {
5293         u32 max, num_rings = 1;
5294
5295         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5296                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5297                 num_rings++;
5298         }
5299         /* round to next power of 2 */
5300         max = max_size;
5301         while ((max & num_rings) == 0)
5302                 max >>= 1;
5303
5304         if (num_rings != max)
5305                 max <<= 1;
5306
5307         return max;
5308 }
5309
/* Compute all RX buffer/ring sizing parameters for the requested ring
 * size @size, deciding whether the separate page (jumbo) ring is needed
 * based on whether a full frame plus skb overhead still fits in a page.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
        u32 rx_size, rx_space, jumbo_size;

        /* 8 for CRC and VLAN */
        rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

        /* Total per-buffer footprint including alignment padding and the
         * build_skb() shared-info tail.
         */
        rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
        bp->rx_pg_ring_size = 0;
        bp->rx_max_pg_ring = 0;
        bp->rx_max_pg_ring_idx = 0;
        if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
                /* Frame won't fit in one page: split it into a small
                 * header buffer plus page-ring pages.  "- 40" presumably
                 * accounts for header bytes kept in the first buffer —
                 * NOTE(review): confirm against the chip docs.
                 */
                int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

                jumbo_size = size * pages;
                if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
                        jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

                bp->rx_pg_ring_size = jumbo_size;
                bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
                                                        BNX2_MAX_RX_PG_RINGS);
                bp->rx_max_pg_ring_idx =
                        (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
                /* First buffer only holds headers up to the copy thresh */
                rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
                bp->rx_copy_thresh = 0;
        }

        bp->rx_buf_use_size = rx_size;
        /* hw alignment + build_skb() overhead*/
        bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
                NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
        bp->rx_ring_size = size;
        bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
        bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5350
/* Free every pending TX skb on all TX rings, unmapping the head DMA
 * mapping and each fragment mapping before releasing the skb.  The ring
 * index j is advanced inside the loop body (head BD, then one BD per
 * fragment), so the outer for has no increment expression.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
                int j;

                /* Ring never allocated — nothing to free */
                if (txr->tx_buf_ring == NULL)
                        continue;

                for (j = 0; j < BNX2_TX_DESC_CNT; ) {
                        struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
                        struct sk_buff *skb = tx_buf->skb;
                        int k, last;

                        /* Empty slot: just advance to the next BD */
                        if (skb == NULL) {
                                j = BNX2_NEXT_TX_BD(j);
                                continue;
                        }

                        /* Unmap the linear (head) part of the skb */
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(tx_buf, mapping),
                                         skb_headlen(skb),
                                         PCI_DMA_TODEVICE);

                        tx_buf->skb = NULL;

                        /* Unmap each fragment; fragments occupy the BDs
                         * immediately following the head BD.
                         */
                        last = tx_buf->nr_frags;
                        j = BNX2_NEXT_TX_BD(j);
                        for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
                                tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
                                dma_unmap_page(&bp->pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
                                        skb_frag_size(&skb_shinfo(skb)->frags[k]),
                                        PCI_DMA_TODEVICE);
                        }
                        dev_kfree_skb(skb);
                }
                /* Reset BQL accounting for this queue */
                netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
        }
}
5395
5396 static void
5397 bnx2_free_rx_skbs(struct bnx2 *bp)
5398 {
5399         int i;
5400
5401         for (i = 0; i < bp->num_rx_rings; i++) {
5402                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5403                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5404                 int j;
5405
5406                 if (rxr->rx_buf_ring == NULL)
5407                         return;
5408
5409                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5410                         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5411                         u8 *data = rx_buf->data;
5412
5413                         if (data == NULL)
5414                                 continue;
5415
5416                         dma_unmap_single(&bp->pdev->dev,
5417                                          dma_unmap_addr(rx_buf, mapping),
5418                                          bp->rx_buf_use_size,
5419                                          PCI_DMA_FROMDEVICE);
5420
5421                         rx_buf->data = NULL;
5422
5423                         kfree(data);
5424                 }
5425                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5426                         bnx2_free_rx_page(bp, rxr, j);
5427         }
5428 }
5429
/* Free all pending TX skbs and posted RX buffers/pages on every ring. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
5436
5437 static int
5438 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5439 {
5440         int rc;
5441
5442         rc = bnx2_reset_chip(bp, reset_code);
5443         bnx2_free_skbs(bp);
5444         if (rc)
5445                 return rc;
5446
5447         if ((rc = bnx2_init_chip(bp)) != 0)
5448                 return rc;
5449
5450         bnx2_init_all_rings(bp);
5451         return 0;
5452 }
5453
5454 static int
5455 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5456 {
5457         int rc;
5458
5459         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5460                 return rc;
5461
5462         spin_lock_bh(&bp->phy_lock);
5463         bnx2_init_phy(bp, reset_phy);
5464         bnx2_set_link(bp);
5465         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5466                 bnx2_remote_phy_event(bp);
5467         spin_unlock_bh(&bp->phy_lock);
5468         return 0;
5469 }
5470
5471 static int
5472 bnx2_shutdown_chip(struct bnx2 *bp)
5473 {
5474         u32 reset_code;
5475
5476         if (bp->flags & BNX2_FLAG_NO_WOL)
5477                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5478         else if (bp->wol)
5479                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5480         else
5481                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5482
5483         return bnx2_reset_chip(bp, reset_code);
5484 }
5485
/* Register self-test (ethtool): for each table entry, verify that the
 * read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Returns 0 on success or
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                /* sentinel: 0xffff offset terminates the table */
                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                /* Some registers don't exist on the 5709 */
                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Write all-zeros: rw bits must read back 0, ro bits
                 * must keep their saved value.
                 */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all-ones: rw bits must read back 1, ro bits
                 * must still keep their saved value.
                 */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore the register before reporting failure */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
5656
5657 static int
5658 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5659 {
5660         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5661                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5662         int i;
5663
5664         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5665                 u32 offset;
5666
5667                 for (offset = 0; offset < size; offset += 4) {
5668
5669                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5670
5671                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5672                                 test_pattern[i]) {
5673                                 return -ENODEV;
5674                         }
5675                 }
5676         }
5677         return 0;
5678 }
5679
5680 static int
5681 bnx2_test_memory(struct bnx2 *bp)
5682 {
5683         int ret = 0;
5684         int i;
5685         static struct mem_entry {
5686                 u32   offset;
5687                 u32   len;
5688         } mem_tbl_5706[] = {
5689                 { 0x60000,  0x4000 },
5690                 { 0xa0000,  0x3000 },
5691                 { 0xe0000,  0x4000 },
5692                 { 0x120000, 0x4000 },
5693                 { 0x1a0000, 0x4000 },
5694                 { 0x160000, 0x4000 },
5695                 { 0xffffffff, 0    },
5696         },
5697         mem_tbl_5709[] = {
5698                 { 0x60000,  0x4000 },
5699                 { 0xa0000,  0x3000 },
5700                 { 0xe0000,  0x4000 },
5701                 { 0x120000, 0x4000 },
5702                 { 0x1a0000, 0x4000 },
5703                 { 0xffffffff, 0    },
5704         };
5705         struct mem_entry *mem_tbl;
5706
5707         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5708                 mem_tbl = mem_tbl_5709;
5709         else
5710                 mem_tbl = mem_tbl_5706;
5711
5712         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5713                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5714                         mem_tbl[i].len)) != 0) {
5715                         return ret;
5716                 }
5717         }
5718
5719         return ret;
5720 }
5721
5722 #define BNX2_MAC_LOOPBACK       0
5723 #define BNX2_PHY_LOOPBACK       1
5724
/* Send one self-addressed frame through the chip in MAC or PHY
 * loopback mode and verify it comes back intact on the rx ring.
 * Returns 0 when the frame is received with a matching payload,
 * -EINVAL for an unknown mode, -ENOMEM/-EIO on allocation or DMA
 * mapping failure, and -ENODEV when the frame is lost or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always runs on vector 0 for both tx and rx. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* No local PHY to loop through when firmware manages it. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source/type area, then an incrementing byte pattern.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status-block update (without an interrupt) so the rx
	 * consumer index sampled below is current.
	 */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post the frame as a single start+end tx descriptor. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell. */
	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second forced coalesce so the completed tx/rx indices are
	 * visible in the status block.
	 */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* tx must have completed ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly our one frame must have arrived on rx. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Fail if the hardware flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5857
5858 #define BNX2_MAC_LOOPBACK_FAILED        1
5859 #define BNX2_PHY_LOOPBACK_FAILED        2
5860 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5861                                          BNX2_PHY_LOOPBACK_FAILED)
5862
5863 static int
5864 bnx2_test_loopback(struct bnx2 *bp)
5865 {
5866         int rc = 0;
5867
5868         if (!netif_running(bp->dev))
5869                 return BNX2_LOOPBACK_FAILED;
5870
5871         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5872         spin_lock_bh(&bp->phy_lock);
5873         bnx2_init_phy(bp, 1);
5874         spin_unlock_bh(&bp->phy_lock);
5875         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5876                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5877         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5878                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5879         return rc;
5880 }
5881
5882 #define NVRAM_SIZE 0x200
5883 #define CRC32_RESIDUAL 0xdebb20e3
5884
5885 static int
5886 bnx2_test_nvram(struct bnx2 *bp)
5887 {
5888         __be32 buf[NVRAM_SIZE / 4];
5889         u8 *data = (u8 *) buf;
5890         int rc = 0;
5891         u32 magic, csum;
5892
5893         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5894                 goto test_nvram_done;
5895
5896         magic = be32_to_cpu(buf[0]);
5897         if (magic != 0x669955aa) {
5898                 rc = -ENODEV;
5899                 goto test_nvram_done;
5900         }
5901
5902         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5903                 goto test_nvram_done;
5904
5905         csum = ether_crc_le(0x100, data);
5906         if (csum != CRC32_RESIDUAL) {
5907                 rc = -ENODEV;
5908                 goto test_nvram_done;
5909         }
5910
5911         csum = ether_crc_le(0x100, data + 0x100);
5912         if (csum != CRC32_RESIDUAL) {
5913                 rc = -ENODEV;
5914         }
5915
5916 test_nvram_done:
5917         return rc;
5918 }
5919
5920 static int
5921 bnx2_test_link(struct bnx2 *bp)
5922 {
5923         u32 bmsr;
5924
5925         if (!netif_running(bp->dev))
5926                 return -ENODEV;
5927
5928         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5929                 if (bp->link_up)
5930                         return 0;
5931                 return -ENODEV;
5932         }
5933         spin_lock_bh(&bp->phy_lock);
5934         bnx2_enable_bmsr1(bp);
5935         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5936         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5937         bnx2_disable_bmsr1(bp);
5938         spin_unlock_bh(&bp->phy_lock);
5939
5940         if (bmsr & BMSR_LSTATUS) {
5941                 return 0;
5942         }
5943         return -ENODEV;
5944 }
5945
5946 static int
5947 bnx2_test_intr(struct bnx2 *bp)
5948 {
5949         int i;
5950         u16 status_idx;
5951
5952         if (!netif_running(bp->dev))
5953                 return -ENODEV;
5954
5955         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5956
5957         /* This register is not touched during run-time. */
5958         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5959         BNX2_RD(bp, BNX2_HC_COMMAND);
5960
5961         for (i = 0; i < 10; i++) {
5962                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5963                         status_idx) {
5964
5965                         break;
5966                 }
5967
5968                 msleep_interruptible(10);
5969         }
5970         if (i < 10)
5971                 return 0;
5972
5973         return -ENODEV;
5974 }
5975
/* Determining link for parallel detection.
 * Returns 1 when the 5706S SerDes sees a usable signal from the link
 * partner without autonegotiation completing (parallel detect),
 * 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Signal-detect must be asserted in the MISC shadow mode-ctl. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice: the first read may return a stale latched value. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read for the expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6007
/* Periodic link maintenance for the 5706 SerDes PHY, called from
 * bnx2_timer().  Falls back to forced 1G (parallel detect) when
 * autoneg gets no response, reverts to autoneg when the partner
 * starts negotiating, and forces the link down on loss of sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg was recently (re)started; give it time. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not completed but the partner has a
			 * good signal: force 1G full duplex and remember
			 * that the link came up via parallel detection.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detect; if vendor reg 0x15 bit 5
		 * is set (presumably the partner is now negotiating —
		 * NOTE(review): semantics inferred from usage), re-enable
		 * autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Read twice: first read may return a stale latched value. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Lost sync while nominally up: force the link down
			 * once, then let bnx2_set_link() re-evaluate on the
			 * next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6069
/* Periodic link maintenance for the 5708 SerDes PHY, called from
 * bnx2_timer().  While the link is down with autoneg requested,
 * alternate between forced 2.5G and autonegotiation until it comes up.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Link is managed by remote firmware; nothing to do here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Mode was flipped recently; give the link time to settle. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg is not converging; try forced 2.5G. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G did not link either; go back to
			 * autoneg and skip the next two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6102
/* Per-device timer callback (bp->timer).  Performs periodic
 * housekeeping — missed-MSI check, firmware heartbeat, stats
 * workaround, SerDes link maintenance — then re-arms itself.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;		/* device is closed; do not re-arm */

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;	/* interrupts held off; skip work */

	/* Only plain (non-one-shot) MSI needs the missed-MSI check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6138
6139 static int
6140 bnx2_request_irq(struct bnx2 *bp)
6141 {
6142         unsigned long flags;
6143         struct bnx2_irq *irq;
6144         int rc = 0, i;
6145
6146         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6147                 flags = 0;
6148         else
6149                 flags = IRQF_SHARED;
6150
6151         for (i = 0; i < bp->irq_nvecs; i++) {
6152                 irq = &bp->irq_tbl[i];
6153                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6154                                  &bp->bnx2_napi[i]);
6155                 if (rc)
6156                         break;
6157                 irq->requested = 1;
6158         }
6159         return rc;
6160 }
6161
6162 static void
6163 __bnx2_free_irq(struct bnx2 *bp)
6164 {
6165         struct bnx2_irq *irq;
6166         int i;
6167
6168         for (i = 0; i < bp->irq_nvecs; i++) {
6169                 irq = &bp->irq_tbl[i];
6170                 if (irq->requested)
6171                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6172                 irq->requested = 0;
6173         }
6174 }
6175
6176 static void
6177 bnx2_free_irq(struct bnx2 *bp)
6178 {
6179
6180         __bnx2_free_irq(bp);
6181         if (bp->flags & BNX2_FLAG_USING_MSI)
6182                 pci_disable_msi(bp->pdev);
6183         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6184                 pci_disable_msix(bp->pdev);
6185
6186         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6187 }
6188
/* Try to put the device into MSI-X mode with up to msix_vecs net
 * vectors (plus one extra for CNIC when built in).  On success, sets
 * BNX2_FLAG_USING_MSIX / BNX2_FLAG_ONE_SHOT_MSI and fills bp->irq_tbl;
 * on failure, returns leaving the caller's INTx defaults in place.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Point the chip's MSI-X table and PBA at the GRC windows. */
	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;		/* one extra vector for the CNIC driver */
#endif
	rc = -ENOSPC;
	/* pci_enable_msix() returns a positive count when fewer vectors
	 * are available; retry with that reduced count until it either
	 * succeeds (0) or fails hard (<0).
	 */
	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
		if (rc <= 0)
			break;
		if (rc > 0)
			total_vecs = rc;
	}

	if (rc != 0)
		return;		/* MSI-X unavailable; caller falls back */

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;		/* the CNIC vector is not a net rx/tx vector */
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6239
/* Choose the interrupt mode (MSI-X, MSI or INTx) and derive the rx/tx
 * ring counts from the number of vectors obtained.  dis_msi forces
 * plain INTx.  Returns the result of netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Size the vector request from the user-requested ring counts,
	 * defaulting to the RSS queue heuristic when a count is unset.
	 */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* INTx defaults; overridden below if MSI/MSI-X is enabled. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI when MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				/* 5709 uses the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Clamp the ring counts to the vectors actually available. */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6291
/* Called with rtnl_lock */
/* ndo_open: bring the device up — load firmware, select an interrupt
 * mode, allocate memory, request IRQs and initialize the NIC.  If MSI
 * turns out to be non-functional, fall back to INTx and re-initialize.
 * Returns 0 or a negative errno; on error all resources are released.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces INTx; then re-init the NIC. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind everything set up above, then return rc via out. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6374
/* Deferred reset worker (bp->reset_task), scheduled from
 * bnx2_tx_timeout().  Stops the netif, re-initializes the NIC and
 * restarts it, all under rtnl_lock; closes the device if re-init
 * fails.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;		/* device was closed in the meantime */
	}

	bnx2_netif_stop(bp, true);

	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* Re-enable NAPI (disabled by bnx2_netif_stop) before
		 * dev_close() tears the device down.
		 */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6409
6410 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6411
/* Dump the chip's flow-through-queue control registers, on-chip CPU
 * state and the TBDC CAM to the kernel log.  Diagnostic only, called
 * from bnx2_tx_timeout(); the only device writes are the TBDC read
 * commands issued below.
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ appears twice in this table — one of
		 * the two entries may have been intended as a different
		 * queue; confirm against the register list before changing.
		 */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	/* The pc register (reg + 0x1c) is read twice per line —
	 * presumably so the log shows whether each CPU is advancing.
	 */
	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	/* Read each of the 32 TBDC CAM lines through the command
	 * interface, polling (bounded to 100 iterations) for the
	 * arbitration bit to clear before sampling the result.
	 */
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6478
/* Log a read-only snapshot of PCI config and MAC status registers,
 * used when diagnosing a tx timeout.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	/* Power-management control and the chip's misc config word. */
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* The MSI-X PBA is mapped behind GRC window 3 (see
	 * bnx2_enable_msix()).
	 */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6502
6503 static void
6504 bnx2_tx_timeout(struct net_device *dev)
6505 {
6506         struct bnx2 *bp = netdev_priv(dev);
6507
6508         bnx2_dump_ftq(bp);
6509         bnx2_dump_state(bp);
6510         bnx2_dump_mcp_state(bp);
6511
6512         /* This allows the netif to be shutdown gracefully before resetting */
6513         schedule_work(&bp->reset_task);
6514 }
6515
6516 /* Called with netif_tx_lock.
6517  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6518  * netif_wake_queue().
6519  */
6520 static netdev_tx_t
6521 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6522 {
6523         struct bnx2 *bp = netdev_priv(dev);
6524         dma_addr_t mapping;
6525         struct bnx2_tx_bd *txbd;
6526         struct bnx2_sw_tx_bd *tx_buf;
6527         u32 len, vlan_tag_flags, last_frag, mss;
6528         u16 prod, ring_prod;
6529         int i;
6530         struct bnx2_napi *bnapi;
6531         struct bnx2_tx_ring_info *txr;
6532         struct netdev_queue *txq;
6533
6534         /*  Determine which tx ring we will be placed on */
6535         i = skb_get_queue_mapping(skb);
6536         bnapi = &bp->bnx2_napi[i];
6537         txr = &bnapi->tx_ring;
6538         txq = netdev_get_tx_queue(dev, i);
6539
6540         if (unlikely(bnx2_tx_avail(bp, txr) <
6541             (skb_shinfo(skb)->nr_frags + 1))) {
6542                 netif_tx_stop_queue(txq);
6543                 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6544
6545                 return NETDEV_TX_BUSY;
6546         }
6547         len = skb_headlen(skb);
6548         prod = txr->tx_prod;
6549         ring_prod = BNX2_TX_RING_IDX(prod);
6550
6551         vlan_tag_flags = 0;
6552         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6553                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6554         }
6555
6556         if (vlan_tx_tag_present(skb)) {
6557                 vlan_tag_flags |=
6558                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6559         }
6560
6561         if ((mss = skb_shinfo(skb)->gso_size)) {
6562                 u32 tcp_opt_len;
6563                 struct iphdr *iph;
6564
6565                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6566
6567                 tcp_opt_len = tcp_optlen(skb);
6568
6569                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6570                         u32 tcp_off = skb_transport_offset(skb) -
6571                                       sizeof(struct ipv6hdr) - ETH_HLEN;
6572
6573                         vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6574                                           TX_BD_FLAGS_SW_FLAGS;
6575                         if (likely(tcp_off == 0))
6576                                 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6577                         else {
6578                                 tcp_off >>= 3;
6579                                 vlan_tag_flags |= ((tcp_off & 0x3) <<
6580                                                    TX_BD_FLAGS_TCP6_OFF0_SHL) |
6581                                                   ((tcp_off & 0x10) <<
6582                                                    TX_BD_FLAGS_TCP6_OFF4_SHL);
6583                                 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6584                         }
6585                 } else {
6586                         iph = ip_hdr(skb);
6587                         if (tcp_opt_len || (iph->ihl > 5)) {
6588                                 vlan_tag_flags |= ((iph->ihl - 5) +
6589                                                    (tcp_opt_len >> 2)) << 8;
6590                         }
6591                 }
6592         } else
6593                 mss = 0;
6594
6595         mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6596         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6597                 dev_kfree_skb(skb);
6598                 return NETDEV_TX_OK;
6599         }
6600
6601         tx_buf = &txr->tx_buf_ring[ring_prod];
6602         tx_buf->skb = skb;
6603         dma_unmap_addr_set(tx_buf, mapping, mapping);
6604
6605         txbd = &txr->tx_desc_ring[ring_prod];
6606
6607         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6608         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6609         txbd->tx_bd_mss_nbytes = len | (mss << 16);
6610         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6611
6612         last_frag = skb_shinfo(skb)->nr_frags;
6613         tx_buf->nr_frags = last_frag;
6614         tx_buf->is_gso = skb_is_gso(skb);
6615
6616         for (i = 0; i < last_frag; i++) {
6617                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6618
6619                 prod = BNX2_NEXT_TX_BD(prod);
6620                 ring_prod = BNX2_TX_RING_IDX(prod);
6621                 txbd = &txr->tx_desc_ring[ring_prod];
6622
6623                 len = skb_frag_size(frag);
6624                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6625                                            DMA_TO_DEVICE);
6626                 if (dma_mapping_error(&bp->pdev->dev, mapping))
6627                         goto dma_error;
6628                 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6629                                    mapping);
6630
6631                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6632                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6633                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6634                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6635
6636         }
6637         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6638
6639         /* Sync BD data before updating TX mailbox */
6640         wmb();
6641
6642         netdev_tx_sent_queue(txq, skb->len);
6643
6644         prod = BNX2_NEXT_TX_BD(prod);
6645         txr->tx_prod_bseq += skb->len;
6646
6647         BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6648         BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6649
6650         mmiowb();
6651
6652         txr->tx_prod = prod;
6653
6654         if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6655                 netif_tx_stop_queue(txq);
6656
6657                 /* netif_tx_stop_queue() must be done before checking
6658                  * tx index in bnx2_tx_avail() below, because in
6659                  * bnx2_tx_int(), we update tx index before checking for
6660                  * netif_tx_queue_stopped().
6661                  */
6662                 smp_mb();
6663                 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6664                         netif_tx_wake_queue(txq);
6665         }
6666
6667         return NETDEV_TX_OK;
6668 dma_error:
6669         /* save value of frag that failed */
6670         last_frag = i;
6671
6672         /* start back at beginning and unmap skb */
6673         prod = txr->tx_prod;
6674         ring_prod = BNX2_TX_RING_IDX(prod);
6675         tx_buf = &txr->tx_buf_ring[ring_prod];
6676         tx_buf->skb = NULL;
6677         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6678                          skb_headlen(skb), PCI_DMA_TODEVICE);
6679
6680         /* unmap remaining mapped pages */
6681         for (i = 0; i < last_frag; i++) {
6682                 prod = BNX2_NEXT_TX_BD(prod);
6683                 ring_prod = BNX2_TX_RING_IDX(prod);
6684                 tx_buf = &txr->tx_buf_ring[ring_prod];
6685                 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6686                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
6687                                PCI_DMA_TODEVICE);
6688         }
6689
6690         dev_kfree_skb(skb);
6691         return NETDEV_TX_OK;
6692 }
6693
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce in order: disable chip interrupts and NAPI polling,
	 * then stop the TX queues, so no new work can be scheduled
	 * while the device is torn down below.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);	/* stop the periodic driver timer */
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	/* Release all queued skbs and the ring/status memory. */
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}
6713
6714 static void
6715 bnx2_save_stats(struct bnx2 *bp)
6716 {
6717         u32 *hw_stats = (u32 *) bp->stats_blk;
6718         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6719         int i;
6720
6721         /* The 1st 10 counters are 64-bit counters */
6722         for (i = 0; i < 20; i += 2) {
6723                 u32 hi;
6724                 u64 lo;
6725
6726                 hi = temp_stats[i] + hw_stats[i];
6727                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6728                 if (lo > 0xffffffff)
6729                         hi++;
6730                 temp_stats[i] = hi;
6731                 temp_stats[i + 1] = lo & 0xffffffff;
6732         }
6733
6734         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6735                 temp_stats[i] += hw_stats[i];
6736 }
6737
/* Combine the hi/lo u32 halves of a 64-bit hardware counter. */
#define GET_64BIT_NET_STATS64(ctr)              \
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Live hardware counter plus the value saved across chip resets
 * by bnx2_save_stats().  Implicitly uses the local variable "bp".
 */
#define GET_64BIT_NET_STATS(ctr)                                \
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* Same as above for plain 32-bit counters. */
#define GET_32BIT_NET_STATS(ctr)                                \
	(unsigned long) (bp->stats_blk->ctr +                   \
			 bp->temp_stats_blk->ctr)
6748
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* No statistics block means the chip was never initialized;
	 * leave the caller-supplied stats untouched.
	 */
	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are not reported on 5706 and 5708 A0
	 * (see the errata comment near bnx2_5706_stats_len_arr).
	 */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6821
6822 /* All ethtool functions called with rtnl_lock */
6823
6824 static int
6825 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6826 {
6827         struct bnx2 *bp = netdev_priv(dev);
6828         int support_serdes = 0, support_copper = 0;
6829
6830         cmd->supported = SUPPORTED_Autoneg;
6831         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6832                 support_serdes = 1;
6833                 support_copper = 1;
6834         } else if (bp->phy_port == PORT_FIBRE)
6835                 support_serdes = 1;
6836         else
6837                 support_copper = 1;
6838
6839         if (support_serdes) {
6840                 cmd->supported |= SUPPORTED_1000baseT_Full |
6841                         SUPPORTED_FIBRE;
6842                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6843                         cmd->supported |= SUPPORTED_2500baseX_Full;
6844
6845         }
6846         if (support_copper) {
6847                 cmd->supported |= SUPPORTED_10baseT_Half |
6848                         SUPPORTED_10baseT_Full |
6849                         SUPPORTED_100baseT_Half |
6850                         SUPPORTED_100baseT_Full |
6851                         SUPPORTED_1000baseT_Full |
6852                         SUPPORTED_TP;
6853
6854         }
6855
6856         spin_lock_bh(&bp->phy_lock);
6857         cmd->port = bp->phy_port;
6858         cmd->advertising = bp->advertising;
6859
6860         if (bp->autoneg & AUTONEG_SPEED) {
6861                 cmd->autoneg = AUTONEG_ENABLE;
6862         } else {
6863                 cmd->autoneg = AUTONEG_DISABLE;
6864         }
6865
6866         if (netif_carrier_ok(dev)) {
6867                 ethtool_cmd_speed_set(cmd, bp->line_speed);
6868                 cmd->duplex = bp->duplex;
6869         }
6870         else {
6871                 ethtool_cmd_speed_set(cmd, -1);
6872                 cmd->duplex = -1;
6873         }
6874         spin_unlock_bh(&bp->phy_lock);
6875
6876         cmd->transceiver = XCVR_INTERNAL;
6877         cmd->phy_address = bp->phy_addr;
6878
6879         return 0;
6880 }
6881
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	/* Work on local copies so nothing is committed to *bp until the
	 * validation below has passed.
	 */
	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Selecting the other port is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Restrict the advertisement mask to the selected medium;
		 * if the mask leaves nothing, advertise everything valid
		 * for that medium.
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);
		/* Forced speed: fibre allows only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); copper rejects forced
		 * 1G/2.5G.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6959
/* Fill in the identity strings reported by "ethtool -i". */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
6970
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6978
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Pairs of [start, end) byte offsets of readable register
	 * ranges; the gaps between ranges are left zeroed in the dump.
	 */
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the chip is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			/* End of this range: jump to the next one. */
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7030
7031 static void
7032 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7033 {
7034         struct bnx2 *bp = netdev_priv(dev);
7035
7036         if (bp->flags & BNX2_FLAG_NO_WOL) {
7037                 wol->supported = 0;
7038                 wol->wolopts = 0;
7039         }
7040         else {
7041                 wol->supported = WAKE_MAGIC;
7042                 if (bp->wol)
7043                         wol->wolopts = WAKE_MAGIC;
7044                 else
7045                         wol->wolopts = 0;
7046         }
7047         memset(&wol->sopass, 0, sizeof(wol->sopass));
7048 }
7049
7050 static int
7051 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7052 {
7053         struct bnx2 *bp = netdev_priv(dev);
7054
7055         if (wol->wolopts & ~WAKE_MAGIC)
7056                 return -EINVAL;
7057
7058         if (wol->wolopts & WAKE_MAGIC) {
7059                 if (bp->flags & BNX2_FLAG_NO_WOL)
7060                         return -EINVAL;
7061
7062                 bp->wol = 1;
7063         }
7064         else {
7065                 bp->wol = 0;
7066         }
7067
7068         device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7069
7070         return 0;
7071 }
7072
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting autoneg only makes sense if autoneg is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		/* A remote PHY is reconfigured through firmware instead
		 * of direct MII register writes.
		 */
		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		/* Hold loopback briefly so the peer sees the link drop;
		 * the PHY lock is released across the sleep.
		 */
		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7118
/* Report the driver's cached link state. */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
7126
7127 static int
7128 bnx2_get_eeprom_len(struct net_device *dev)
7129 {
7130         struct bnx2 *bp = netdev_priv(dev);
7131
7132         if (bp->flash_info == NULL)
7133                 return 0;
7134
7135         return (int) bp->flash_size;
7136 }
7137
7138 static int
7139 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7140                 u8 *eebuf)
7141 {
7142         struct bnx2 *bp = netdev_priv(dev);
7143         int rc;
7144
7145         /* parameters already validated in ethtool_get_eeprom */
7146
7147         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7148
7149         return rc;
7150 }
7151
7152 static int
7153 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7154                 u8 *eebuf)
7155 {
7156         struct bnx2 *bp = netdev_priv(dev);
7157         int rc;
7158
7159         /* parameters already validated in ethtool_set_eeprom */
7160
7161         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7162
7163         return rc;
7164 }
7165
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Clear everything; only the fields below are supported. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
7187
7188 static int
7189 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7190 {
7191         struct bnx2 *bp = netdev_priv(dev);
7192
7193         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7194         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7195
7196         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7197         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7198
7199         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7200         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7201
7202         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7203         if (bp->rx_quick_cons_trip_int > 0xff)
7204                 bp->rx_quick_cons_trip_int = 0xff;
7205
7206         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7207         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7208
7209         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7210         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7211
7212         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7213         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7214
7215         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7216         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7217                 0xff;
7218
7219         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7220         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7221                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7222                         bp->stats_ticks = USEC_PER_SEC;
7223         }
7224         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7225                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7226         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7227
7228         if (netif_running(bp->dev)) {
7229                 bnx2_netif_stop(bp, true);
7230                 bnx2_init_nic(bp, 0);
7231                 bnx2_netif_start(bp, true);
7232         }
7233
7234         return 0;
7235 }
7236
/* Report current and maximum RX/TX ring sizes.  The "jumbo" fields
 * map to the RX page ring used for large packets.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
7251
/* Resize the RX/TX rings to @rx/@tx entries.  If the NIC is running it
 * is fully torn down and rebuilt; on rebuild failure the device is
 * closed.  @reset_irq additionally releases and re-acquires the IRQ and
 * NAPI contexts so the interrupt mode can change.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Rebuild failed: re-enable NAPI so dev_close()
			 * can quiesce cleanly, then close the device.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7307
7308 static int
7309 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7310 {
7311         struct bnx2 *bp = netdev_priv(dev);
7312         int rc;
7313
7314         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7315                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7316                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7317
7318                 return -EINVAL;
7319         }
7320         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7321                                    false);
7322         return rc;
7323 }
7324
7325 static void
7326 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7327 {
7328         struct bnx2 *bp = netdev_priv(dev);
7329
7330         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7331         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7332         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7333 }
7334
7335 static int
7336 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7337 {
7338         struct bnx2 *bp = netdev_priv(dev);
7339
7340         bp->req_flow_ctrl = 0;
7341         if (epause->rx_pause)
7342                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7343         if (epause->tx_pause)
7344                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7345
7346         if (epause->autoneg) {
7347                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7348         }
7349         else {
7350                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7351         }
7352
7353         if (netif_running(dev)) {
7354                 spin_lock_bh(&bp->phy_lock);
7355                 bnx2_setup_phy(bp, bp->phy_port);
7356                 spin_unlock_bh(&bp->phy_lock);
7357         }
7358
7359         return 0;
7360 }
7361
/* ethtool statistic names, in the same order as bnx2_stats_offset_arr
 * and the per-chip stats length tables below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7413
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset (in u32 words) of a named counter within struct statistics_block */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter, in the same order as bnx2_stats_str_arr.
 * For 64-bit counters the offset points at the _hi word.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7467
/* Counter widths in bytes for 5706 A0-A2 and 5708 A0, indexed like
 * bnx2_stats_offset_arr: 8 = 64-bit counter, 4 = 32-bit, 0 = not reported.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7478
/* Counter widths in bytes for all other chip revisions (no counters
 * skipped): 8 = 64-bit counter, 4 = 32-bit.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7486
#define BNX2_NUM_TESTS 6

/* Names reported for ethtool self-test results; index order matches the
 * buf[] slots filled in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
7499
7500 static int
7501 bnx2_get_sset_count(struct net_device *dev, int sset)
7502 {
7503         switch (sset) {
7504         case ETH_SS_TEST:
7505                 return BNX2_NUM_TESTS;
7506         case ETH_SS_STATS:
7507                 return BNX2_NUM_STATS;
7508         default:
7509                 return -EOPNOTSUPP;
7510         }
7511 }
7512
/* ethtool self-test handler.  Offline tests (register, memory, loopback)
 * reset the chip into diagnostic mode and are destructive to traffic;
 * online tests (nvram, interrupt, link) run against the live device.
 * buf[] slots correspond 1:1 to bnx2_tests_str_arr; nonzero = failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Quiesce the interface and put the chip in diag mode
                 * before running the destructive offline tests.
                 */
                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                /* loopback result is itself the per-mode failure bitmap */
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Re-init only if the device is administratively up;
                 * otherwise leave the chip shut down.
                 */
                if (!netif_running(bp->dev))
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp, true);
                }

                /* wait up to ~7 seconds for link to come back up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
7567
7568 static void
7569 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7570 {
7571         switch (stringset) {
7572         case ETH_SS_STATS:
7573                 memcpy(buf, bnx2_stats_str_arr,
7574                         sizeof(bnx2_stats_str_arr));
7575                 break;
7576         case ETH_SS_TEST:
7577                 memcpy(buf, bnx2_tests_str_arr,
7578                         sizeof(bnx2_tests_str_arr));
7579                 break;
7580         }
7581 }
7582
/* ethtool get_ethtool_stats handler: fill buf[] with BNX2_NUM_STATS values.
 * Each value is the live hardware statistics block entry added to the
 * driver-maintained accumulator in temp_stats_blk at the same word offset.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 *hw_stats = (u32 *) bp->stats_blk;
        u32 *temp_stats = (u32 *) bp->temp_stats_blk;
        u8 *stats_len_arr = NULL;

        /* No stats block allocated yet: report all zeros. */
        if (hw_stats == NULL) {
                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
                return;
        }

        /* Early 5706/5708 steppings skip two counters because of errata. */
        if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
                stats_len_arr = bnx2_5706_stats_len_arr;
        else
                stats_len_arr = bnx2_5708_stats_len_arr;

        for (i = 0; i < BNX2_NUM_STATS; i++) {
                unsigned long offset;

                if (stats_len_arr[i] == 0) {
                        /* skip this counter */
                        buf[i] = 0;
                        continue;
                }

                offset = bnx2_stats_offset_arr[i];
                if (stats_len_arr[i] == 4) {
                        /* 4-byte counter */
                        buf[i] = (u64) *(hw_stats + offset) +
                                 *(temp_stats + offset);
                        continue;
                }
                /* 8-byte counter: offset points at the _hi word, with the
                 * low word immediately following it.
                 */
                buf[i] = (((u64) *(hw_stats + offset)) << 32) +
                         *(hw_stats + offset + 1) +
                         (((u64) *(temp_stats + offset)) << 32) +
                         *(temp_stats + offset + 1);
        }
}
7629
/* ethtool set_phys_id handler: blink the port LED so an operator can
 * physically identify the adapter.  ACTIVE saves the current LED mode and
 * returns 1 so the core calls back with ON/OFF once per second; INACTIVE
 * restores the saved mode.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
        struct bnx2 *bp = netdev_priv(dev);

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                /* Save LED config and take manual control of the LED. */
                bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
                BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
                return 1;       /* cycle on/off once per second */

        case ETHTOOL_ID_ON:
                /* Force every LED on regardless of link speed/traffic. */
                BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
                        BNX2_EMAC_LED_1000MB_OVERRIDE |
                        BNX2_EMAC_LED_100MB_OVERRIDE |
                        BNX2_EMAC_LED_10MB_OVERRIDE |
                        BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
                        BNX2_EMAC_LED_TRAFFIC);
                break;

        case ETHTOOL_ID_OFF:
                BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
                break;

        case ETHTOOL_ID_INACTIVE:
                /* Release the override and restore the saved LED mode. */
                BNX2_WR(bp, BNX2_EMAC_LED, 0);
                BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
                break;
        }

        return 0;
}
7662
7663 static netdev_features_t
7664 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7665 {
7666         struct bnx2 *bp = netdev_priv(dev);
7667
7668         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7669                 features |= NETIF_F_HW_VLAN_CTAG_RX;
7670
7671         return features;
7672 }
7673
/* netdev set_features hook.  Returns a positive value (1) when the driver
 * has reconfigured the chip and assigned dev->features itself, which tells
 * the networking core not to set dev->features again; returns 0 when no
 * reconfiguration was required.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* TSO with VLAN tag won't work with current firmware */
        if (features & NETIF_F_HW_VLAN_CTAG_TX)
                dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
        else
                dev->vlan_features &= ~NETIF_F_ALL_TSO;

        /* Reprogram the chip only if the VLAN-stripping setting actually
         * changes relative to the current RX mode and the device is up.
         */
        if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
            !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
            netif_running(dev)) {
                bnx2_netif_stop(bp, false);
                dev->features = features;
                bnx2_set_rx_mode(dev);
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
                bnx2_netif_start(bp, false);
                return 1;
        }

        return 0;
}
7698
7699 static void bnx2_get_channels(struct net_device *dev,
7700                               struct ethtool_channels *channels)
7701 {
7702         struct bnx2 *bp = netdev_priv(dev);
7703         u32 max_rx_rings = 1;
7704         u32 max_tx_rings = 1;
7705
7706         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7707                 max_rx_rings = RX_MAX_RINGS;
7708                 max_tx_rings = TX_MAX_RINGS;
7709         }
7710
7711         channels->max_rx = max_rx_rings;
7712         channels->max_tx = max_tx_rings;
7713         channels->max_other = 0;
7714         channels->max_combined = 0;
7715         channels->rx_count = bp->num_rx_rings;
7716         channels->tx_count = bp->num_tx_rings;
7717         channels->other_count = 0;
7718         channels->combined_count = 0;
7719 }
7720
7721 static int bnx2_set_channels(struct net_device *dev,
7722                               struct ethtool_channels *channels)
7723 {
7724         struct bnx2 *bp = netdev_priv(dev);
7725         u32 max_rx_rings = 1;
7726         u32 max_tx_rings = 1;
7727         int rc = 0;
7728
7729         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7730                 max_rx_rings = RX_MAX_RINGS;
7731                 max_tx_rings = TX_MAX_RINGS;
7732         }
7733         if (channels->rx_count > max_rx_rings ||
7734             channels->tx_count > max_tx_rings)
7735                 return -EINVAL;
7736
7737         bp->num_req_rx_rings = channels->rx_count;
7738         bp->num_req_tx_rings = channels->tx_count;
7739
7740         if (netif_running(dev))
7741                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7742                                            bp->tx_ring_size, true);
7743
7744         return rc;
7745 }
7746
/* ethtool operations supported by this driver; anything not listed falls
 * back to the networking core defaults.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = bnx2_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .set_phys_id            = bnx2_set_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
        .get_channels           = bnx2_get_channels,
        .set_channels           = bnx2_set_channels,
};
7774
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Called with rtnl_lock held; PHY register access is serialized with
 * bp->phy_lock.  MII access is refused when the PHY is managed remotely.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                /* Report our PHY address, then fall through to read. */
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                /* PHY is only accessible while the device is up. */
                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
7825
7826 /* Called with rtnl_lock */
7827 static int
7828 bnx2_change_mac_addr(struct net_device *dev, void *p)
7829 {
7830         struct sockaddr *addr = p;
7831         struct bnx2 *bp = netdev_priv(dev);
7832
7833         if (!is_valid_ether_addr(addr->sa_data))
7834                 return -EADDRNOTAVAIL;
7835
7836         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7837         if (netif_running(dev))
7838                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7839
7840         return 0;
7841 }
7842
7843 /* Called with rtnl_lock */
7844 static int
7845 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7846 {
7847         struct bnx2 *bp = netdev_priv(dev);
7848
7849         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7850                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7851                 return -EINVAL;
7852
7853         dev->mtu = new_mtu;
7854         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7855                                      false);
7856 }
7857
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke each vector's interrupt handler directly,
 * with that IRQ line disabled, so netconsole/netpoll can make progress
 * without relying on normal interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;

        for (i = 0; i < bp->irq_nvecs; i++) {
                struct bnx2_irq *irq = &bp->irq_tbl[i];

                disable_irq(irq->vector);
                irq->handler(irq->vector, &bp->bnx2_napi[i]);
                enable_irq(irq->vector);
        }
}
#endif
7874
7875 static void
7876 bnx2_get_5709_media(struct bnx2 *bp)
7877 {
7878         u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7879         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7880         u32 strap;
7881
7882         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7883                 return;
7884         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7885                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7886                 return;
7887         }
7888
7889         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7890                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7891         else
7892                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7893
7894         if (bp->func == 0) {
7895                 switch (strap) {
7896                 case 0x4:
7897                 case 0x5:
7898                 case 0x6:
7899                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7900                         return;
7901                 }
7902         } else {
7903                 switch (strap) {
7904                 case 0x1:
7905                 case 0x2:
7906                 case 0x4:
7907                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7908                         return;
7909                 }
7910         }
7911 }
7912
/* Detect the PCI/PCI-X bus type, width and clock speed from the chip's
 * PCICFG misc status register and record them in bp->flags and
 * bp->bus_speed_mhz (used for reporting and PCI-X-specific handling).
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= BNX2_FLAG_PCIX;

                /* PCI-X: read the detected clock speed from clock control. */
                clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Conventional PCI: only 33 vs. 66 MHz (M66EN pin). */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7964
/* Read the PCI VPD area out of NVRAM and, if the manufacturer ID keyword
 * is "1028" (Dell), append the vendor-specific version string (keyword
 * V0) to bp->fw_version.  Failures are silently ignored; fw_version is
 * simply left without the VPD suffix.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
        int rc, i, j;
        u8 *data;
        unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET   0x300
#define BNX2_VPD_LEN            128
#define BNX2_MAX_VER_SLEN       30

        /* 256 bytes: raw NVRAM data in the upper half, byte-swapped copy
         * in the lower half.
         */
        data = kmalloc(256, GFP_KERNEL);
        if (!data)
                return;

        rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
                             BNX2_VPD_LEN);
        if (rc)
                goto vpd_done;

        /* NVRAM stores each 32-bit word with its bytes reversed; un-swap
         * word by word into the lower half of the buffer.
         */
        for (i = 0; i < BNX2_VPD_LEN; i += 4) {
                data[i] = data[i + BNX2_VPD_LEN + 3];
                data[i + 1] = data[i + BNX2_VPD_LEN + 2];
                data[i + 2] = data[i + BNX2_VPD_LEN + 1];
                data[i + 3] = data[i + BNX2_VPD_LEN];
        }

        /* Locate the read-only VPD section and bound-check it. */
        i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto vpd_done;

        rosize = pci_vpd_lrdt_size(&data[i]);
        i += PCI_VPD_LRDT_TAG_SIZE;
        block_end = i + rosize;

        if (block_end > BNX2_VPD_LEN)
                goto vpd_done;

        /* Only proceed when the MFR_ID field is exactly "1028". */
        j = pci_vpd_find_info_keyword(data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_MFR_ID);
        if (j < 0)
                goto vpd_done;

        len = pci_vpd_info_field_size(&data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (j + len > block_end || len != 4 ||
            memcmp(&data[j], "1028", 4))
                goto vpd_done;

        /* Pull the vendor-specific version string (keyword V0). */
        j = pci_vpd_find_info_keyword(data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_VENDOR0);
        if (j < 0)
                goto vpd_done;

        len = pci_vpd_info_field_size(&data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
                goto vpd_done;

        memcpy(bp->fw_version, &data[j], len);
        bp->fw_version[len] = ' ';

vpd_done:
        kfree(data);
}
8032
8033 static int
8034 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8035 {
8036         struct bnx2 *bp;
8037         int rc, i, j;
8038         u32 reg;
8039         u64 dma_mask, persist_dma_mask;
8040         int err;
8041
8042         SET_NETDEV_DEV(dev, &pdev->dev);
8043         bp = netdev_priv(dev);
8044
8045         bp->flags = 0;
8046         bp->phy_flags = 0;
8047
8048         bp->temp_stats_blk =
8049                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8050
8051         if (bp->temp_stats_blk == NULL) {
8052                 rc = -ENOMEM;
8053                 goto err_out;
8054         }
8055
8056         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8057         rc = pci_enable_device(pdev);
8058         if (rc) {
8059                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8060                 goto err_out;
8061         }
8062
8063         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8064                 dev_err(&pdev->dev,
8065                         "Cannot find PCI device base address, aborting\n");
8066                 rc = -ENODEV;
8067                 goto err_out_disable;
8068         }
8069
8070         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8071         if (rc) {
8072                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8073                 goto err_out_disable;
8074         }
8075
8076         pci_set_master(pdev);
8077
8078         bp->pm_cap = pdev->pm_cap;
8079         if (bp->pm_cap == 0) {
8080                 dev_err(&pdev->dev,
8081                         "Cannot find power management capability, aborting\n");
8082                 rc = -EIO;
8083                 goto err_out_release;
8084         }
8085
8086         bp->dev = dev;
8087         bp->pdev = pdev;
8088
8089         spin_lock_init(&bp->phy_lock);
8090         spin_lock_init(&bp->indirect_lock);
8091 #ifdef BCM_CNIC
8092         mutex_init(&bp->cnic_lock);
8093 #endif
8094         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8095
8096         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8097                                                          TX_MAX_TSS_RINGS + 1));
8098         if (!bp->regview) {
8099                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8100                 rc = -ENOMEM;
8101                 goto err_out_release;
8102         }
8103
8104         /* Configure byte swap and enable write to the reg_window registers.
8105          * Rely on CPU to do target byte swapping on big endian systems
8106          * The chip's target access swapping will not swap all accesses
8107          */
8108         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8109                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8110                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8111
8112         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8113
8114         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8115                 if (!pci_is_pcie(pdev)) {
8116                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8117                         rc = -EIO;
8118                         goto err_out_unmap;
8119                 }
8120                 bp->flags |= BNX2_FLAG_PCIE;
8121                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8122                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8123
8124                 /* AER (Advanced Error Reporting) hooks */
8125                 err = pci_enable_pcie_error_reporting(pdev);
8126                 if (!err)
8127                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8128
8129         } else {
8130                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8131                 if (bp->pcix_cap == 0) {
8132                         dev_err(&pdev->dev,
8133                                 "Cannot find PCIX capability, aborting\n");
8134                         rc = -EIO;
8135                         goto err_out_unmap;
8136                 }
8137                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8138         }
8139
8140         if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8141             BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8142                 if (pdev->msix_cap)
8143                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8144         }
8145
8146         if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8147             BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8148                 if (pdev->msi_cap)
8149                         bp->flags |= BNX2_FLAG_MSI_CAP;
8150         }
8151
8152         /* 5708 cannot support DMA addresses > 40-bit.  */
8153         if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8154                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8155         else
8156                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8157
8158         /* Configure DMA attributes. */
8159         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8160                 dev->features |= NETIF_F_HIGHDMA;
8161                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8162                 if (rc) {
8163                         dev_err(&pdev->dev,
8164                                 "pci_set_consistent_dma_mask failed, aborting\n");
8165                         goto err_out_unmap;
8166                 }
8167         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8168                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8169                 goto err_out_unmap;
8170         }
8171
8172         if (!(bp->flags & BNX2_FLAG_PCIE))
8173                 bnx2_get_pci_speed(bp);
8174
8175         /* 5706A0 may falsely detect SERR and PERR. */
8176         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8177                 reg = BNX2_RD(bp, PCI_COMMAND);
8178                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8179                 BNX2_WR(bp, PCI_COMMAND, reg);
8180         } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8181                 !(bp->flags & BNX2_FLAG_PCIX)) {
8182
8183                 dev_err(&pdev->dev,
8184                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8185                 goto err_out_unmap;
8186         }
8187
8188         bnx2_init_nvram(bp);
8189
8190         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8191
8192         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8193                 bp->func = 1;
8194
8195         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8196             BNX2_SHM_HDR_SIGNATURE_SIG) {
8197                 u32 off = bp->func << 2;
8198
8199                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8200         } else
8201                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8202
8203         /* Get the permanent MAC address.  First we need to make sure the
8204          * firmware is actually running.
8205          */
8206         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8207
8208         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8209             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8210                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8211                 rc = -ENODEV;
8212                 goto err_out_unmap;
8213         }
8214
8215         bnx2_read_vpd_fw_ver(bp);
8216
8217         j = strlen(bp->fw_version);
8218         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8219         for (i = 0; i < 3 && j < 24; i++) {
8220                 u8 num, k, skip0;
8221
8222                 if (i == 0) {
8223                         bp->fw_version[j++] = 'b';
8224                         bp->fw_version[j++] = 'c';
8225                         bp->fw_version[j++] = ' ';
8226                 }
8227                 num = (u8) (reg >> (24 - (i * 8)));
8228                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8229                         if (num >= k || !skip0 || k == 1) {
8230                                 bp->fw_version[j++] = (num / k) + '0';
8231                                 skip0 = 0;
8232                         }
8233                 }
8234                 if (i != 2)
8235                         bp->fw_version[j++] = '.';
8236         }
8237         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8238         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8239                 bp->wol = 1;
8240
8241         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8242                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8243
8244                 for (i = 0; i < 30; i++) {
8245                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8246                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8247                                 break;
8248                         msleep(10);
8249                 }
8250         }
8251         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8252         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8253         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8254             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8255                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8256
8257                 if (j < 32)
8258                         bp->fw_version[j++] = ' ';
8259                 for (i = 0; i < 3 && j < 28; i++) {
8260                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8261                         reg = be32_to_cpu(reg);
8262                         memcpy(&bp->fw_version[j], &reg, 4);
8263                         j += 4;
8264                 }
8265         }
8266
8267         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8268         bp->mac_addr[0] = (u8) (reg >> 8);
8269         bp->mac_addr[1] = (u8) reg;
8270
8271         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8272         bp->mac_addr[2] = (u8) (reg >> 24);
8273         bp->mac_addr[3] = (u8) (reg >> 16);
8274         bp->mac_addr[4] = (u8) (reg >> 8);
8275         bp->mac_addr[5] = (u8) reg;
8276
8277         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8278         bnx2_set_rx_ring_size(bp, 255);
8279
8280         bp->tx_quick_cons_trip_int = 2;
8281         bp->tx_quick_cons_trip = 20;
8282         bp->tx_ticks_int = 18;
8283         bp->tx_ticks = 80;
8284
8285         bp->rx_quick_cons_trip_int = 2;
8286         bp->rx_quick_cons_trip = 12;
8287         bp->rx_ticks_int = 18;
8288         bp->rx_ticks = 18;
8289
8290         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8291
8292         bp->current_interval = BNX2_TIMER_INTERVAL;
8293
8294         bp->phy_addr = 1;
8295
8296         /* Disable WOL support if we are running on a SERDES chip. */
8297         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8298                 bnx2_get_5709_media(bp);
8299         else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8300                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8301
8302         bp->phy_port = PORT_TP;
8303         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8304                 bp->phy_port = PORT_FIBRE;
8305                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8306                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8307                         bp->flags |= BNX2_FLAG_NO_WOL;
8308                         bp->wol = 0;
8309                 }
8310                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8311                         /* Don't do parallel detect on this board because of
8312                          * some board problems.  The link will not go down
8313                          * if we do parallel detect.
8314                          */
8315                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8316                             pdev->subsystem_device == 0x310c)
8317                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8318                 } else {
8319                         bp->phy_addr = 2;
8320                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8321                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8322                 }
8323         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8324                    BNX2_CHIP(bp) == BNX2_CHIP_5708)
8325                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8326         else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8327                  (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8328                   BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8329                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8330
8331         bnx2_init_fw_cap(bp);
8332
8333         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8334             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8335             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8336             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8337                 bp->flags |= BNX2_FLAG_NO_WOL;
8338                 bp->wol = 0;
8339         }
8340
8341         if (bp->flags & BNX2_FLAG_NO_WOL)
8342                 device_set_wakeup_capable(&bp->pdev->dev, false);
8343         else
8344                 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8345
8346         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8347                 bp->tx_quick_cons_trip_int =
8348                         bp->tx_quick_cons_trip;
8349                 bp->tx_ticks_int = bp->tx_ticks;
8350                 bp->rx_quick_cons_trip_int =
8351                         bp->rx_quick_cons_trip;
8352                 bp->rx_ticks_int = bp->rx_ticks;
8353                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8354                 bp->com_ticks_int = bp->com_ticks;
8355                 bp->cmd_ticks_int = bp->cmd_ticks;
8356         }
8357
8358         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8359          *
8360          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8361          * with byte enables disabled on the unused 32-bit word.  This is legal
8362          * but causes problems on the AMD 8132 which will eventually stop
8363          * responding after a while.
8364          *
8365          * AMD believes this incompatibility is unique to the 5706, and
8366          * prefers to locally disable MSI rather than globally disabling it.
8367          */
8368         if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8369                 struct pci_dev *amd_8132 = NULL;
8370
8371                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8372                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8373                                                   amd_8132))) {
8374
8375                         if (amd_8132->revision >= 0x10 &&
8376                             amd_8132->revision <= 0x13) {
8377                                 disable_msi = 1;
8378                                 pci_dev_put(amd_8132);
8379                                 break;
8380                         }
8381                 }
8382         }
8383
8384         bnx2_set_default_link(bp);
8385         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8386
8387         init_timer(&bp->timer);
8388         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8389         bp->timer.data = (unsigned long) bp;
8390         bp->timer.function = bnx2_timer;
8391
8392 #ifdef BCM_CNIC
8393         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8394                 bp->cnic_eth_dev.max_iscsi_conn =
8395                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8396                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8397         bp->cnic_probe = bnx2_cnic_probe;
8398 #endif
8399         pci_save_state(pdev);
8400
8401         return 0;
8402
8403 err_out_unmap:
8404         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8405                 pci_disable_pcie_error_reporting(pdev);
8406                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8407         }
8408
8409         pci_iounmap(pdev, bp->regview);
8410         bp->regview = NULL;
8411
8412 err_out_release:
8413         pci_release_regions(pdev);
8414
8415 err_out_disable:
8416         pci_disable_device(pdev);
8417         pci_set_drvdata(pdev, NULL);
8418
8419 err_out:
8420         return rc;
8421 }
8422
8423 static char *
8424 bnx2_bus_string(struct bnx2 *bp, char *str)
8425 {
8426         char *s = str;
8427
8428         if (bp->flags & BNX2_FLAG_PCIE) {
8429                 s += sprintf(s, "PCI Express");
8430         } else {
8431                 s += sprintf(s, "PCI");
8432                 if (bp->flags & BNX2_FLAG_PCIX)
8433                         s += sprintf(s, "-X");
8434                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8435                         s += sprintf(s, " 32-bit");
8436                 else
8437                         s += sprintf(s, " 64-bit");
8438                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8439         }
8440         return str;
8441 }
8442
8443 static void
8444 bnx2_del_napi(struct bnx2 *bp)
8445 {
8446         int i;
8447
8448         for (i = 0; i < bp->irq_nvecs; i++)
8449                 netif_napi_del(&bp->bnx2_napi[i].napi);
8450 }
8451
8452 static void
8453 bnx2_init_napi(struct bnx2 *bp)
8454 {
8455         int i;
8456
8457         for (i = 0; i < bp->irq_nvecs; i++) {
8458                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8459                 int (*poll)(struct napi_struct *, int);
8460
8461                 if (i == 0)
8462                         poll = bnx2_poll;
8463                 else
8464                         poll = bnx2_poll_msix;
8465
8466                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8467                 bnapi->bp = bp;
8468         }
8469 }
8470
/* net_device callbacks wired into every bnx2 netdev at probe time. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8488
/* PCI probe entry point: allocate the multi-queue netdev, initialize the
 * board (bnx2_init_board does the PCI/chip bring-up and fills in bp),
 * advertise offload features and register the netdev.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here and in bnx2_init_board() are released.
 */
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shmem by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Unwind what bnx2_init_board() set up. */
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free:
	free_netdev(dev);
	return rc;
}
8555
/* PCI remove entry point: unregister the netdev first (so no new opens
 * or ioctls can race in), then stop deferred work, unmap registers, and
 * release PCI resources in reverse order of acquisition.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Stop the periodic timer and any queued reset work before
	 * tearing down resources they might touch.
	 */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8584
/* PM suspend callback: quiesce a running interface (stop NAPI/queues,
 * detach the netdev, stop the timer, reset the chip, release IRQs and
 * ring buffers), then program Wake-on-LAN state.  Always returns 0.
 */
static int
bnx2_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	/* WoL setup is done even when the interface is down. */
	bnx2_setup_wol(bp);
	return 0;
}
8604
/* PM resume callback: undo bnx2_suspend().  If the interface was down
 * at suspend time there is nothing to restore.  Otherwise power the
 * chip back to D0, reattach the netdev, re-request IRQs, reinitialize
 * the NIC and restart NAPI/queues.  Always returns 0.
 */
static int
bnx2_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8622
8623 #ifdef CONFIG_PM_SLEEP
8624 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8625 #define BNX2_PM_OPS (&bnx2_pm_ops)
8626
8627 #else
8628
8629 #define BNX2_PM_OPS NULL
8630
8631 #endif /* CONFIG_PM_SLEEP */
8632 /**
8633  * bnx2_io_error_detected - called when PCI error is detected
8634  * @pdev: Pointer to PCI device
8635  * @state: The current pci connection state
8636  *
8637  * This function is called after a PCI bus error affecting
8638  * this device has been detected.
8639  */
8640 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8641                                                pci_channel_state_t state)
8642 {
8643         struct net_device *dev = pci_get_drvdata(pdev);
8644         struct bnx2 *bp = netdev_priv(dev);
8645
8646         rtnl_lock();
8647         netif_device_detach(dev);
8648
8649         if (state == pci_channel_io_perm_failure) {
8650                 rtnl_unlock();
8651                 return PCI_ERS_RESULT_DISCONNECT;
8652         }
8653
8654         if (netif_running(dev)) {
8655                 bnx2_netif_stop(bp, true);
8656                 del_timer_sync(&bp->timer);
8657                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8658         }
8659
8660         pci_disable_device(pdev);
8661         rtnl_unlock();
8662
8663         /* Request a slot slot reset. */
8664         return PCI_ERS_RESULT_NEED_RESET;
8665 }
8666
8667 /**
8668  * bnx2_io_slot_reset - called after the pci bus has been reset.
8669  * @pdev: Pointer to PCI device
8670  *
8671  * Restart the card from scratch, as if from a cold-boot.
8672  */
8673 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8674 {
8675         struct net_device *dev = pci_get_drvdata(pdev);
8676         struct bnx2 *bp = netdev_priv(dev);
8677         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8678         int err = 0;
8679
8680         rtnl_lock();
8681         if (pci_enable_device(pdev)) {
8682                 dev_err(&pdev->dev,
8683                         "Cannot re-enable PCI device after reset\n");
8684         } else {
8685                 pci_set_master(pdev);
8686                 pci_restore_state(pdev);
8687                 pci_save_state(pdev);
8688
8689                 if (netif_running(dev))
8690                         err = bnx2_init_nic(bp, 1);
8691
8692                 if (!err)
8693                         result = PCI_ERS_RESULT_RECOVERED;
8694         }
8695
8696         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8697                 bnx2_napi_enable(bp);
8698                 dev_close(dev);
8699         }
8700         rtnl_unlock();
8701
8702         if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8703                 return result;
8704
8705         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8706         if (err) {
8707                 dev_err(&pdev->dev,
8708                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8709                          err); /* non-fatal, continue */
8710         }
8711
8712         return result;
8713 }
8714
8715 /**
8716  * bnx2_io_resume - called when traffic can start flowing again.
8717  * @pdev: Pointer to PCI device
8718  *
8719  * This callback is called when the error recovery driver tells us that
8720  * its OK to resume normal operation.
8721  */
8722 static void bnx2_io_resume(struct pci_dev *pdev)
8723 {
8724         struct net_device *dev = pci_get_drvdata(pdev);
8725         struct bnx2 *bp = netdev_priv(dev);
8726
8727         rtnl_lock();
8728         if (netif_running(dev))
8729                 bnx2_netif_start(bp, true);
8730
8731         netif_device_attach(dev);
8732         rtnl_unlock();
8733 }
8734
8735 static void bnx2_shutdown(struct pci_dev *pdev)
8736 {
8737         struct net_device *dev = pci_get_drvdata(pdev);
8738         struct bnx2 *bp;
8739
8740         if (!dev)
8741                 return;
8742
8743         bp = netdev_priv(dev);
8744         if (!bp)
8745                 return;
8746
8747         rtnl_lock();
8748         if (netif_running(dev))
8749                 dev_close(bp->dev);
8750
8751         if (system_state == SYSTEM_POWER_OFF)
8752                 bnx2_set_power_state(bp, PCI_D3hot);
8753
8754         rtnl_unlock();
8755 }
8756
/* PCI error recovery callbacks (AER): detect -> slot reset -> resume. */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8762
/* Driver registration: ties the device ID table, probe/remove, PM ops,
 * error handlers and shutdown hook together; module_pci_driver()
 * generates the module init/exit boilerplate.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};

module_pci_driver(bnx2_pci_driver);