1 /* bnx2x_main.c: QLogic Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
17 *
18 */
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/kernel.h>
25 #include <linux/device.h> /* for dev_info() */
26 #include <linux/timer.h>
27 #include <linux/errno.h>
28 #include <linux/ioport.h>
29 #include <linux/slab.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/aer.h>
33 #include <linux/init.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/bitops.h>
39 #include <linux/irq.h>
40 #include <linux/delay.h>
41 #include <asm/byteorder.h>
42 #include <linux/time.h>
43 #include <linux/ethtool.h>
44 #include <linux/mii.h>
45 #include <linux/if_vlan.h>
46 #include <linux/crash_dump.h>
47 #include <net/ip.h>
48 #include <net/ipv6.h>
49 #include <net/tcp.h>
50 #include <net/vxlan.h>
51 #include <net/checksum.h>
52 #include <net/ip6_checksum.h>
53 #include <linux/workqueue.h>
54 #include <linux/crc32.h>
55 #include <linux/crc32c.h>
56 #include <linux/prefetch.h>
57 #include <linux/zlib.h>
58 #include <linux/io.h>
59 #include <linux/semaphore.h>
60 #include <linux/stringify.h>
61 #include <linux/vmalloc.h>
63 #include "bnx2x_init.h"
64 #include "bnx2x_init_ops.h"
65 #include "bnx2x_cmn.h"
66 #include "bnx2x_vfpf.h"
67 #include "bnx2x_dcb.h"
69 #include <linux/firmware.h>
70 #include "bnx2x_fw_file_hdr.h"
72 #define FW_FILE_VERSION \
73 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
74 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
75 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
76 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
77 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
78 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
79 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
81 /* Time in jiffies before concluding the transmitter is hung */
82 #define TX_TIMEOUT (5*HZ)
84 static char version[] =
85 "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
86 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
88 MODULE_AUTHOR("Eliezer Tamir");
89 MODULE_DESCRIPTION("QLogic "
90 "BCM57710/57711/57711E/"
91 "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
92 "57840/57840_MF Driver");
93 MODULE_LICENSE("GPL");
94 MODULE_VERSION(DRV_MODULE_VERSION);
95 MODULE_FIRMWARE(FW_FILE_NAME_E1);
96 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
97 MODULE_FIRMWARE(FW_FILE_NAME_E2);
100 module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
101 MODULE_PARM_DESC(num_queues,
102 " Set number of queues (default is as a number of CPUs)");
104 static int disable_tpa;
105 module_param(disable_tpa, int, S_IRUGO);
106 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
108 static int int_mode;
109 module_param(int_mode, int, S_IRUGO);
110 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
111 "(1 INT#x; 2 MSI)");
113 static int dropless_fc;
114 module_param(dropless_fc, int, S_IRUGO);
115 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
117 static int mrrs = -1;
118 module_param(mrrs, int, S_IRUGO);
119 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
121 static int debug;
122 module_param(debug, int, S_IRUGO);
123 MODULE_PARM_DESC(debug, " Default debug msglevel");
125 static struct workqueue_struct *bnx2x_wq;
126 struct workqueue_struct *bnx2x_iov_wq;
128 struct bnx2x_mac_vals {
139 enum bnx2x_board_type {
163 /* indexed by board_type, above */
164 static struct {
165 char *name;
166 } board_info[] = {
167 [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
168 [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
169 [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
170 [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
171 [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
172 [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
173 [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
174 [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
175 [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
176 [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
177 [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
178 [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
179 [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
180 [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
181 [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
182 [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
183 [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
184 [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
185 [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
186 [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
187 [BCM57811_VF] = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
188 };
190 #ifndef PCI_DEVICE_ID_NX2_57710
191 #define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
192 #endif
193 #ifndef PCI_DEVICE_ID_NX2_57711
194 #define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
195 #endif
196 #ifndef PCI_DEVICE_ID_NX2_57711E
197 #define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
198 #endif
199 #ifndef PCI_DEVICE_ID_NX2_57712
200 #define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
201 #endif
202 #ifndef PCI_DEVICE_ID_NX2_57712_MF
203 #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
204 #endif
205 #ifndef PCI_DEVICE_ID_NX2_57712_VF
206 #define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
207 #endif
208 #ifndef PCI_DEVICE_ID_NX2_57800
209 #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
210 #endif
211 #ifndef PCI_DEVICE_ID_NX2_57800_MF
212 #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
213 #endif
214 #ifndef PCI_DEVICE_ID_NX2_57800_VF
215 #define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
216 #endif
217 #ifndef PCI_DEVICE_ID_NX2_57810
218 #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
219 #endif
220 #ifndef PCI_DEVICE_ID_NX2_57810_MF
221 #define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
222 #endif
223 #ifndef PCI_DEVICE_ID_NX2_57840_O
224 #define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
225 #endif
226 #ifndef PCI_DEVICE_ID_NX2_57810_VF
227 #define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
228 #endif
229 #ifndef PCI_DEVICE_ID_NX2_57840_4_10
230 #define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
231 #endif
232 #ifndef PCI_DEVICE_ID_NX2_57840_2_20
233 #define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
234 #endif
235 #ifndef PCI_DEVICE_ID_NX2_57840_MFO
236 #define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
237 #endif
238 #ifndef PCI_DEVICE_ID_NX2_57840_MF
239 #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
240 #endif
241 #ifndef PCI_DEVICE_ID_NX2_57840_VF
242 #define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
243 #endif
244 #ifndef PCI_DEVICE_ID_NX2_57811
245 #define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
246 #endif
247 #ifndef PCI_DEVICE_ID_NX2_57811_MF
248 #define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
249 #endif
250 #ifndef PCI_DEVICE_ID_NX2_57811_VF
251 #define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
252 #endif
254 static const struct pci_device_id bnx2x_pci_tbl[] = {
255 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
256 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
257 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
258 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
259 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
260 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
261 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
262 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
263 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
264 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
265 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
266 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
267 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
268 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
269 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
270 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
271 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
272 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
273 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
274 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
275 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
276 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
277 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
278 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
279 { 0 }
280 };
282 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
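/* Each match above carries a bnx2x_board_type value in its driver_data
 * field; the probe path uses it as the index into board_info[] to pick
 * the human-readable board name.
 */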
284 /* Global resources for unloading a previously loaded device */
285 #define BNX2X_PREV_WAIT_NEEDED 1
286 static DEFINE_SEMAPHORE(bnx2x_prev_sem);
287 static LIST_HEAD(bnx2x_prev_list);
289 /* Forward declaration */
290 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
291 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
292 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
294 /****************************************************************************
295 * General service functions
296 ****************************************************************************/
298 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
300 static void __storm_memset_dma_mapping(struct bnx2x *bp,
301 u32 addr, dma_addr_t mapping)
303 REG_WR(bp, addr, U64_LO(mapping));
304 REG_WR(bp, addr + 4, U64_HI(mapping));
307 static void storm_memset_spq_addr(struct bnx2x *bp,
308 dma_addr_t mapping, u16 abs_fid)
310 u32 addr = XSEM_REG_FAST_MEMORY +
311 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
313 __storm_memset_dma_mapping(bp, addr, mapping);
316 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
319 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
321 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
323 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
325 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
329 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
332 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
334 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
336 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
338 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
342 static void storm_memset_eq_data(struct bnx2x *bp,
343 struct event_ring_data *eq_data,
346 size_t size = sizeof(struct event_ring_data);
348 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
350 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
353 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
356 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
357 REG_WR16(bp, addr, eq_prod);
360 /* used only at init
361 * locking is done by mcp
362 */
363 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
365 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
366 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
367 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
368 PCICFG_VENDOR_ID_OFFSET);
371 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
373 u32 val;
375 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
376 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
377 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
378 PCICFG_VENDOR_ID_OFFSET);
380 return val;
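/* Illustrative sketch (not part of the upstream driver): a read-modify-write
 * built on the indirect GRC window above.  Each access costs PCI config
 * cycles, which is why these helpers are only used at init time, before
 * BAR-based REG_RD()/REG_WR() and DMAE are available.
 */
static inline void bnx2x_reg_set_bits_ind(struct bnx2x *bp, u32 addr, u32 bits)
{
	u32 val = bnx2x_reg_rd_ind(bp, addr);	/* window -> addr, read */

	bnx2x_reg_wr_ind(bp, addr, val | bits);	/* window -> addr, write */
}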
383 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
384 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
385 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
386 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
387 #define DMAE_DP_DST_NONE "dst_addr [none]"
389 static void bnx2x_dp_dmae(struct bnx2x *bp,
390 struct dmae_command *dmae, int msglvl)
392 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
395 switch (dmae->opcode & DMAE_COMMAND_DST) {
396 case DMAE_CMD_DST_PCI:
397 if (src_type == DMAE_CMD_SRC_PCI)
398 DP(msglvl, "DMAE: opcode 0x%08x\n"
399 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
400 "comp_addr [%x:%08x], comp_val 0x%08x\n",
401 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
402 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
403 dmae->comp_addr_hi, dmae->comp_addr_lo,
406 DP(msglvl, "DMAE: opcode 0x%08x\n"
407 "src [%08x], len [%d*4], dst [%x:%08x]\n"
408 "comp_addr [%x:%08x], comp_val 0x%08x\n",
409 dmae->opcode, dmae->src_addr_lo >> 2,
410 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
411 dmae->comp_addr_hi, dmae->comp_addr_lo,
414 case DMAE_CMD_DST_GRC:
415 if (src_type == DMAE_CMD_SRC_PCI)
416 DP(msglvl, "DMAE: opcode 0x%08x\n"
417 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
418 "comp_addr [%x:%08x], comp_val 0x%08x\n",
419 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
420 dmae->len, dmae->dst_addr_lo >> 2,
421 dmae->comp_addr_hi, dmae->comp_addr_lo,
424 DP(msglvl, "DMAE: opcode 0x%08x\n"
425 "src [%08x], len [%d*4], dst [%08x]\n"
426 "comp_addr [%x:%08x], comp_val 0x%08x\n",
427 dmae->opcode, dmae->src_addr_lo >> 2,
428 dmae->len, dmae->dst_addr_lo >> 2,
429 dmae->comp_addr_hi, dmae->comp_addr_lo,
433 if (src_type == DMAE_CMD_SRC_PCI)
434 DP(msglvl, "DMAE: opcode 0x%08x\n"
435 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
436 "comp_addr [%x:%08x] comp_val 0x%08x\n",
437 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
438 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
441 DP(msglvl, "DMAE: opcode 0x%08x\n"
442 "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
443 "comp_addr [%x:%08x] comp_val 0x%08x\n",
444 dmae->opcode, dmae->src_addr_lo >> 2,
445 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
450 for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
451 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
452 i, *(((u32 *)dmae) + i));
455 /* copy command into DMAE command memory and set DMAE command go */
456 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
461 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
462 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
463 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
465 REG_WR(bp, dmae_reg_go_c[idx], 1);
468 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
470 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
474 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
476 return opcode & ~DMAE_CMD_SRC_RESET;
479 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
480 bool with_comp, u8 comp_type)
484 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
485 (dst_type << DMAE_COMMAND_DST_SHIFT));
487 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
489 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
490 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
491 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
492 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
495 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
497 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
500 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
504 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
505 struct dmae_command *dmae,
506 u8 src_type, u8 dst_type)
508 memset(dmae, 0, sizeof(struct dmae_command));
511 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
512 true, DMAE_COMP_PCI);
514 /* fill in the completion parameters */
515 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
516 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
517 dmae->comp_val = DMAE_COMP_VAL;
520 /* issue a dmae command over the init-channel and wait for completion */
521 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
522 u32 *comp)
524 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
527 bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
529 /* Lock the dmae channel. Disable BHs to prevent a deadlock
530 * as long as this code is called both from syscall context and
531 * from ndo_set_rx_mode() flow that may be called from BH.
532 */
534 spin_lock_bh(&bp->dmae_lock);
536 /* reset completion */
539 /* post the command on the channel used for initializations */
540 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
542 /* wait for completion */
544 while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
547 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
548 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
549 BNX2X_ERR("DMAE timeout!\n");
556 if (*comp & DMAE_PCI_ERR_FLAG) {
557 BNX2X_ERR("DMAE PCI error!\n");
563 spin_unlock_bh(&bp->dmae_lock);
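/* Illustrative sketch (not part of the upstream driver): a GRC-to-GRC copy
 * composed from the two helpers above.  bnx2x_write_dmae()/bnx2x_read_dmae()
 * below are the real PCI<->GRC variants; this only shows how prep and issue
 * pair up.  Assumes bp->dmae_ready and len32 within the DMAE length limit.
 */
static inline int bnx2x_copy_grc_dmae_sketch(struct bnx2x *bp, u32 src_addr,
					     u32 dst_addr, u32 len32)
{
	struct dmae_command dmae;

	/* opcode with a PCI completion written back to the wb_comp word */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_GRC);

	/* GRC addresses are dword-based, hence the >> 2 */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* post on the init channel and poll wb_comp for DMAE_COMP_VAL */
	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}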
568 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
569 u32 len32)
572 struct dmae_command dmae;
574 if (!bp->dmae_ready) {
575 u32 *data = bnx2x_sp(bp, wb_data[0]);
578 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
580 bnx2x_init_str_wr(bp, dst_addr, data, len32);
584 /* set opcode and fixed command fields */
585 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
587 /* fill in addresses and len */
588 dmae.src_addr_lo = U64_LO(dma_addr);
589 dmae.src_addr_hi = U64_HI(dma_addr);
590 dmae.dst_addr_lo = dst_addr >> 2;
591 dmae.dst_addr_hi = 0;
592 dmae.len = len32;
594 /* issue the command and wait for completion */
595 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
597 BNX2X_ERR("DMAE returned failure %d\n", rc);
598 #ifdef BNX2X_STOP_ON_ERROR
599 bnx2x_panic();
600 #endif
604 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
607 struct dmae_command dmae;
609 if (!bp->dmae_ready) {
610 u32 *data = bnx2x_sp(bp, wb_data[0]);
614 for (i = 0; i < len32; i++)
615 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
617 for (i = 0; i < len32; i++)
618 data[i] = REG_RD(bp, src_addr + i*4);
623 /* set opcode and fixed command fields */
624 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
626 /* fill in addresses and len */
627 dmae.src_addr_lo = src_addr >> 2;
628 dmae.src_addr_hi = 0;
629 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
630 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
631 dmae.len = len32;
633 /* issue the command and wait for completion */
634 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
636 BNX2X_ERR("DMAE returned failure %d\n", rc);
637 #ifdef BNX2X_STOP_ON_ERROR
638 bnx2x_panic();
639 #endif
643 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
646 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
649 while (len > dmae_wr_max) {
650 bnx2x_write_dmae(bp, phys_addr + offset,
651 addr + offset, dmae_wr_max);
652 offset += dmae_wr_max * 4;
653 len -= dmae_wr_max;
656 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
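/* Worked example for the loop above (illustrative numbers): with
 * DMAE_LEN32_WR_MAX(bp) == 0x2000 dwords and len == 0x4100 dwords, two
 * full 0x2000-dword commands are issued, 'offset' advancing by
 * dmae_wr_max * 4 bytes and 'len' dropping by dmae_wr_max each pass,
 * and the final call writes the remaining 0x100 dwords.
 */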
668 #define REGS_IN_ENTRY 4
670 static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
676 return XSTORM_ASSERT_LIST_OFFSET(entry);
678 return TSTORM_ASSERT_LIST_OFFSET(entry);
680 return CSTORM_ASSERT_LIST_OFFSET(entry);
682 return USTORM_ASSERT_LIST_OFFSET(entry);
685 BNX2X_ERR("unknown storm\n");
690 static int bnx2x_mc_assert(struct bnx2x *bp)
695 u32 regs[REGS_IN_ENTRY];
696 u32 bar_storm_intmem[STORMS_NUM] = {
702 u32 storm_assert_list_index[STORMS_NUM] = {
703 XSTORM_ASSERT_LIST_INDEX_OFFSET,
704 TSTORM_ASSERT_LIST_INDEX_OFFSET,
705 CSTORM_ASSERT_LIST_INDEX_OFFSET,
706 USTORM_ASSERT_LIST_INDEX_OFFSET
708 char *storms_string[STORMS_NUM] = {
715 for (storm = XSTORM; storm < MAX_STORMS; storm++) {
716 last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
717 storm_assert_list_index[storm]);
719 BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
720 storms_string[storm], last_idx);
722 /* print the asserts */
723 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
724 /* read a single assert entry */
725 for (j = 0; j < REGS_IN_ENTRY; j++)
726 regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
727 bnx2x_get_assert_list_entry(bp,
732 /* log entry if it contains a valid assert */
733 if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
734 BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
735 storms_string[storm], i, regs[3],
736 regs[2], regs[1], regs[0]);
744 BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
745 CHIP_IS_E1(bp) ? "everest1" :
746 CHIP_IS_E1H(bp) ? "everest1h" :
747 CHIP_IS_E2(bp) ? "everest2" : "everest3",
748 BCM_5710_FW_MAJOR_VERSION,
749 BCM_5710_FW_MINOR_VERSION,
750 BCM_5710_FW_REVISION_VERSION);
755 #define MCPR_TRACE_BUFFER_SIZE (0x800)
756 #define SCRATCH_BUFFER_SIZE(bp) \
757 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
759 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
765 u32 trace_shmem_base;
767 BNX2X_ERR("NO MCP - can not dump\n");
770 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
771 (bp->common.bc_ver & 0xff0000) >> 16,
772 (bp->common.bc_ver & 0xff00) >> 8,
773 (bp->common.bc_ver & 0xff));
775 if (pci_channel_offline(bp->pdev)) {
776 BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
780 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
781 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
782 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
784 if (BP_PATH(bp) == 0)
785 trace_shmem_base = bp->common.shmem_base;
787 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
790 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
791 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
792 SCRATCH_BUFFER_SIZE(bp)) {
793 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
798 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
800 /* validate TRCB signature */
801 mark = REG_RD(bp, addr);
802 if (mark != MFW_TRACE_SIGNATURE) {
803 BNX2X_ERR("Trace buffer signature is missing.");
807 /* read cyclic buffer pointer */
808 addr += 4;
809 mark = REG_RD(bp, addr);
810 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
811 if (mark >= trace_shmem_base || mark < addr + 4) {
812 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
815 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
819 /* dump buffer after the mark */
820 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
821 for (word = 0; word < 8; word++)
822 data[word] = htonl(REG_RD(bp, offset + 4*word));
824 pr_cont("%s", (char *)data);
827 /* dump buffer before the mark */
828 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
829 for (word = 0; word < 8; word++)
830 data[word] = htonl(REG_RD(bp, offset + 4*word));
832 pr_cont("%s", (char *)data);
834 printk("%s" "end of fw dump\n", lvl);
837 static void bnx2x_fw_dump(struct bnx2x *bp)
839 bnx2x_fw_dump_lvl(bp, KERN_ERR);
842 static void bnx2x_hc_int_disable(struct bnx2x *bp)
844 int port = BP_PORT(bp);
845 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
846 u32 val = REG_RD(bp, addr);
848 /* in E1 we must use only PCI configuration space to disable
849 * MSI/MSIX capability
850 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block
851 */
852 if (CHIP_IS_E1(bp)) {
853 /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
854 * use the mask register to prevent the HC from sending interrupts
855 * after we exit the function
856 */
857 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
859 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
860 HC_CONFIG_0_REG_INT_LINE_EN_0 |
861 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
863 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
864 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
865 HC_CONFIG_0_REG_INT_LINE_EN_0 |
866 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
869 "write %x to HC %d (addr 0x%x)\n",
872 /* flush all outstanding writes */
875 REG_WR(bp, addr, val);
876 if (REG_RD(bp, addr) != val)
877 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
880 static void bnx2x_igu_int_disable(struct bnx2x *bp)
882 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
884 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
885 IGU_PF_CONF_INT_LINE_EN |
886 IGU_PF_CONF_ATTN_BIT_EN);
888 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
890 /* flush all outstanding writes */
893 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
894 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
895 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
898 static void bnx2x_int_disable(struct bnx2x *bp)
900 if (bp->common.int_block == INT_BLOCK_HC)
901 bnx2x_hc_int_disable(bp);
903 bnx2x_igu_int_disable(bp);
906 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
910 struct hc_sp_status_block_data sp_sb_data;
911 int func = BP_FUNC(bp);
912 #ifdef BNX2X_STOP_ON_ERROR
913 u16 start = 0, end = 0;
914 u8 cos;
915 #endif
916 if (IS_PF(bp) && disable_int)
917 bnx2x_int_disable(bp);
919 bp->stats_state = STATS_STATE_DISABLED;
920 bp->eth_stats.unrecoverable_error++;
921 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
923 BNX2X_ERR("begin crash dump -----------------\n");
928 struct host_sp_status_block *def_sb = bp->def_status_blk;
929 int data_size, cstorm_offset;
931 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
932 bp->def_idx, bp->def_att_idx, bp->attn_state,
933 bp->spq_prod_idx, bp->stats_counter);
934 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
935 def_sb->atten_status_block.attn_bits,
936 def_sb->atten_status_block.attn_bits_ack,
937 def_sb->atten_status_block.status_block_id,
938 def_sb->atten_status_block.attn_bits_index);
940 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
942 def_sb->sp_sb.index_values[i],
943 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
945 data_size = sizeof(struct hc_sp_status_block_data) /
947 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
948 for (i = 0; i < data_size; i++)
949 *((u32 *)&sp_sb_data + i) =
950 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
953 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
954 sp_sb_data.igu_sb_id,
955 sp_sb_data.igu_seg_id,
956 sp_sb_data.p_func.pf_id,
957 sp_sb_data.p_func.vnic_id,
958 sp_sb_data.p_func.vf_id,
959 sp_sb_data.p_func.vf_valid,
963 for_each_eth_queue(bp, i) {
964 struct bnx2x_fastpath *fp = &bp->fp[i];
966 struct hc_status_block_data_e2 sb_data_e2;
967 struct hc_status_block_data_e1x sb_data_e1x;
968 struct hc_status_block_sm *hc_sm_p =
970 sb_data_e1x.common.state_machine :
971 sb_data_e2.common.state_machine;
972 struct hc_index_data *hc_index_p =
974 sb_data_e1x.index_data :
975 sb_data_e2.index_data;
978 struct bnx2x_fp_txdata txdata;
987 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
988 i, fp->rx_bd_prod, fp->rx_bd_cons,
990 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
991 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
992 fp->rx_sge_prod, fp->last_max_sge,
993 le16_to_cpu(fp->fp_hc_idx));
996 for_each_cos_in_tx_queue(fp, cos)
998 if (!fp->txdata_ptr[cos])
1001 txdata = *fp->txdata_ptr[cos];
1003 if (!txdata.tx_cons_sb)
1006 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
1007 i, txdata.tx_pkt_prod,
1008 txdata.tx_pkt_cons, txdata.tx_bd_prod,
1010 le16_to_cpu(*txdata.tx_cons_sb));
1013 loop = CHIP_IS_E1x(bp) ?
1014 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
1021 BNX2X_ERR(" run indexes (");
1022 for (j = 0; j < HC_SB_MAX_SM; j++)
1024 fp->sb_running_index[j],
1025 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
1027 BNX2X_ERR(" indexes (");
1028 for (j = 0; j < loop; j++)
1030 fp->sb_index_values[j],
1031 (j == loop - 1) ? ")" : " ");
1033 /* VF cannot access FW reflection for status block */
1038 data_size = CHIP_IS_E1x(bp) ?
1039 sizeof(struct hc_status_block_data_e1x) :
1040 sizeof(struct hc_status_block_data_e2);
1041 data_size /= sizeof(u32);
1042 sb_data_p = CHIP_IS_E1x(bp) ?
1043 (u32 *)&sb_data_e1x :
1045 /* copy sb data in here */
1046 for (j = 0; j < data_size; j++)
1047 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1048 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
1051 if (!CHIP_IS_E1x(bp)) {
1052 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1053 sb_data_e2.common.p_func.pf_id,
1054 sb_data_e2.common.p_func.vf_id,
1055 sb_data_e2.common.p_func.vf_valid,
1056 sb_data_e2.common.p_func.vnic_id,
1057 sb_data_e2.common.same_igu_sb_1b,
1058 sb_data_e2.common.state);
1060 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1061 sb_data_e1x.common.p_func.pf_id,
1062 sb_data_e1x.common.p_func.vf_id,
1063 sb_data_e1x.common.p_func.vf_valid,
1064 sb_data_e1x.common.p_func.vnic_id,
1065 sb_data_e1x.common.same_igu_sb_1b,
1066 sb_data_e1x.common.state);
1070 for (j = 0; j < HC_SB_MAX_SM; j++) {
1071 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1072 j, hc_sm_p[j].__flags,
1073 hc_sm_p[j].igu_sb_id,
1074 hc_sm_p[j].igu_seg_id,
1075 hc_sm_p[j].time_to_expire,
1076 hc_sm_p[j].timer_value);
1080 for (j = 0; j < loop; j++) {
1081 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1082 hc_index_p[j].flags,
1083 hc_index_p[j].timeout);
1087 #ifdef BNX2X_STOP_ON_ERROR
1090 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1091 for (i = 0; i < NUM_EQ_DESC; i++) {
1092 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1094 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1095 i, bp->eq_ring[i].message.opcode,
1096 bp->eq_ring[i].message.error);
1097 BNX2X_ERR("data: %x %x %x\n",
1098 data[0], data[1], data[2]);
1104 for_each_valid_rx_queue(bp, i) {
1105 struct bnx2x_fastpath *fp = &bp->fp[i];
1110 if (!fp->rx_cons_sb)
1113 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1114 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1115 for (j = start; j != end; j = RX_BD(j + 1)) {
1116 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1117 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1119 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1120 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
1123 start = RX_SGE(fp->rx_sge_prod);
1124 end = RX_SGE(fp->last_max_sge);
1125 for (j = start; j != end; j = RX_SGE(j + 1)) {
1126 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1127 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1129 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1130 i, j, rx_sge[1], rx_sge[0], sw_page->page);
1133 start = RCQ_BD(fp->rx_comp_cons - 10);
1134 end = RCQ_BD(fp->rx_comp_cons + 503);
1135 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1136 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1138 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1139 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1144 for_each_valid_tx_queue(bp, i) {
1145 struct bnx2x_fastpath *fp = &bp->fp[i];
1150 for_each_cos_in_tx_queue(fp, cos) {
1151 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1153 if (!fp->txdata_ptr[cos])
1156 if (!txdata->tx_cons_sb)
1159 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1160 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1161 for (j = start; j != end; j = TX_BD(j + 1)) {
1162 struct sw_tx_bd *sw_bd =
1163 &txdata->tx_buf_ring[j];
1165 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
1166 i, cos, j, sw_bd->skb,
1170 start = TX_BD(txdata->tx_bd_cons - 10);
1171 end = TX_BD(txdata->tx_bd_cons + 254);
1172 for (j = start; j != end; j = TX_BD(j + 1)) {
1173 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
1175 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
1176 i, cos, j, tx_bd[0], tx_bd[1],
1177 tx_bd[2], tx_bd[3]);
1184 bnx2x_mc_assert(bp);
1186 BNX2X_ERR("end crash dump -----------------\n");
1190 * FLR Support for E2
1192 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
1195 #define FLR_WAIT_USEC 10000 /* 10 milliseconds */
1196 #define FLR_WAIT_INTERVAL 50 /* usec */
1197 #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
1199 struct pbf_pN_buf_regs {
1206 struct pbf_pN_cmd_regs {
1212 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1213 struct pbf_pN_buf_regs *regs,
1216 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1217 u32 cur_cnt = poll_count;
1219 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1220 crd = crd_start = REG_RD(bp, regs->crd);
1221 init_crd = REG_RD(bp, regs->init_crd);
1223 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1224 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
1225 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1227 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1228 (init_crd - crd_start))) {
1230 udelay(FLR_WAIT_INTERVAL);
1231 crd = REG_RD(bp, regs->crd);
1232 crd_freed = REG_RD(bp, regs->crd_freed);
1234 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1236 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
1238 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1239 regs->pN, crd_freed);
1243 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1244 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1247 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1248 struct pbf_pN_cmd_regs *regs,
1251 u32 occup, to_free, freed, freed_start;
1252 u32 cur_cnt = poll_count;
1254 occup = to_free = REG_RD(bp, regs->lines_occup);
1255 freed = freed_start = REG_RD(bp, regs->lines_freed);
1257 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
1258 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1260 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1262 udelay(FLR_WAIT_INTERVAL);
1263 occup = REG_RD(bp, regs->lines_occup);
1264 freed = REG_RD(bp, regs->lines_freed);
1266 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1268 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
1270 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1275 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1276 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1279 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1280 u32 expected, u32 poll_count)
1282 u32 cur_cnt = poll_count;
1285 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1286 udelay(FLR_WAIT_INTERVAL);
1291 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1292 char *msg, u32 poll_cnt)
1294 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1296 BNX2X_ERR("%s usage count=%d\n", msg, val);
1302 /* Common routines with VF FLR cleanup */
1303 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1305 /* adjust polling timeout */
1306 if (CHIP_REV_IS_EMUL(bp))
1307 return FLR_POLL_CNT * 2000;
1309 if (CHIP_REV_IS_FPGA(bp))
1310 return FLR_POLL_CNT * 120;
1312 return FLR_POLL_CNT;
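/* Worked example for the constants above: FLR_POLL_CNT is
 * FLR_WAIT_USEC / FLR_WAIT_INTERVAL = 10000 / 50 = 200 polls of
 * udelay(50), i.e. ~10ms of polling on real silicon.  The emulation
 * factor (x2000) stretches that to ~20s and the FPGA factor (x120)
 * to ~1.2s, matching their slower clocks.
 */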
1315 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1317 struct pbf_pN_cmd_regs cmd_regs[] = {
1318 {0, (CHIP_IS_E3B0(bp)) ?
1319 PBF_REG_TQ_OCCUPANCY_Q0 :
1320 PBF_REG_P0_TQ_OCCUPANCY,
1321 (CHIP_IS_E3B0(bp)) ?
1322 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1323 PBF_REG_P0_TQ_LINES_FREED_CNT},
1324 {1, (CHIP_IS_E3B0(bp)) ?
1325 PBF_REG_TQ_OCCUPANCY_Q1 :
1326 PBF_REG_P1_TQ_OCCUPANCY,
1327 (CHIP_IS_E3B0(bp)) ?
1328 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1329 PBF_REG_P1_TQ_LINES_FREED_CNT},
1330 {4, (CHIP_IS_E3B0(bp)) ?
1331 PBF_REG_TQ_OCCUPANCY_LB_Q :
1332 PBF_REG_P4_TQ_OCCUPANCY,
1333 (CHIP_IS_E3B0(bp)) ?
1334 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1335 PBF_REG_P4_TQ_LINES_FREED_CNT}
1338 struct pbf_pN_buf_regs buf_regs[] = {
1339 {0, (CHIP_IS_E3B0(bp)) ?
1340 PBF_REG_INIT_CRD_Q0 :
1341 PBF_REG_P0_INIT_CRD ,
1342 (CHIP_IS_E3B0(bp)) ?
1345 (CHIP_IS_E3B0(bp)) ?
1346 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1347 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1348 {1, (CHIP_IS_E3B0(bp)) ?
1349 PBF_REG_INIT_CRD_Q1 :
1350 PBF_REG_P1_INIT_CRD,
1351 (CHIP_IS_E3B0(bp)) ?
1354 (CHIP_IS_E3B0(bp)) ?
1355 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1356 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1357 {4, (CHIP_IS_E3B0(bp)) ?
1358 PBF_REG_INIT_CRD_LB_Q :
1359 PBF_REG_P4_INIT_CRD,
1360 (CHIP_IS_E3B0(bp)) ?
1361 PBF_REG_CREDIT_LB_Q :
1363 (CHIP_IS_E3B0(bp)) ?
1364 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1365 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1370 /* Verify the command queues are flushed P0, P1, P4 */
1371 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1372 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1374 /* Verify the transmission buffers are flushed P0, P1, P4 */
1375 for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1376 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1379 #define OP_GEN_PARAM(param) \
1380 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1382 #define OP_GEN_TYPE(type) \
1383 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1385 #define OP_GEN_AGG_VECT(index) \
1386 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1388 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1390 u32 op_gen_command = 0;
1391 u32 comp_addr = BAR_CSTRORM_INTMEM +
1392 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1395 if (REG_RD(bp, comp_addr)) {
1396 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1400 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1401 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1402 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1403 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1405 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1406 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1408 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1409 BNX2X_ERR("FW final cleanup did not succeed\n");
1410 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1411 (REG_RD(bp, comp_addr)));
1415 /* Zero completion for next FLR */
1416 REG_WR(bp, comp_addr, 0);
1421 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1425 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1426 return status & PCI_EXP_DEVSTA_TRPND;
1429 /* PF FLR specific routines
1430 */
1431 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1433 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1434 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1435 CFC_REG_NUM_LCIDS_INSIDE_PF,
1436 "CFC PF usage counter timed out",
1440 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1441 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1442 DORQ_REG_PF_USAGE_CNT,
1443 "DQ PF usage counter timed out",
1447 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1448 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1449 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1450 "QM PF usage counter timed out",
1454 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1455 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1456 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1457 "Timers VNIC usage counter timed out",
1460 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1461 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1462 "Timers NUM_SCANS usage counter timed out",
1466 /* Wait DMAE PF usage counter to zero */
1467 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1468 dmae_reg_go_c[INIT_DMAE_C(bp)],
1469 "DMAE command register timed out",
1476 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1480 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1481 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1483 val = REG_RD(bp, PBF_REG_DISABLE_PF);
1484 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1486 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1487 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1489 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1490 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1492 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1493 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1495 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1496 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1498 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1499 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1501 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1502 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1506 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1508 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1510 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1512 /* Re-enable PF target read access */
1513 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1515 /* Poll HW usage counters */
1516 DP(BNX2X_MSG_SP, "Polling usage counters\n");
1517 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1520 /* Zero the igu 'trailing edge' and 'leading edge' */
1522 /* Send the FW cleanup command */
1523 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1528 /* Verify TX hw is flushed */
1529 bnx2x_tx_hw_flushed(bp, poll_cnt);
1531 /* Wait 100ms (not adjusted according to platform) */
1534 /* Verify no pending pci transactions */
1535 if (bnx2x_is_pcie_pending(bp->pdev))
1536 BNX2X_ERR("PCIE Transactions still pending\n");
1539 bnx2x_hw_enable_status(bp);
1541 /*
1542 * Master enable - Due to WB DMAE writes performed before this
1543 * register is re-initialized as part of the regular function init
1544 */
1545 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1550 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1552 int port = BP_PORT(bp);
1553 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1554 u32 val = REG_RD(bp, addr);
1555 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1556 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1557 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1560 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1561 HC_CONFIG_0_REG_INT_LINE_EN_0);
1562 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1563 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1565 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1567 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1568 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1569 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1570 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1572 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1573 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1574 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1575 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1577 if (!CHIP_IS_E1(bp)) {
1579 "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1581 REG_WR(bp, addr, val);
1583 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1588 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1591 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1592 (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1594 REG_WR(bp, addr, val);
1596 * Ensure that HC_CONFIG is written before leading/trailing edge config
1601 if (!CHIP_IS_E1(bp)) {
1602 /* init leading/trailing edge */
1604 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1606 /* enable nig and gpio3 attention */
1611 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1612 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1615 /* Make sure that interrupts are indeed enabled from here on */
1619 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1622 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1623 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1624 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1626 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1629 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1630 IGU_PF_CONF_SINGLE_ISR_EN);
1631 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1632 IGU_PF_CONF_ATTN_BIT_EN);
1635 val |= IGU_PF_CONF_SINGLE_ISR_EN;
1637 val &= ~IGU_PF_CONF_INT_LINE_EN;
1638 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1639 IGU_PF_CONF_ATTN_BIT_EN |
1640 IGU_PF_CONF_SINGLE_ISR_EN);
1642 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1643 val |= (IGU_PF_CONF_INT_LINE_EN |
1644 IGU_PF_CONF_ATTN_BIT_EN |
1645 IGU_PF_CONF_SINGLE_ISR_EN);
1648 /* Clean previous status - need to configure igu prior to ack */
1649 if ((!msix) || single_msix) {
1650 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1654 val |= IGU_PF_CONF_FUNC_EN;
1656 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
1657 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1659 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1661 if (val & IGU_PF_CONF_INT_LINE_EN)
1662 pci_intx(bp->pdev, true);
1666 /* init leading/trailing edge */
1668 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1670 /* enable nig and gpio3 attention */
1675 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1676 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1678 /* Make sure that interrupts are indeed enabled from here on */
1682 void bnx2x_int_enable(struct bnx2x *bp)
1684 if (bp->common.int_block == INT_BLOCK_HC)
1685 bnx2x_hc_int_enable(bp);
1687 bnx2x_igu_int_enable(bp);
1690 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1692 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1696 /* prevent the HW from sending interrupts */
1697 bnx2x_int_disable(bp);
1699 /* make sure all ISRs are done */
1701 synchronize_irq(bp->msix_table[0].vector);
1703 if (CNIC_SUPPORT(bp))
1705 for_each_eth_queue(bp, i)
1706 synchronize_irq(bp->msix_table[offset++].vector);
1708 synchronize_irq(bp->pdev->irq);
1710 /* make sure sp_task is not running */
1711 cancel_delayed_work(&bp->sp_task);
1712 cancel_delayed_work(&bp->period_task);
1713 flush_workqueue(bnx2x_wq);
1719 * General service functions
1722 /* Return true if succeeded to acquire the lock */
1723 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1726 u32 resource_bit = (1 << resource);
1727 int func = BP_FUNC(bp);
1728 u32 hw_lock_control_reg;
1730 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1731 "Trying to take a lock on resource %d\n", resource);
1733 /* Validating that the resource is within range */
1734 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1735 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1736 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1737 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1742 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1744 hw_lock_control_reg =
1745 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1747 /* Try to acquire the lock */
1748 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1749 lock_status = REG_RD(bp, hw_lock_control_reg);
1750 if (lock_status & resource_bit)
1751 return true;
1753 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1754 "Failed to get a lock on resource %d\n", resource);
1759 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1761 * @bp: driver handle
1763 * Returns the recovery leader resource id according to the engine this function
1764 * belongs to. Currently only 2 engines are supported.
1766 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1769 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1771 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1775 * bnx2x_trylock_leader_lock- try to acquire a leader lock.
1777 * @bp: driver handle
1779 * Tries to acquire a leader lock for current engine.
1781 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1783 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1786 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1788 /* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1789 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1791 /* Set the interrupt occurred bit for the sp-task to recognize it
1792 * must ack the interrupt and transition according to the IGU
1793 * state machine.
1794 */
1795 atomic_set(&bp->interrupt_occurred, 1);
1797 /* The sp_task must execute only after this bit
1798 * is set, otherwise we will get out of sync and miss all
1799 * further interrupts. Hence, the barrier.
1800 */
1801 smp_wmb();
1803 /* schedule sp_task to workqueue */
1804 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1807 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1809 struct bnx2x *bp = fp->bp;
1810 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1811 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1812 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1813 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1816 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1817 fp->index, cid, command, bp->state,
1818 rr_cqe->ramrod_cqe.ramrod_type);
1820 /* If cid is within VF range, replace the slowpath object with the
1821 * one corresponding to this VF
1822 */
1823 if (cid >= BNX2X_FIRST_VF_CID &&
1824 cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1825 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1828 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1829 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1830 drv_cmd = BNX2X_Q_CMD_UPDATE;
1833 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1834 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1835 drv_cmd = BNX2X_Q_CMD_SETUP;
1838 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1839 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1840 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1843 case (RAMROD_CMD_ID_ETH_HALT):
1844 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1845 drv_cmd = BNX2X_Q_CMD_HALT;
1848 case (RAMROD_CMD_ID_ETH_TERMINATE):
1849 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1850 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1853 case (RAMROD_CMD_ID_ETH_EMPTY):
1854 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1855 drv_cmd = BNX2X_Q_CMD_EMPTY;
1858 case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1859 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1860 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1864 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1865 command, fp->index);
1869 if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1870 q_obj->complete_cmd(bp, q_obj, drv_cmd))
1871 /* q_obj->complete_cmd() failure means that this was
1872 * an unexpected completion.
1874 * In this case we don't want to increase the bp->spq_left
1875 * because apparently we haven't sent this command in the
1876 * first place.
1877 */
1878 #ifdef BNX2X_STOP_ON_ERROR
1879 bnx2x_panic();
1880 #else
1881 return;
1882 #endif
1884 smp_mb__before_atomic();
1885 atomic_inc(&bp->cq_spq_left);
1886 /* push the change in bp->spq_left and towards the memory */
1887 smp_mb__after_atomic();
1889 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1891 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1892 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1893 /* if Q update ramrod is completed for last Q in AFEX vif set
1894 * flow, then ACK MCP at the end
1896 * mark pending ACK to MCP bit.
1897 * prevent case that both bits are cleared.
1898 * At the end of load/unload driver checks that
1899 * sp_state is cleared, and this order prevents
1900 * races.
1901 */
1902 smp_mb__before_atomic();
1903 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1905 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1906 smp_mb__after_atomic();
1908 /* schedule the sp task as mcp ack is required */
1909 bnx2x_schedule_sp_task(bp);
1915 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1917 struct bnx2x *bp = netdev_priv(dev_instance);
1918 u16 status = bnx2x_ack_int(bp);
1923 /* Return here if interrupt is shared and it's not for us */
1924 if (unlikely(status == 0)) {
1925 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1928 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1930 #ifdef BNX2X_STOP_ON_ERROR
1931 if (unlikely(bp->panic))
1932 return IRQ_HANDLED;
1933 #endif
1935 for_each_eth_queue(bp, i) {
1936 struct bnx2x_fastpath *fp = &bp->fp[i];
1938 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1939 if (status & mask) {
1940 /* Handle Rx or Tx according to SB id */
1941 for_each_cos_in_tx_queue(fp, cos)
1942 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1943 prefetch(&fp->sb_running_index[SM_RX_ID]);
1944 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1949 if (CNIC_SUPPORT(bp)) {
1950 mask = 0x2;
1951 if (status & (mask | 0x1)) {
1952 struct cnic_ops *c_ops = NULL;
1955 c_ops = rcu_dereference(bp->cnic_ops);
1956 if (c_ops && (bp->cnic_eth_dev.drv_state &
1957 CNIC_DRV_STATE_HANDLES_IRQ))
1958 c_ops->cnic_handler(bp->cnic_data, NULL);
1965 if (unlikely(status & 0x1)) {
1967 /* schedule sp task to perform default status block work, ack
1968 * attentions and enable interrupts.
1970 bnx2x_schedule_sp_task(bp);
1977 if (unlikely(status))
1978 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1987 * General service functions
1990 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1993 u32 resource_bit = (1 << resource);
1994 int func = BP_FUNC(bp);
1995 u32 hw_lock_control_reg;
1998 /* Validating that the resource is within range */
1999 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2000 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2001 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2006 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2008 hw_lock_control_reg =
2009 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2012 /* Validating that the resource is not already taken */
2013 lock_status = REG_RD(bp, hw_lock_control_reg);
2014 if (lock_status & resource_bit) {
2015 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
2016 lock_status, resource_bit);
2020 /* Try for 5 seconds every 5ms */
2021 for (cnt = 0; cnt < 1000; cnt++) {
2022 /* Try to acquire the lock */
2023 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2024 lock_status = REG_RD(bp, hw_lock_control_reg);
2025 if (lock_status & resource_bit)
2026 return 0;
2028 usleep_range(5000, 10000);
2030 BNX2X_ERR("Timeout\n");
2034 int bnx2x_release_leader_lock(struct bnx2x *bp)
2036 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2039 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2042 u32 resource_bit = (1 << resource);
2043 int func = BP_FUNC(bp);
2044 u32 hw_lock_control_reg;
2046 /* Validating that the resource is within range */
2047 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2048 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2049 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2054 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2056 hw_lock_control_reg =
2057 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2060 /* Validating that the resource is currently taken */
2061 lock_status = REG_RD(bp, hw_lock_control_reg);
2062 if (!(lock_status & resource_bit)) {
2063 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2064 lock_status, resource_bit);
2068 REG_WR(bp, hw_lock_control_reg, resource_bit);
2069 return 0;
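/* Illustrative sketch (not part of the upstream driver): the canonical
 * pairing of the two helpers above.  A per-resource bit is taken through
 * the MISC driver-control register, the shared block is touched, and the
 * same bit is written back to release.  The GPIO/SPIO helpers below
 * follow exactly this pattern.
 */
static inline int bnx2x_hw_lock_usage_sketch(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	if (rc)
		return rc;	/* acquire timed out after ~5 seconds */

	/* ... read-modify-write MISC_REG_GPIO under the lock ... */

	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}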
2072 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2074 /* The GPIO should be swapped if swap register is set and active */
2075 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2076 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2077 int gpio_shift = gpio_num +
2078 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2079 u32 gpio_mask = (1 << gpio_shift);
2080 u32 gpio_reg;
2081 int value;
2083 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2084 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2088 /* read GPIO value */
2089 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2091 /* get the requested pin value */
2093 if ((gpio_reg & gpio_mask) == gpio_mask)
2094 value = 1;
2095 else
2096 value = 0;
2098 return value;
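/* Worked example of the port-swap logic above: with NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE both non-zero, port 0 XORs to gpio_port 1, so
 * GPIO 2 is sampled at bit (2 + MISC_REGISTERS_GPIO_PORT_SHIFT) instead
 * of bit 2.  With either register clear, gpio_port == port and no extra
 * shift is applied.
 */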
2100 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2102 /* The GPIO should be swapped if swap register is set and active */
2103 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2104 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2105 int gpio_shift = gpio_num +
2106 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2107 u32 gpio_mask = (1 << gpio_shift);
2108 u32 gpio_reg;
2110 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2111 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2115 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2116 /* read GPIO and mask except the float bits */
2117 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2120 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2122 "Set GPIO %d (shift %d) -> output low\n",
2123 gpio_num, gpio_shift);
2124 /* clear FLOAT and set CLR */
2125 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2126 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2129 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2131 "Set GPIO %d (shift %d) -> output high\n",
2132 gpio_num, gpio_shift);
2133 /* clear FLOAT and set SET */
2134 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2135 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2138 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2140 "Set GPIO %d (shift %d) -> input\n",
2141 gpio_num, gpio_shift);
2143 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2150 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2151 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2153 return 0;
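/* Illustrative sketch (not part of the upstream driver): a reset pulse on
 * a hypothetical PHY-reset pin using bnx2x_set_gpio() above - drive the
 * pin low, hold, then float it again.  Pin number and hold time are
 * assumptions for the example only.
 */
static inline void bnx2x_gpio_reset_pulse_sketch(struct bnx2x *bp, u8 port)
{
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
	msleep(10);		/* hypothetical hold time */
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
}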
2156 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2161 /* Any port swapping should be handled by caller. */
2163 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2164 /* read GPIO and mask except the float bits */
2165 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2166 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2167 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2168 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2171 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2172 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2174 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2177 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2178 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2180 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2183 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2184 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2186 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2190 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2196 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2198 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2203 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2205 /* The GPIO should be swapped if swap register is set and active */
2206 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2207 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2208 int gpio_shift = gpio_num +
2209 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2210 u32 gpio_mask = (1 << gpio_shift);
2213 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2214 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2218 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2220 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2223 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2225 "Clear GPIO INT %d (shift %d) -> output low\n",
2226 gpio_num, gpio_shift);
2227 /* clear SET and set CLR */
2228 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2229 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2232 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2234 "Set GPIO INT %d (shift %d) -> output high\n",
2235 gpio_num, gpio_shift);
2236 /* clear CLR and set SET */
2237 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2238 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2245 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2246 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2251 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2255 /* Only 2 SPIOs are configurable */
2256 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2257 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2261 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2262 /* read SPIO and mask all but the FLOAT bits */
2263 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2266 case MISC_SPIO_OUTPUT_LOW:
2267 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2268 /* clear FLOAT and set CLR */
2269 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2270 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2273 case MISC_SPIO_OUTPUT_HIGH:
2274 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2275 /* clear FLOAT and set SET */
2276 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2277 spio_reg |= (spio << MISC_SPIO_SET_POS);
2280 case MISC_SPIO_INPUT_HI_Z:
2281 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2283 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2290 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2291 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2296 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2298 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2300 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2302 switch (bp->link_vars.ieee_fc &
2303 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2304 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2305 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2309 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2310 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2318 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2320 /* Initialize link parameters structure variables
2321 * It is recommended to turn off RX FC for jumbo frames
2322 * for better performance
2324 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2325 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2327 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2330 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2332 u32 pause_enabled = 0;
2334 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2335 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2338 REG_WR(bp, BAR_USTRORM_INTMEM +
2339 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2343 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2344 pause_enabled ? "enabled" : "disabled");
2347 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2349 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2350 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2352 if (!BP_NOMCP(bp)) {
2353 bnx2x_set_requested_fc(bp);
2354 bnx2x_acquire_phy_lock(bp);
2356 if (load_mode == LOAD_DIAG) {
2357 struct link_params *lp = &bp->link_params;
2358 lp->loopback_mode = LOOPBACK_XGXS;
2359 /* Prefer doing PHY loopback at highest speed */
2360 if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2361 if (lp->speed_cap_mask[cfx_idx] &
2362 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2363 lp->req_line_speed[cfx_idx] =
2365 else if (lp->speed_cap_mask[cfx_idx] &
2366 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2367 lp->req_line_speed[cfx_idx] =
2370 lp->req_line_speed[cfx_idx] =
2375 if (load_mode == LOAD_LOOPBACK_EXT) {
2376 struct link_params *lp = &bp->link_params;
2377 lp->loopback_mode = LOOPBACK_EXT;
2380 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2382 bnx2x_release_phy_lock(bp);
2384 bnx2x_init_dropless_fc(bp);
2386 bnx2x_calc_fc_adv(bp);
2388 if (bp->link_vars.link_up) {
2389 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2390 bnx2x_link_report(bp);
2392 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2393 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2396 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2400 void bnx2x_link_set(struct bnx2x *bp)
2402 if (!BP_NOMCP(bp)) {
2403 bnx2x_acquire_phy_lock(bp);
2404 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2405 bnx2x_release_phy_lock(bp);
2407 bnx2x_init_dropless_fc(bp);
2409 bnx2x_calc_fc_adv(bp);
2411 BNX2X_ERR("Bootcode is missing - can not set link\n");
2414 static void bnx2x__link_reset(struct bnx2x *bp)
2416 if (!BP_NOMCP(bp)) {
2417 bnx2x_acquire_phy_lock(bp);
2418 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2419 bnx2x_release_phy_lock(bp);
2421 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2424 void bnx2x_force_link_reset(struct bnx2x *bp)
2426 bnx2x_acquire_phy_lock(bp);
2427 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2428 bnx2x_release_phy_lock(bp);
2431 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2435 if (!BP_NOMCP(bp)) {
2436 bnx2x_acquire_phy_lock(bp);
2437 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2439 bnx2x_release_phy_lock(bp);
2441 BNX2X_ERR("Bootcode is missing - can not test link\n");
2446 /* Calculates the sum of vn_min_rates.
2447 It's needed for further normalizing of the min_rates.
2448 Returns:
2449 sum of vn_min_rates.
2450 or
2451 0 - if all the min_rates are 0.
2452 In the latter case the fairness algorithm should be deactivated.
2453 If not all min_rates are zero then those that are zero will be set to 1.
2455 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2456 struct cmng_init_input *input)
2461 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2462 u32 vn_cfg = bp->mf_config[vn];
2463 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2464 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2466 /* Skip hidden vns */
2467 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2469 /* If min rate is zero - set it to 1 */
2470 else if (!vn_min_rate)
2471 vn_min_rate = DEF_MIN_RATE;
2475 input->vnic_min_rate[vn] = vn_min_rate;
2478 /* if ETS or all min rates are zeros - disable fairness */
2479 if (BNX2X_IS_ETS_ENABLED(bp)) {
2480 input->flags.cmng_enables &=
2481 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2482 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2483 } else if (all_zero) {
2484 input->flags.cmng_enables &=
2485 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2487 "All MIN values are zeroes fairness will be disabled\n");
2489 input->flags.cmng_enables |=
2490 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
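/* Worked example for the min-rate extraction above (numbers assumed for
 * illustration): a FUNC_MF_CFG_MIN_BW field of 25 yields
 * vn_min_rate = 25 * 100 = 2500, while a configured 0 is bumped to
 * DEF_MIN_RATE so the fairness algorithm never sees a zero weight.
 */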
2493 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2494 struct cmng_init_input *input)
2497 u32 vn_cfg = bp->mf_config[vn];
2499 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2502 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2504 if (IS_MF_PERCENT_BW(bp)) {
2505 /* maxCfg is in percent of link speed */
2506 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2507 } else /* SD modes */
2508 /* maxCfg is absolute in 100Mb units */
2509 vn_max_rate = maxCfg * 100;
2512 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2514 input->vnic_max_rate[vn] = vn_max_rate;
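/* Worked example for vn_max_rate (illustrative numbers): in percent-BW mode
 * on a 10G link (line_speed = 10000), maxCfg = 40 gives
 * vn_max_rate = 10000 * 40 / 100 = 4000; in SD mode the same maxCfg is
 * absolute in 100Mb units, so 40 * 100 = 4000 as well.
 */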
2517 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2519 if (CHIP_REV_IS_SLOW(bp))
2520 return CMNG_FNS_NONE;
2522 return CMNG_FNS_MINMAX;
2524 return CMNG_FNS_NONE;
2527 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2529 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2532 return; /* what should be the default value in this case? */
2534 /* For 2 port configuration the absolute function number formula
2536 * abs_func = 2 * vn + BP_PORT + BP_PATH
2538 * and there are 4 functions per port
2540 * For 4 port configuration it is
2541 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2543 * and there are 2 functions per port
2545 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2546 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2548 if (func >= E1H_FUNC_MAX)
2552 MF_CFG_RD(bp, func_mf_config[func].config);
2554 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2555 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2556 bp->flags |= MF_FUNC_DIS;
2558 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2559 bp->flags &= ~MF_FUNC_DIS;
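/* Worked example of the abs_func formula above (illustrative values): in the
 * 2-port configuration (n == 1), vn = 1 on port 1 of path 0 maps to
 * func = 1 * (2 * 1 + 1) + 0 = 3, while in 4-port mode (n == 2) the same
 * vn/port pair maps to func = 2 * (2 * 1 + 1) + 0 = 6.
 */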
2563 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2565 struct cmng_init_input input;
2566 memset(&input, 0, sizeof(struct cmng_init_input));
2568 input.port_rate = bp->link_vars.line_speed;
2570 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2573 /* read mf conf from shmem */
2575 bnx2x_read_mf_cfg(bp);
2577 /* calculate vn_weight_sum and enable fairness if not 0 */
2578 bnx2x_calc_vn_min(bp, &input);
2580 /* calculate and set min-max rate for each vn */
2582 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2583 bnx2x_calc_vn_max(bp, vn, &input);
2585 /* always enable rate shaping and fairness */
2586 input.flags.cmng_enables |=
2587 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2589 bnx2x_init_cmng(&input, &bp->cmng);
2593 /* rate shaping and fairness are disabled */
2595 "rate shaping and fairness are disabled\n");
2598 static void storm_memset_cmng(struct bnx2x *bp,
2599 struct cmng_init *cmng,
2603 size_t size = sizeof(struct cmng_struct_per_port);
2605 u32 addr = BAR_XSTRORM_INTMEM +
2606 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2608 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2610 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2611 int func = func_by_vn(bp, vn);
2613 addr = BAR_XSTRORM_INTMEM +
2614 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2615 size = sizeof(struct rate_shaping_vars_per_vn);
2616 __storm_memset_struct(bp, addr, size,
2617 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2619 addr = BAR_XSTRORM_INTMEM +
2620 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2621 size = sizeof(struct fairness_vars_per_vn);
2622 __storm_memset_struct(bp, addr, size,
2623 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2627 /* init cmng mode in HW according to local configuration */
2628 void bnx2x_set_local_cmng(struct bnx2x *bp)
2630 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2632 if (cmng_fns != CMNG_FNS_NONE) {
2633 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2634 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2636 /* rate shaping and fairness are disabled */
2638 "single function mode without fairness\n");
2642 /* This function is called upon link interrupt */
2643 static void bnx2x_link_attn(struct bnx2x *bp)
2645 /* Make sure that we are synced with the current statistics */
2646 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2648 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2650 bnx2x_init_dropless_fc(bp);
2652 if (bp->link_vars.link_up) {
2654 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2655 struct host_port_stats *pstats;
2657 pstats = bnx2x_sp(bp, port_stats);
2658 /* reset old mac stats */
2659 memset(&(pstats->mac_stx[0]), 0,
2660 sizeof(struct mac_stx));
2662 if (bp->state == BNX2X_STATE_OPEN)
2663 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2666 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2667 bnx2x_set_local_cmng(bp);
2669 __bnx2x_link_report(bp);
2672 bnx2x_link_sync_notify(bp);
2675 void bnx2x__link_status_update(struct bnx2x *bp)
2677 if (bp->state != BNX2X_STATE_OPEN)
2680 /* read updated dcb configuration */
2682 bnx2x_dcbx_pmf_update(bp);
2683 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2684 if (bp->link_vars.link_up)
2685 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2687 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2688 /* indicate link status */
2689 bnx2x_link_report(bp);
2692 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2693 SUPPORTED_10baseT_Full |
2694 SUPPORTED_100baseT_Half |
2695 SUPPORTED_100baseT_Full |
2696 SUPPORTED_1000baseT_Full |
2697 SUPPORTED_2500baseX_Full |
2698 SUPPORTED_10000baseT_Full |
2703 SUPPORTED_Asym_Pause);
2704 bp->port.advertising[0] = bp->port.supported[0];
2706 bp->link_params.bp = bp;
2707 bp->link_params.port = BP_PORT(bp);
2708 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2709 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2710 bp->link_params.req_line_speed[0] = SPEED_10000;
2711 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2712 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2713 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2714 bp->link_vars.line_speed = SPEED_10000;
2715 bp->link_vars.link_status =
2716 (LINK_STATUS_LINK_UP |
2717 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2718 bp->link_vars.link_up = 1;
2719 bp->link_vars.duplex = DUPLEX_FULL;
2720 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2721 __bnx2x_link_report(bp);
2723 bnx2x_sample_bulletin(bp);
2725 /* if the bulletin board did not have an update for the link status,
2726 * __bnx2x_link_report will report the current status, but it will
2727 * NOT duplicate a report that was already issued while sampling the
2728 * bulletin board.
2730 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2734 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2735 u16 vlan_val, u8 allowed_prio)
2737 struct bnx2x_func_state_params func_params = {NULL};
2738 struct bnx2x_func_afex_update_params *f_update_params =
2739 &func_params.params.afex_update;
2741 func_params.f_obj = &bp->func_obj;
2742 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2744 /* no need to wait for RAMROD completion, so don't
2745 * set RAMROD_COMP_WAIT flag
2748 f_update_params->vif_id = vifid;
2749 f_update_params->afex_default_vlan = vlan_val;
2750 f_update_params->allowed_priorities = allowed_prio;
2752 /* if the ramrod cannot be sent, respond to the MCP immediately */
2753 if (bnx2x_func_state_change(bp, &func_params) < 0)
2754 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2759 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2760 u16 vif_index, u8 func_bit_map)
2762 struct bnx2x_func_state_params func_params = {NULL};
2763 struct bnx2x_func_afex_viflists_params *update_params =
2764 &func_params.params.afex_viflists;
2768 /* validate only LIST_SET and LIST_GET are received from switch */
2769 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2770 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2773 func_params.f_obj = &bp->func_obj;
2774 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2776 /* set parameters according to cmd_type */
2777 update_params->afex_vif_list_command = cmd_type;
2778 update_params->vif_list_index = vif_index;
2779 update_params->func_bit_map =
2780 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2781 update_params->func_to_clear = 0;
2783 (cmd_type == VIF_LIST_RULE_GET) ?
2784 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2785 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2787 /* if the ramrod cannot be sent, respond to the MCP immediately for
2788 * SET and GET requests (others are not triggered from the MCP)
2790 rc = bnx2x_func_state_change(bp, &func_params);
2792 bnx2x_fw_command(bp, drv_msg_code, 0);
2797 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2799 struct afex_stats afex_stats;
2800 u32 func = BP_ABS_FUNC(bp);
2807 u32 addr_to_write, vifid, addrs, stats_type, i;
2809 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2810 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2812 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2813 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2816 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2817 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2818 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2820 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2822 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2826 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2827 addr_to_write = SHMEM2_RD(bp,
2828 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2829 stats_type = SHMEM2_RD(bp,
2830 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2833 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2836 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2838 /* write response to scratchpad, for MCP */
2839 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2840 REG_WR(bp, addr_to_write + i*sizeof(u32),
2841 *(((u32 *)(&afex_stats))+i));
2843 /* send ack message to MCP */
2844 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2847 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2848 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2849 bp->mf_config[BP_VN(bp)] = mf_config;
2851 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2854 /* if VIF_SET is "enabled" */
2855 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2856 /* set rate limit directly to internal RAM */
2857 struct cmng_init_input cmng_input;
2858 struct rate_shaping_vars_per_vn m_rs_vn;
2859 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2860 u32 addr = BAR_XSTRORM_INTMEM +
2861 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2863 bp->mf_config[BP_VN(bp)] = mf_config;
2865 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2866 m_rs_vn.vn_counter.rate =
2867 cmng_input.vnic_max_rate[BP_VN(bp)];
2868 m_rs_vn.vn_counter.quota =
2869 (m_rs_vn.vn_counter.rate *
2870 RS_PERIODIC_TIMEOUT_USEC) / 8;
2872 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2874 /* read relevant values from mf_cfg struct in shmem */
2876 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2877 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2878 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2880 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2881 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2882 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2883 vlan_prio = (mf_config &
2884 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2885 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2886 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2889 func_mf_config[func].afex_config) &
2890 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2891 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2894 func_mf_config[func].afex_config) &
2895 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2896 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2898 /* send ramrod to FW, return in case of failure */
2899 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2903 bp->afex_def_vlan_tag = vlan_val;
2904 bp->afex_vlan_mode = vlan_mode;
2906 /* notify link down because the function is disabled */
2907 bnx2x_link_report(bp);
2909 /* send INVALID VIF ramrod to FW */
2910 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2912 /* Reset the default afex VLAN */
2913 bp->afex_def_vlan_tag = -1;
2918 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2920 struct bnx2x_func_switch_update_params *switch_update_params;
2921 struct bnx2x_func_state_params func_params;
2923 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2924 switch_update_params = &func_params.params.switch_update;
2925 func_params.f_obj = &bp->func_obj;
2926 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2928 /* Prepare parameters for function state transitions */
2929 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2930 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2932 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2933 int func = BP_ABS_FUNC(bp);
2936 /* Re-learn the S-tag from shmem */
2937 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2938 FUNC_MF_CFG_E1HOV_TAG_MASK;
2939 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2942 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2946 /* Configure new S-tag in LLH */
2947 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2950 /* Send a ramrod to notify the FW of the change */
2951 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2952 &switch_update_params->changes);
2953 switch_update_params->vlan = bp->mf_ov;
2955 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2956 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2960 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2967 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2970 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2973 static void bnx2x_pmf_update(struct bnx2x *bp)
2975 int port = BP_PORT(bp);
2979 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2982 * We need the mb() to ensure the ordering between the writing to
2983 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2987 /* queue a periodic task */
2988 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2990 bnx2x_dcbx_pmf_update(bp);
2992 /* enable nig attention */
2993 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2994 if (bp->common.int_block == INT_BLOCK_HC) {
2995 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2996 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2997 } else if (!CHIP_IS_E1x(bp)) {
2998 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2999 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
3002 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3010 * General service functions
3013 /* send the MCP a request, block until there is a reply */
3014 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3016 int mb_idx = BP_FW_MB_IDX(bp);
3020 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3022 mutex_lock(&bp->fw_mb_mutex);
3024 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3025 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3027 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3028 (command | seq), param);
3031 /* let the FW do its magic ... */
3034 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3036 /* Give the FW up to 5 seconds (500*10ms) */
3037 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3039 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3040 cnt*delay, rc, seq);
3042 /* is this a reply to our command? */
3043 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3044 rc &= FW_MSG_CODE_MASK;
3047 BNX2X_ERR("FW failed to respond!\n");
3051 mutex_unlock(&bp->fw_mb_mutex);
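/* Hedged usage sketch (the command code is just one example drawn from this
 * file): acknowledging an EEE result would be
 *
 *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
 *
 * The caller gets back either the masked FW response code or 0 on timeout;
 * the sequence number in the mailbox header is what pairs a reply with its
 * request.
 */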
3056 static void storm_memset_func_cfg(struct bnx2x *bp,
3057 struct tstorm_eth_function_common_config *tcfg,
3060 size_t size = sizeof(struct tstorm_eth_function_common_config);
3062 u32 addr = BAR_TSTRORM_INTMEM +
3063 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3065 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3068 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3070 if (CHIP_IS_E1x(bp)) {
3071 struct tstorm_eth_function_common_config tcfg = {0};
3073 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3076 /* Enable the function in the FW */
3077 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3078 storm_memset_func_en(bp, p->func_id, 1);
3081 if (p->spq_active) {
3082 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3083 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3084 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3089 * bnx2x_get_common_flags - Return common flags
3093 * @zero_stats: TRUE if statistics zeroing is needed
3095 * Return the flags that are common to both the Tx-only and the normal connections.
3097 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3098 struct bnx2x_fastpath *fp,
3101 unsigned long flags = 0;
3103 /* PF driver will always initialize the Queue to an ACTIVE state */
3104 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3106 /* tx only connections collect statistics (on the same index as the
3107 * parent connection). The statistics are zeroed when the parent
3108 * connection is initialized.
3111 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3113 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3115 if (bp->flags & TX_SWITCHING)
3116 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3118 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3119 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3121 #ifdef BNX2X_STOP_ON_ERROR
3122 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3128 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3129 struct bnx2x_fastpath *fp,
3132 unsigned long flags = 0;
3134 /* calculate other queue flags */
3136 __set_bit(BNX2X_Q_FLG_OV, &flags);
3138 if (IS_FCOE_FP(fp)) {
3139 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3140 /* For FCoE - force usage of default priority (for afex) */
3141 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3144 if (fp->mode != TPA_MODE_DISABLED) {
3145 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3146 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3147 if (fp->mode == TPA_MODE_GRO)
3148 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3152 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3153 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3156 /* Always set HW VLAN stripping */
3157 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3159 /* configure silent vlan removal */
3161 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3163 return flags | bnx2x_get_common_flags(bp, fp, true);
3166 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3167 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3170 gen_init->stat_id = bnx2x_stats_id(fp);
3171 gen_init->spcl_id = fp->cl_id;
3173 /* Always use mini-jumbo MTU for FCoE L2 ring */
3175 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3177 gen_init->mtu = bp->dev->mtu;
3179 gen_init->cos = cos;
3181 gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3184 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3185 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3186 struct bnx2x_rxq_setup_params *rxq_init)
3190 u16 tpa_agg_size = 0;
3192 if (fp->mode != TPA_MODE_DISABLED) {
3193 pause->sge_th_lo = SGE_TH_LO(bp);
3194 pause->sge_th_hi = SGE_TH_HI(bp);
3196 /* validate SGE ring has enough to cross high threshold */
3197 WARN_ON(bp->dropless_fc &&
3198 pause->sge_th_hi + FW_PREFETCH_CNT >
3199 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3201 tpa_agg_size = TPA_AGG_SIZE;
3202 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3204 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3205 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3206 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3209 /* pause - not for e1 */
3210 if (!CHIP_IS_E1(bp)) {
3211 pause->bd_th_lo = BD_TH_LO(bp);
3212 pause->bd_th_hi = BD_TH_HI(bp);
3214 pause->rcq_th_lo = RCQ_TH_LO(bp);
3215 pause->rcq_th_hi = RCQ_TH_HI(bp);
3217 * validate that rings have enough entries to cross high thresholds */
3220 WARN_ON(bp->dropless_fc &&
3221 pause->bd_th_hi + FW_PREFETCH_CNT >
3223 WARN_ON(bp->dropless_fc &&
3224 pause->rcq_th_hi + FW_PREFETCH_CNT >
3225 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3231 rxq_init->dscr_map = fp->rx_desc_mapping;
3232 rxq_init->sge_map = fp->rx_sge_mapping;
3233 rxq_init->rcq_map = fp->rx_comp_mapping;
3234 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3236 /* This should be the maximum number of data bytes that may be
3237 * placed on the BD (not including padding).
3239 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3240 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3242 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3243 rxq_init->tpa_agg_sz = tpa_agg_size;
3244 rxq_init->sge_buf_sz = sge_sz;
3245 rxq_init->max_sges_pkt = max_sge;
3246 rxq_init->rss_engine_id = BP_FUNC(bp);
3247 rxq_init->mcast_engine_id = BP_FUNC(bp);
3249 /* Maximum number of simultaneous TPA aggregations for this Queue.
3251 * For PF Clients it should be the maximum available number.
3252 * VF driver(s) may want to define it to a smaller value.
3254 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3256 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3257 rxq_init->fw_sb_id = fp->fw_sb_id;
3260 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3262 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3263 /* configure silent vlan removal
3264 * if multi function mode is afex, then mask default vlan
3266 if (IS_MF_AFEX(bp)) {
3267 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3268 rxq_init->silent_removal_mask = VLAN_VID_MASK;
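/* Illustrative arithmetic for buf_sz above (the rx_buf_size value is
 * assumed): with an rx_buf_size of 1600 bytes, buf_sz is 1600 minus
 * BNX2X_FW_RX_ALIGN_START, BNX2X_FW_RX_ALIGN_END and
 * IP_HEADER_ALIGNMENT_PADDING, so the BD advertises only the span that can
 * actually hold packet data, never the alignment padding around it.
 */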
3272 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3273 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3276 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3277 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3278 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3279 txq_init->fw_sb_id = fp->fw_sb_id;
3282 * set the tss leading client id for TX classification ==
3283 * leading RSS client id
3285 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3287 if (IS_FCOE_FP(fp)) {
3288 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3289 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3293 static void bnx2x_pf_init(struct bnx2x *bp)
3295 struct bnx2x_func_init_params func_init = {0};
3296 struct event_ring_data eq_data = { {0} };
3298 if (!CHIP_IS_E1x(bp)) {
3299 /* reset IGU PF statistics: MSIX + ATTN */
3301 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3302 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3303 (CHIP_MODE_IS_4_PORT(bp) ?
3304 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3306 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3307 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3308 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3309 (CHIP_MODE_IS_4_PORT(bp) ?
3310 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3313 func_init.spq_active = true;
3314 func_init.pf_id = BP_FUNC(bp);
3315 func_init.func_id = BP_FUNC(bp);
3316 func_init.spq_map = bp->spq_mapping;
3317 func_init.spq_prod = bp->spq_prod_idx;
3319 bnx2x_func_init(bp, &func_init);
3321 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3324 * Congestion management values depend on the link rate.
3325 * There is no active link yet, so the initial link rate is set to 10 Gbps.
3326 * When the link comes up, the congestion management values are
3327 * re-calculated according to the actual link rate.
3329 bp->link_vars.line_speed = SPEED_10000;
3330 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3332 /* Only the PMF sets the HW */
3334 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3336 /* init Event Queue - PCI bus guarantees correct endianness */
3337 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3338 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3339 eq_data.producer = bp->eq_prod;
3340 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3341 eq_data.sb_id = DEF_SB_ID;
3342 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3345 static void bnx2x_e1h_disable(struct bnx2x *bp)
3347 int port = BP_PORT(bp);
3349 bnx2x_tx_disable(bp);
3351 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3354 static void bnx2x_e1h_enable(struct bnx2x *bp)
3356 int port = BP_PORT(bp);
3358 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3359 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3361 /* Only the Tx queues need to be re-enabled */
3362 netif_tx_wake_all_queues(bp->dev);
3365 * Do not call netif_carrier_on() here; it will be called when the link
3366 * state is checked and the link is found to be up
3370 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3372 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3374 struct eth_stats_info *ether_stat =
3375 &bp->slowpath->drv_info_to_mcp.ether_stat;
3376 struct bnx2x_vlan_mac_obj *mac_obj =
3377 &bp->sp_objs->mac_obj;
3380 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3381 ETH_STAT_INFO_VERSION_LEN);
3383 /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3384 * mac_local field in ether_stat struct. The base address is offset by 2
3385 * bytes to account for the field being 8 bytes but a mac address is
3386 * only 6 bytes. Likewise, the stride for the get_n_elements function is
3387 * 2 bytes to bridge the gap between the 6 bytes of a mac and the 8 bytes
3388 * allocated by the ether_stat struct, so the macs will land in their
3391 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3392 memset(ether_stat->mac_local + i, 0,
3393 sizeof(ether_stat->mac_local[0]));
3394 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3395 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3396 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3398 ether_stat->mtu_size = bp->dev->mtu;
3399 if (bp->dev->features & NETIF_F_RXCSUM)
3400 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3401 if (bp->dev->features & NETIF_F_TSO)
3402 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3403 ether_stat->feature_flags |= bp->common.boot_mode;
3405 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3407 ether_stat->txq_size = bp->tx_ring_size;
3408 ether_stat->rxq_size = bp->rx_ring_size;
3410 #ifdef CONFIG_BNX2X_SRIOV
3411 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
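/* Layout sketch for the MAC_PAD copy above: each mac_local[] slot is 8 bytes
 * wide while a MAC address is 6, so entry i lands at byte offset
 * 8 * i + MAC_PAD (a 2-byte pad, per the comment above), leaving the pad
 * bytes at the front of each slot:
 *
 *	slot i: [pad][pad][b0][b1][b2][b3][b4][b5]
 */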
3415 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3417 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3418 struct fcoe_stats_info *fcoe_stat =
3419 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3421 if (!CNIC_LOADED(bp))
3424 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3426 fcoe_stat->qos_priority =
3427 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3429 /* insert FCoE stats from ramrod response */
3431 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3432 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3433 tstorm_queue_statistics;
3435 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3436 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3437 xstorm_queue_statistics;
3439 struct fcoe_statistics_params *fw_fcoe_stat =
3440 &bp->fw_stats_data->fcoe;
3442 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3443 fcoe_stat->rx_bytes_lo,
3444 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3446 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3447 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3448 fcoe_stat->rx_bytes_lo,
3449 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3451 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3452 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3453 fcoe_stat->rx_bytes_lo,
3454 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3456 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3457 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3458 fcoe_stat->rx_bytes_lo,
3459 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3461 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3462 fcoe_stat->rx_frames_lo,
3463 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3465 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3466 fcoe_stat->rx_frames_lo,
3467 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3469 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3470 fcoe_stat->rx_frames_lo,
3471 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3473 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3474 fcoe_stat->rx_frames_lo,
3475 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3477 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3478 fcoe_stat->tx_bytes_lo,
3479 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3481 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3482 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3483 fcoe_stat->tx_bytes_lo,
3484 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3486 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3487 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3488 fcoe_stat->tx_bytes_lo,
3489 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3491 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3492 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3493 fcoe_stat->tx_bytes_lo,
3494 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3496 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3497 fcoe_stat->tx_frames_lo,
3498 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3500 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3501 fcoe_stat->tx_frames_lo,
3502 fcoe_q_xstorm_stats->ucast_pkts_sent);
3504 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3505 fcoe_stat->tx_frames_lo,
3506 fcoe_q_xstorm_stats->bcast_pkts_sent);
3508 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3509 fcoe_stat->tx_frames_lo,
3510 fcoe_q_xstorm_stats->mcast_pkts_sent);
3513 /* ask L5 driver to add data to the struct */
3514 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3517 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3519 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3520 struct iscsi_stats_info *iscsi_stat =
3521 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3523 if (!CNIC_LOADED(bp))
3526 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3529 iscsi_stat->qos_priority =
3530 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3532 /* ask L5 driver to add data to the struct */
3533 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3536 /* called due to MCP event (on pmf):
3537 * reread new bandwidth configuration
3539 * notify the other functions about the change
3541 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3543 if (bp->link_vars.link_up) {
3544 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3545 bnx2x_link_sync_notify(bp);
3547 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3550 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3552 bnx2x_config_mf_bw(bp);
3553 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3556 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3558 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3559 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3562 #define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3563 #define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3565 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3567 enum drv_info_opcode op_code;
3568 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3569 bool release = false;
3572 /* if drv_info version supported by MFW doesn't match - send NACK */
3573 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3574 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3578 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3579 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3581 /* Must prevent other flows from accessing drv_info_to_mcp */
3582 mutex_lock(&bp->drv_info_mutex);
3584 memset(&bp->slowpath->drv_info_to_mcp, 0,
3585 sizeof(union drv_info_to_mcp));
3588 case ETH_STATS_OPCODE:
3589 bnx2x_drv_info_ether_stat(bp);
3591 case FCOE_STATS_OPCODE:
3592 bnx2x_drv_info_fcoe_stat(bp);
3594 case ISCSI_STATS_OPCODE:
3595 bnx2x_drv_info_iscsi_stat(bp);
3598 /* if op code isn't supported - send NACK */
3599 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3603 /* if we got a drv_info attention from the MFW, these fields are defined in shmem2 */
3606 SHMEM2_WR(bp, drv_info_host_addr_lo,
3607 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3608 SHMEM2_WR(bp, drv_info_host_addr_hi,
3609 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3611 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3613 /* Since management may want both this and get_driver_version, we need
3614 * to wait until management notifies us that it has finished utilizing the buffer. */
3617 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3618 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3619 } else if (!bp->drv_info_mng_owner) {
3620 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3622 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3623 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3625 /* Management is done; need to clear indication */
3626 if (indication & bit) {
3627 SHMEM2_WR(bp, mfw_drv_indication,
3633 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3637 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3638 bp->drv_info_mng_owner = true;
3642 mutex_unlock(&bp->drv_info_mutex);
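/* Timing note for the wait loop above: BNX2X_UPDATE_DRV_INFO_IND_COUNT
 * iterations of BNX2X_UPDATE_DRV_INFO_IND_LENGTH ms each bound the wait for
 * the management indication at 25 * 20 = 500 ms before the driver assumes
 * ownership itself.
 */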
3645 static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3651 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3652 &vals[0], &vals[1], &vals[2], &vals[3]);
3656 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3657 &vals[0], &vals[1], &vals[2], &vals[3]);
3663 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
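/* Worked example of the packing above (illustrative input): a storage driver
 * version string of "2.11.57.0" parses to vals = {2, 11, 57, 0} and packs to
 * (2 << 24) | (11 << 16) | (57 << 8) | 0 == 0x020b3900.
 */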
3666 void bnx2x_update_mng_version(struct bnx2x *bp)
3668 u32 iscsiver = DRV_VER_NOT_LOADED;
3669 u32 fcoever = DRV_VER_NOT_LOADED;
3670 u32 ethver = DRV_VER_NOT_LOADED;
3671 int idx = BP_FW_MB_IDX(bp);
3674 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3677 mutex_lock(&bp->drv_info_mutex);
3678 /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
3679 if (bp->drv_info_mng_owner)
3682 if (bp->state != BNX2X_STATE_OPEN)
3685 /* Parse ethernet driver version */
3686 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3687 if (!CNIC_LOADED(bp))
3690 /* Try getting storage driver version via cnic */
3691 memset(&bp->slowpath->drv_info_to_mcp, 0,
3692 sizeof(union drv_info_to_mcp));
3693 bnx2x_drv_info_iscsi_stat(bp);
3694 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3695 iscsiver = bnx2x_update_mng_version_utility(version, false);
3697 memset(&bp->slowpath->drv_info_to_mcp, 0,
3698 sizeof(union drv_info_to_mcp));
3699 bnx2x_drv_info_fcoe_stat(bp);
3700 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3701 fcoever = bnx2x_update_mng_version_utility(version, false);
3704 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3705 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3706 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3708 mutex_unlock(&bp->drv_info_mutex);
3710 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3711 ethver, iscsiver, fcoever);
3714 void bnx2x_update_mfw_dump(struct bnx2x *bp)
3719 if (!SHMEM2_HAS(bp, drv_info))
3722 /* Update Driver load time, possibly broken in y2038 */
3723 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3725 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3726 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3728 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3730 /* Check & notify On-Chip dump. */
3731 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3733 if (valid_dump & FIRST_DUMP_VALID)
3734 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3736 if (valid_dump & SECOND_DUMP_VALID)
3737 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3740 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3742 u32 cmd_ok, cmd_fail;
3745 if (event & DRV_STATUS_DCC_EVENT_MASK &&
3746 event & DRV_STATUS_OEM_EVENT_MASK) {
3747 BNX2X_ERR("Received simultaneous events %08x\n", event);
3751 if (event & DRV_STATUS_DCC_EVENT_MASK) {
3752 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3753 cmd_ok = DRV_MSG_CODE_DCC_OK;
3754 } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
3755 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3756 cmd_ok = DRV_MSG_CODE_OEM_OK;
3759 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3761 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3762 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3763 /* This is the only place besides the function initialization
3764 * where bp->flags can change, so it is done without any locks */
3767 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3768 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3769 bp->flags |= MF_FUNC_DIS;
3771 bnx2x_e1h_disable(bp);
3773 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3774 bp->flags &= ~MF_FUNC_DIS;
3776 bnx2x_e1h_enable(bp);
3778 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3779 DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3782 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3783 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3784 bnx2x_config_mf_bw(bp);
3785 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3786 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3789 /* Report results to MCP */
3791 bnx2x_fw_command(bp, cmd_fail, 0);
3793 bnx2x_fw_command(bp, cmd_ok, 0);
3796 /* must be called under the spq lock */
3797 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3799 struct eth_spe *next_spe = bp->spq_prod_bd;
3801 if (bp->spq_prod_bd == bp->spq_last_bd) {
3802 bp->spq_prod_bd = bp->spq;
3803 bp->spq_prod_idx = 0;
3804 DP(BNX2X_MSG_SP, "end of spq\n");
3812 /* must be called under the spq lock */
3813 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3815 int func = BP_FUNC(bp);
3818 * Make sure that BD data is updated before writing the producer:
3819 * BD data is written to the memory, the producer is read from the
3820 * memory, thus we need a full memory barrier to ensure the ordering.
3824 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3830 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3832 * @cmd: command to check
3833 * @cmd_type: command type
3835 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3837 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3838 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3839 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3840 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3841 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3842 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3843 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3850 * bnx2x_sp_post - place a single command on an SP ring
3852 * @bp: driver handle
3853 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
3854 * @cid: SW CID the command is related to
3855 * @data_hi: command private data address (high 32 bits)
3856 * @data_lo: command private data address (low 32 bits)
3857 * @cmd_type: command type (e.g. NONE, ETH)
3859 * SP data is handled as if it's always an address pair, thus data fields are
3860 * not swapped to little endian in upper functions. Instead this function swaps
3861 * data as if it's two u32 fields.
3863 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3864 u32 data_hi, u32 data_lo, int cmd_type)
3866 struct eth_spe *spe;
3868 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3870 #ifdef BNX2X_STOP_ON_ERROR
3871 if (unlikely(bp->panic)) {
3872 BNX2X_ERR("Can't post SP when there is panic\n");
3877 spin_lock_bh(&bp->spq_lock);
3880 if (!atomic_read(&bp->eq_spq_left)) {
3881 BNX2X_ERR("BUG! EQ ring full!\n");
3882 spin_unlock_bh(&bp->spq_lock);
3886 } else if (!atomic_read(&bp->cq_spq_left)) {
3887 BNX2X_ERR("BUG! SPQ ring full!\n");
3888 spin_unlock_bh(&bp->spq_lock);
3893 spe = bnx2x_sp_get_next(bp);
3895 /* CID needs the port number to be encoded in it */
3896 spe->hdr.conn_and_cmd_data =
3897 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3900 /* In some cases, type may already contain the func-id
3901 * mainly in SRIOV related use cases, so we add it here only
3902 * if it's not already set.
3904 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3905 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3907 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3908 SPE_HDR_FUNCTION_ID);
3913 spe->hdr.type = cpu_to_le16(type);
3915 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3916 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3919 * It's ok if the actual decrement is issued towards the memory
3920 * somewhere between the spin_lock and spin_unlock. Thus no
3921 * more explicit memory barrier is needed.
3924 atomic_dec(&bp->eq_spq_left);
3926 atomic_dec(&bp->cq_spq_left);
3929 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3930 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3931 (u32)(U64_LO(bp->spq_mapping) +
3932 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3933 HW_CID(bp, cid), data_hi, data_lo, type,
3934 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3936 bnx2x_sp_prod_update(bp);
3937 spin_unlock_bh(&bp->spq_lock);
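/* Hedged usage sketch (the CID and buffer address are hypothetical): posting
 * an RSS update ramrod whose configuration buffer sits at DMA address
 * rss_mapping might look like
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, cid,
 *			   U64_HI(rss_mapping), U64_LO(rss_mapping),
 *			   ETH_CONNECTION_TYPE);
 *
 * matching the hi/lo address-pair convention described in the comment above.
 */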
3941 /* acquire split MCP access lock register */
3942 static int bnx2x_acquire_alr(struct bnx2x *bp)
3948 for (j = 0; j < 1000; j++) {
3949 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3950 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3951 if (val & MCPR_ACCESS_LOCK_LOCK)
3954 usleep_range(5000, 10000);
3956 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3957 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3964 /* release split MCP access lock register */
3965 static void bnx2x_release_alr(struct bnx2x *bp)
3967 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3970 #define BNX2X_DEF_SB_ATT_IDX 0x0001
3971 #define BNX2X_DEF_SB_IDX 0x0002
3973 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3975 struct host_sp_status_block *def_sb = bp->def_status_blk;
3978 barrier(); /* status block is written to by the chip */
3979 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3980 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3981 rc |= BNX2X_DEF_SB_ATT_IDX;
3984 if (bp->def_idx != def_sb->sp_sb.running_index) {
3985 bp->def_idx = def_sb->sp_sb.running_index;
3986 rc |= BNX2X_DEF_SB_IDX;
3989 /* Do not reorder: indices reading should complete before handling */
3995 * slow path service functions
3998 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4000 int port = BP_PORT(bp);
4001 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4002 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4003 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4004 NIG_REG_MASK_INTERRUPT_PORT0;
4009 if (bp->attn_state & asserted)
4010 BNX2X_ERR("IGU ERROR\n");
4012 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4013 aeu_mask = REG_RD(bp, aeu_addr);
4015 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4016 aeu_mask, asserted);
4017 aeu_mask &= ~(asserted & 0x3ff);
4018 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4020 REG_WR(bp, aeu_addr, aeu_mask);
4021 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4023 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4024 bp->attn_state |= asserted;
4025 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4027 if (asserted & ATTN_HARD_WIRED_MASK) {
4028 if (asserted & ATTN_NIG_FOR_FUNC) {
4030 bnx2x_acquire_phy_lock(bp);
4032 /* save nig interrupt mask */
4033 nig_mask = REG_RD(bp, nig_int_mask_addr);
4035 /* If nig_mask is not set, there is no need to call the update function */
4039 REG_WR(bp, nig_int_mask_addr, 0);
4041 bnx2x_link_attn(bp);
4044 /* handle unicore attn? */
4046 if (asserted & ATTN_SW_TIMER_4_FUNC)
4047 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4049 if (asserted & GPIO_2_FUNC)
4050 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4052 if (asserted & GPIO_3_FUNC)
4053 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4055 if (asserted & GPIO_4_FUNC)
4056 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4059 if (asserted & ATTN_GENERAL_ATTN_1) {
4060 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4061 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4063 if (asserted & ATTN_GENERAL_ATTN_2) {
4064 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4067 if (asserted & ATTN_GENERAL_ATTN_3) {
4068 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4072 if (asserted & ATTN_GENERAL_ATTN_4) {
4073 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4074 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4076 if (asserted & ATTN_GENERAL_ATTN_5) {
4077 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4078 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4080 if (asserted & ATTN_GENERAL_ATTN_6) {
4081 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4086 } /* if hardwired */
4088 if (bp->common.int_block == INT_BLOCK_HC)
4089 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4090 COMMAND_REG_ATTN_BITS_SET);
4092 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4094 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4095 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4096 REG_WR(bp, reg_addr, asserted);
4098 /* now set back the mask */
4099 if (asserted & ATTN_NIG_FOR_FUNC) {
4100 /* Verify that IGU ack through BAR was written before restoring
4101 * NIG mask. This loop should exit after 2-3 iterations max.
4103 if (bp->common.int_block != INT_BLOCK_HC) {
4104 u32 cnt = 0, igu_acked;
4106 igu_acked = REG_RD(bp,
4107 IGU_REG_ATTENTION_ACK_BITS);
4108 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4109 (++cnt < MAX_IGU_ATTN_ACK_TO));
4112 "Failed to verify IGU ack on time\n");
4115 REG_WR(bp, nig_int_mask_addr, nig_mask);
4116 bnx2x_release_phy_lock(bp);
4120 static void bnx2x_fan_failure(struct bnx2x *bp)
4122 int port = BP_PORT(bp);
4124 /* mark the failure */
4127 dev_info.port_hw_config[port].external_phy_config);
4129 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4130 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4131 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4134 /* log the failure */
4135 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
4136 "Please contact OEM Support for assistance\n");
4138 /* Schedule device reset (unload).
4139 * Some boards consume enough power while the driver is up to
4140 * overheat if the fan fails.
4142 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4145 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4147 int port = BP_PORT(bp);
4151 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4152 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4154 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4156 val = REG_RD(bp, reg_offset);
4157 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4158 REG_WR(bp, reg_offset, val);
4160 BNX2X_ERR("SPIO5 hw attention\n");
4162 /* Fan failure attention */
4163 bnx2x_hw_reset_phy(&bp->link_params);
4164 bnx2x_fan_failure(bp);
4167 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4168 bnx2x_acquire_phy_lock(bp);
4169 bnx2x_handle_module_detect_int(&bp->link_params);
4170 bnx2x_release_phy_lock(bp);
4173 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4175 val = REG_RD(bp, reg_offset);
4176 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4177 REG_WR(bp, reg_offset, val);
4179 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4180 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4185 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4189 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4191 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4192 BNX2X_ERR("DB hw attention 0x%x\n", val);
4193 /* DORQ discard attention */
4195 BNX2X_ERR("FATAL error from DORQ\n");
4198 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4200 int port = BP_PORT(bp);
4203 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4204 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4206 val = REG_RD(bp, reg_offset);
4207 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4208 REG_WR(bp, reg_offset, val);
4210 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4211 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4216 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4220 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4222 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4223 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4224 /* CFC error attention */
4226 BNX2X_ERR("FATAL error from CFC\n");
4229 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4230 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4231 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4232 /* RQ_USDMDP_FIFO_OVERFLOW */
4234 BNX2X_ERR("FATAL error from PXP\n");
4236 if (!CHIP_IS_E1x(bp)) {
4237 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4238 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4242 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4244 int port = BP_PORT(bp);
4247 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4248 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4250 val = REG_RD(bp, reg_offset);
4251 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4252 REG_WR(bp, reg_offset, val);
4254 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4255 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4260 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4264 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4266 if (attn & BNX2X_PMF_LINK_ASSERT) {
4267 int func = BP_FUNC(bp);
4269 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4270 bnx2x_read_mf_cfg(bp);
4271 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4272 func_mf_config[BP_ABS_FUNC(bp)].config);
4274 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4276 if (val & (DRV_STATUS_DCC_EVENT_MASK |
4277 DRV_STATUS_OEM_EVENT_MASK))
4279 (val & (DRV_STATUS_DCC_EVENT_MASK |
4280 DRV_STATUS_OEM_EVENT_MASK)));
4282 if (val & DRV_STATUS_SET_MF_BW)
4283 bnx2x_set_mf_bw(bp);
4285 if (val & DRV_STATUS_DRV_INFO_REQ)
4286 bnx2x_handle_drv_info_req(bp);
4288 if (val & DRV_STATUS_VF_DISABLED)
4289 bnx2x_schedule_iov_task(bp,
4290 BNX2X_IOV_HANDLE_FLR);
4292 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4293 bnx2x_pmf_update(bp);
4296 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4297 bp->dcbx_enabled > 0)
4298 /* start dcbx state machine */
4299 bnx2x_dcbx_set_params(bp,
4300 BNX2X_DCBX_STATE_NEG_RECEIVED);
4301 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4302 bnx2x_handle_afex_cmd(bp,
4303 val & DRV_STATUS_AFEX_EVENT_MASK);
4304 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4305 bnx2x_handle_eee_event(bp);
4307 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4308 bnx2x_schedule_sp_rtnl(bp,
4309 BNX2X_SP_RTNL_UPDATE_SVID, 0);
4311 if (bp->link_vars.periodic_flags &
4312 PERIODIC_FLAGS_LINK_EVENT) {
4313 /* sync with link */
4314 bnx2x_acquire_phy_lock(bp);
4315 bp->link_vars.periodic_flags &=
4316 ~PERIODIC_FLAGS_LINK_EVENT;
4317 bnx2x_release_phy_lock(bp);
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
4320 bnx2x_link_report(bp);
/* Always call it here: bnx2x_link_report() will
* prevent duplicate link indications.
*/
4325 bnx2x__link_status_update(bp);
4326 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4328 BNX2X_ERR("MC assert!\n");
4329 bnx2x_mc_assert(bp);
4330 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4331 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4332 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
bnx2x_panic();
4336 } else if (attn & BNX2X_MCP_ASSERT) {
4338 BNX2X_ERR("MCP assert!\n");
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
bnx2x_fw_dump(bp);

} else
BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4346 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4347 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4348 if (attn & BNX2X_GRC_TIMEOUT) {
4349 val = CHIP_IS_E1(bp) ? 0 :
4350 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4351 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4353 if (attn & BNX2X_GRC_RSV) {
4354 val = CHIP_IS_E1(bp) ? 0 :
4355 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4356 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4358 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4364 * 0-7 - Engine0 load counter.
4365 * 8-15 - Engine1 load counter.
4366 * 16 - Engine0 RESET_IN_PROGRESS bit.
4367 * 17 - Engine1 RESET_IN_PROGRESS bit.
* 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
*      on the engine.
* 19 - Engine1 ONE_IS_LOADED.
* 20 - Chip reset flow bit. When set, a non-leader must wait for both engines'
*      leaders to complete (check both RESET_IN_PROGRESS bits, not just the
*      one belonging to its own engine).
*/
4376 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4378 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4379 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4380 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4381 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4382 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4383 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4384 #define BNX2X_GLOBAL_RESET_BIT 0x00040000
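/* Illustrative decode of the recovery register layout above (the value
 * is made up for the example): a read of 0x00050003 means engine0 load
 * mask 0x03 (PFs 0 and 1 loaded), engine1 load mask 0x00, engine0
 * RESET_IN_PROGRESS (bit 16) and GLOBAL_RESET (bit 18) set:
 *
 *	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 *	u32 pfs0 = (val & BNX2X_PATH0_LOAD_CNT_MASK) >>
 *		   BNX2X_PATH0_LOAD_CNT_SHIFT;
 */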
4387 * Set the GLOBAL_RESET bit.
4389 * Should be run under rtnl lock
4391 void bnx2x_set_reset_global(struct bnx2x *bp)
4394 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4395 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4396 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4397 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4401 * Clear the GLOBAL_RESET bit.
4403 * Should be run under rtnl lock
4405 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4408 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4409 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4410 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4411 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4415 * Checks the GLOBAL_RESET bit.
4417 * should be run under rtnl lock
4419 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4421 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4423 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4424 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4428 * Clear RESET_IN_PROGRESS bit for the current engine.
4430 * Should be run under rtnl lock
4432 static void bnx2x_set_reset_done(struct bnx2x *bp)
4435 u32 bit = BP_PATH(bp) ?
4436 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4437 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4438 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
/* Clear the bit */
val &= ~bit;
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4444 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4448 * Set RESET_IN_PROGRESS for the current engine.
4450 * should be run under rtnl lock
4452 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4455 u32 bit = BP_PATH(bp) ?
4456 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4457 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4458 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
/* Set the bit */
val |= bit;
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4463 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4467 * Checks the RESET_IN_PROGRESS bit for the given engine.
4468 * should be run under rtnl lock
4470 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4472 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
u32 bit = engine ?
BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4476 /* return false if bit is set */
4477 return (val & bit) ? false : true;
4481 * set pf load for the current pf.
4483 * should be run under rtnl lock
4485 void bnx2x_set_pf_load(struct bnx2x *bp)
4488 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4489 BNX2X_PATH0_LOAD_CNT_MASK;
4490 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4491 BNX2X_PATH0_LOAD_CNT_SHIFT;
4493 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4494 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4496 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4498 /* get the current counter value */
4499 val1 = (val & mask) >> shift;
4501 /* set bit of that PF */
4502 val1 |= (1 << bp->pf_num);
4504 /* clear the old value */
val &= ~mask;

/* set the new one */
4508 val |= ((val1 << shift) & mask);
4510 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4511 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
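/* Illustrative example: on path 0 with bp->pf_num == 2 and a current
 * register value of 0x00000001 (only PF0 marked), bnx2x_set_pf_load()
 * leaves 0x00000005 behind - bit 2 of the engine's load mask is set.
 */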
4515 * bnx2x_clear_pf_load - clear pf load mark
4517 * @bp: driver handle
4519 * Should be run under rtnl lock.
4520 * Decrements the load counter for the current engine. Returns
4521 * whether other functions are still loaded
4523 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4526 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4527 BNX2X_PATH0_LOAD_CNT_MASK;
4528 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4529 BNX2X_PATH0_LOAD_CNT_SHIFT;
4531 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4532 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4533 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4535 /* get the current counter value */
4536 val1 = (val & mask) >> shift;
4538 /* clear bit of that PF */
4539 val1 &= ~(1 << bp->pf_num);
4541 /* clear the old value */
val &= ~mask;

/* set the new one */
4545 val |= ((val1 << shift) & mask);
4547 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);

return val1 != 0;
4553 * Read the load status for the current engine.
4555 * should be run under rtnl lock
4557 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4559 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4560 BNX2X_PATH0_LOAD_CNT_MASK);
4561 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4562 BNX2X_PATH0_LOAD_CNT_SHIFT);
4563 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4565 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4567 val = (val & mask) >> shift;
DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
engine, val);

return val != 0;
4575 static void _print_parity(struct bnx2x *bp, u32 reg)
4577 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4580 static void _print_next_block(int idx, const char *blk)
4582 pr_cont("%s%s", idx ? ", " : "", blk);
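/* Together the two helpers above build one comma-separated report
 * line, e.g. (values illustrative):
 *   Parity errors detected in blocks: BRB [0x00000001], TSDM [0x00000010]
 * where each bracketed value is that block's parity status register.
 */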
4585 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4586 int *par_num, bool print)
u32 cur_bit;
bool res;
int i;

res = false;

for (i = 0; sig; i++) {
4595 cur_bit = (0x1UL << i);
4596 if (sig & cur_bit) {
res |= true; /* Each bit is real error! */

if (print) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
_print_next_block((*par_num)++, "BRB");
_print_parity(bp, BRB1_REG_BRB1_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
_print_next_block((*par_num)++, "PARSER");
_print_parity(bp, PRS_REG_PRS_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
_print_next_block((*par_num)++, "TSDM");
_print_parity(bp, TSDM_REG_TSDM_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
_print_next_block((*par_num)++, "SEARCHER");
_print_parity(bp, SRC_REG_SRC_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
_print_next_block((*par_num)++, "TCM");
_print_parity(bp, TCM_REG_TCM_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
_print_next_block((*par_num)++, "TSEMI");
_print_parity(bp, TSEM_REG_TSEM_PRTY_STS_0);
_print_parity(bp, TSEM_REG_TSEM_PRTY_STS_1);
break;
case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
_print_next_block((*par_num)++, "XPB");
_print_parity(bp, GRCBASE_XPB + PB_REG_PB_PRTY_STS);
break;
}
}

/* Clear the bit */
sig &= ~cur_bit;
}
}

return res;
}
4649 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
int *par_num, bool *global,
bool print)
{
u32 cur_bit;
bool res;
int i;

res = false;

for (i = 0; sig; i++) {
4660 cur_bit = (0x1UL << i);
4661 if (sig & cur_bit) {
res |= true; /* Each bit is real error! */
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "PBF");
_print_parity(bp, PBF_REG_PBF_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "QM");
_print_parity(bp, QM_REG_QM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "TM");
_print_parity(bp, TM_REG_TM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "XSDM");
_print_parity(bp, XSDM_REG_XSDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "XCM");
_print_parity(bp, XCM_REG_XCM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "XSEMI");
_print_parity(bp, XSEM_REG_XSEM_PRTY_STS_0);
_print_parity(bp, XSEM_REG_XSEM_PRTY_STS_1);
}
break;
case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "DOORBELLQ");
_print_parity(bp, DORQ_REG_DORQ_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "NIG");
if (CHIP_IS_E1x(bp)) {
_print_parity(bp, NIG_REG_NIG_PRTY_STS);
} else {
_print_parity(bp, NIG_REG_NIG_PRTY_STS_0);
_print_parity(bp, NIG_REG_NIG_PRTY_STS_1);
}
}
break;
case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
if (print)
_print_next_block((*par_num)++, "VAUX PCI CORE");
*global = true;
break;
case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "DEBUG");
_print_parity(bp, DBG_REG_DBG_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "USDM");
_print_parity(bp, USDM_REG_USDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "UCM");
_print_parity(bp, UCM_REG_UCM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "USEMI");
_print_parity(bp, USEM_REG_USEM_PRTY_STS_0);
_print_parity(bp, USEM_REG_USEM_PRTY_STS_1);
}
break;
case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "UPB");
_print_parity(bp, GRCBASE_UPB + PB_REG_PB_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "CSDM");
_print_parity(bp, CSDM_REG_CSDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
if (print) {
_print_next_block((*par_num)++, "CCM");
_print_parity(bp, CCM_REG_CCM_PRTY_STS);
}
break;
}

/* Clear the bit */
sig &= ~cur_bit;
}
}

return res;
}
4793 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4794 int *par_num, bool print)
u32 cur_bit;
bool res;
int i;

res = false;

for (i = 0; sig; i++) {
4803 cur_bit = (0x1UL << i);
4804 if (sig & cur_bit) {
res = true; /* Each bit is real error! */
if (print) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
_print_next_block((*par_num)++, "CSEMI");
_print_parity(bp, CSEM_REG_CSEM_PRTY_STS_0);
_print_parity(bp, CSEM_REG_CSEM_PRTY_STS_1);
break;
case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
_print_next_block((*par_num)++, "PXP");
_print_parity(bp, PXP_REG_PXP_PRTY_STS);
_print_parity(bp, PXP2_REG_PXP2_PRTY_STS_0);
_print_parity(bp, PXP2_REG_PXP2_PRTY_STS_1);
break;
case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
_print_next_block((*par_num)++, "PXPPCICLOCKCLIENT");
break;
case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
_print_next_block((*par_num)++, "CFC");
_print_parity(bp, CFC_REG_CFC_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
_print_next_block((*par_num)++, "CDU");
_print_parity(bp, CDU_REG_CDU_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
_print_next_block((*par_num)++, "DMAE");
_print_parity(bp, DMAE_REG_DMAE_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
_print_next_block((*par_num)++, "IGU");
if (CHIP_IS_E1x(bp))
_print_parity(bp, HC_REG_HC_PRTY_STS);
else
_print_parity(bp, IGU_REG_IGU_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
_print_next_block((*par_num)++, "MISC");
_print_parity(bp, MISC_REG_MISC_PRTY_STS);
break;
}
}

/* Clear the bit */
sig &= ~cur_bit;
}
}

return res;
}
4867 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
int *par_num, bool *global,
bool print)
{
bool res = false;
u32 cur_bit;
int i;

for (i = 0; sig; i++) {
4876 cur_bit = (0x1UL << i);
4877 if (sig & cur_bit) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
if (print)
_print_next_block((*par_num)++, "MCP ROM");
*global = true;
res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
if (print)
_print_next_block((*par_num)++, "MCP UMP RX");
*global = true;
res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
if (print)
_print_next_block((*par_num)++, "MCP UMP TX");
*global = true;
res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
(*par_num)++;
/* clear latched SCPAD PARITY from MCP */
REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
1UL << 10);
break;
}

/* Clear the bit */
sig &= ~cur_bit;
}
}

return res;
}
4916 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4917 int *par_num, bool print)
u32 cur_bit;
bool res;
int i;

res = false;

for (i = 0; sig; i++) {
4926 cur_bit = (0x1UL << i);
4927 if (sig & cur_bit) {
res = true; /* Each bit is real error! */
if (print) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
_print_next_block((*par_num)++, "PGLUE_B");
_print_parity(bp, PGLUE_B_REG_PGLUE_B_PRTY_STS);
break;
case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
_print_next_block((*par_num)++, "ATC");
_print_parity(bp, ATC_REG_ATC_PRTY_STS);
break;
}
}

/* Clear the bit */
sig &= ~cur_bit;
}
}

return res;
}
static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
u32 *sig)
{
bool res = false;

4957 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4958 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4959 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4960 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4961 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
int par_num = 0;

DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4965 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4966 sig[0] & HW_PRTY_ASSERT_SET_0,
4967 sig[1] & HW_PRTY_ASSERT_SET_1,
4968 sig[2] & HW_PRTY_ASSERT_SET_2,
4969 sig[3] & HW_PRTY_ASSERT_SET_3,
4970 sig[4] & HW_PRTY_ASSERT_SET_4);
4972 if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4973 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4974 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4975 (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4976 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4978 "Parity errors detected in blocks: ");
4983 res |= bnx2x_check_blocks_with_parity0(bp,
4984 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4985 res |= bnx2x_check_blocks_with_parity1(bp,
4986 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4987 res |= bnx2x_check_blocks_with_parity2(bp,
4988 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4989 res |= bnx2x_check_blocks_with_parity3(bp,
4990 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4991 res |= bnx2x_check_blocks_with_parity4(bp,
sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);

if (print)
pr_cont("\n");
}

return res;
}
5002 * bnx2x_chk_parity_attn - checks for parity attentions.
5004 * @bp: driver handle
5005 * @global: true if there was a global attention
5006 * @print: show parity attention in syslog
5008 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5010 struct attn_route attn = { {0} };
5011 int port = BP_PORT(bp);
attn.sig[0] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
attn.sig[1] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
attn.sig[2] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
attn.sig[3] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5025 /* Since MCP attentions can't be disabled inside the block, we need to
* read AEU registers to see whether they're currently disabled
*/
5028 attn.sig[3] &= ((REG_RD(bp,
5029 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5030 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5031 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5032 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5034 if (!CHIP_IS_E1x(bp))
attn.sig[4] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5039 return bnx2x_parity_attn(bp, global, print, attn.sig);
5042 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
u32 val;

if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5047 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5048 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5049 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5050 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5051 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5052 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5053 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5054 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5055 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5056 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5059 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5062 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5063 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5064 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5065 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5066 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5067 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5068 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5070 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5071 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5072 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5073 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5074 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5075 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5076 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5077 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5078 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5079 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5080 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5081 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5082 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5083 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5084 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5087 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5088 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5089 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5090 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5091 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5095 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5097 struct attn_route attn, *group_mask;
5098 int port = BP_PORT(bp);
int index;
u32 reg_addr;
u32 val;
u32 aeu_mask;
bool global = false;
5105 /* need to take HW lock because MCP or other port might also
5106 try to handle this event */
5107 bnx2x_acquire_alr(bp);
5109 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5110 #ifndef BNX2X_STOP_ON_ERROR
5111 bp->recovery_state = BNX2X_RECOVERY_INIT;
5112 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5113 /* Disable HW interrupts */
5114 bnx2x_int_disable(bp);
5115 /* In case of parity errors don't handle attentions so that
* other functions would "see" parity errors.
*/
#else
bnx2x_panic();
#endif
bnx2x_release_alr(bp);
return;
}
5125 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5126 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5127 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5128 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5129 if (!CHIP_IS_E1x(bp))
attn.sig[4] =
REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
else
attn.sig[4] = 0;
5135 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5136 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5138 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5139 if (deasserted & (1 << index)) {
5140 group_mask = &bp->attn_group[index];
5142 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5144 group_mask->sig[0], group_mask->sig[1],
5145 group_mask->sig[2], group_mask->sig[3],
5146 group_mask->sig[4]);
5148 bnx2x_attn_int_deasserted4(bp,
5149 attn.sig[4] & group_mask->sig[4]);
5150 bnx2x_attn_int_deasserted3(bp,
5151 attn.sig[3] & group_mask->sig[3]);
5152 bnx2x_attn_int_deasserted1(bp,
5153 attn.sig[1] & group_mask->sig[1]);
5154 bnx2x_attn_int_deasserted2(bp,
5155 attn.sig[2] & group_mask->sig[2]);
5156 bnx2x_attn_int_deasserted0(bp,
5157 attn.sig[0] & group_mask->sig[0]);
5161 bnx2x_release_alr(bp);
5163 if (bp->common.int_block == INT_BLOCK_HC)
5164 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5165 COMMAND_REG_ATTN_BITS_CLR);
else
reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
val = ~deasserted;
DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5171 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5172 REG_WR(bp, reg_addr, val);
5174 if (~bp->attn_state & deasserted)
5175 BNX2X_ERR("IGU ERROR\n");
5177 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5178 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5180 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5181 aeu_mask = REG_RD(bp, reg_addr);
5183 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
5184 aeu_mask, deasserted);
5185 aeu_mask |= (deasserted & 0x3ff);
5186 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5188 REG_WR(bp, reg_addr, aeu_mask);
5189 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5191 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5192 bp->attn_state &= ~deasserted;
5193 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
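/* A sketch of the transition logic used below: a line is newly
 * asserted when it is set in attn_bits but not yet acked and not in
 * attn_state; it is newly deasserted when it is clear in attn_bits but
 * still acked and recorded in attn_state. E.g. attn_bits=0x1,
 * attn_ack=0x0, attn_state=0x0 yields asserted=0x1, deasserted=0x0.
 */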
5196 static void bnx2x_attn_int(struct bnx2x *bp)
5198 /* read local copy of bits */
u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
attn_bits);
u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
attn_bits_ack);
5203 u32 attn_state = bp->attn_state;
5205 /* look for changed bits */
5206 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
5207 u32 deasserted = ~attn_bits & attn_ack & attn_state;
5210 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
5211 attn_bits, attn_ack, asserted, deasserted);
5213 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5214 BNX2X_ERR("BAD attention state\n");
5216 /* handle bits that were raised */
if (asserted)
bnx2x_attn_int_asserted(bp, asserted);

if (deasserted)
bnx2x_attn_int_deasserted(bp, deasserted);
}
5224 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5225 u16 index, u8 op, u8 update)
5227 u32 igu_addr = bp->igu_base_addr;
5228 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
igu_addr);
}
5233 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5235 /* No memory barriers */
5236 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5237 mmiowb(); /* keep prod updates ordered */
5240 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5241 union event_ring_elem *elem)
5243 u8 err = elem->message.error;
5245 if (!bp->cnic_eth_dev.starting_cid ||
5246 (cid < bp->cnic_eth_dev.starting_cid &&
cid != bp->cnic_eth_dev.iscsi_l2_cid))
return 1;
5250 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5252 if (unlikely(err)) {
BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
cid);
bnx2x_panic_dump(bp, false);
}
bnx2x_cnic_cfc_comp(bp, cid, err);
return 0;
5262 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5264 struct bnx2x_mcast_ramrod_params rparam;
5267 memset(&rparam, 0, sizeof(rparam));
5269 rparam.mcast_obj = &bp->mcast_obj;
5271 netif_addr_lock_bh(bp->dev);
5273 /* Clear pending state for the last command */
5274 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5276 /* If there are pending mcast commands - send them */
5277 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5278 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
if (rc < 0)
BNX2X_ERR("Failed to send pending mcast commands: %d\n",
rc);
5284 netif_addr_unlock_bh(bp->dev);
5287 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5288 union event_ring_elem *elem)
unsigned long ramrod_flags = 0;
int rc = 0;
5292 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5293 u32 cid = echo & BNX2X_SWCID_MASK;
5294 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5296 /* Always push next commands out, don't wait here */
5297 __set_bit(RAMROD_CONT, &ramrod_flags);
switch (echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
else
vlan_mac_obj = &bp->sp_objs[cid].mac_obj;

break;
case BNX2X_FILTER_VLAN_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
break;
case BNX2X_FILTER_MCAST_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
/* This is only relevant for 57710 where multicast MACs are
* configured as unicast MACs using the same ramrod.
*/
bnx2x_handle_mcast_eqe(bp);
return;
default:
BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
return;
}

rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);

if (rc < 0)
BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
else if (rc > 0)
DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5332 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5334 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5336 netif_addr_lock_bh(bp->dev);
5338 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5340 /* Send rx_mode command again if was requested */
5341 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5342 bnx2x_set_storm_rx_mode(bp);
5343 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5345 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5346 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5348 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5350 netif_addr_unlock_bh(bp->dev);
5353 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5354 union event_ring_elem *elem)
5356 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5358 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5359 elem->message.data.vif_list_event.func_bit_map);
5360 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5361 elem->message.data.vif_list_event.func_bit_map);
5362 } else if (elem->message.data.vif_list_event.echo ==
5363 VIF_LIST_RULE_SET) {
5364 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5365 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5369 /* called with rtnl_lock */
5370 static void bnx2x_after_function_update(struct bnx2x *bp)
int q, rc;
struct bnx2x_fastpath *fp;
5374 struct bnx2x_queue_state_params queue_params = {NULL};
5375 struct bnx2x_queue_update_params *q_update_params =
5376 &queue_params.params.update;
5378 /* Send Q update command with afex vlan removal values for all Qs */
5379 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5381 /* set silent vlan removal values according to vlan mode */
5382 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5383 &q_update_params->update_flags);
5384 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5385 &q_update_params->update_flags);
5386 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5388 /* in access mode mark mask and value are 0 to strip all vlans */
5389 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5390 q_update_params->silent_removal_value = 0;
5391 q_update_params->silent_removal_mask = 0;
5393 q_update_params->silent_removal_value =
5394 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5395 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5398 for_each_eth_queue(bp, q) {
5399 /* Set the appropriate Queue object */
fp = &bp->fp[q];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5403 /* send the ramrod */
5404 rc = bnx2x_queue_state_change(bp, &queue_params);
if (rc < 0)
BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
q);
}
5410 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5411 fp = &bp->fp[FCOE_IDX(bp)];
5412 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5414 /* clear pending completion bit */
5415 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5417 /* mark latest Q bit */
5418 smp_mb__before_atomic();
5419 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5420 smp_mb__after_atomic();
5422 /* send Q update ramrod for FCoE Q */
5423 rc = bnx2x_queue_state_change(bp, &queue_params);
if (rc < 0)
BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
FCOE_IDX(bp));
} else {
5428 /* If no FCoE ring - ACK MCP now */
5429 bnx2x_link_report(bp);
5430 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5434 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5435 struct bnx2x *bp, u32 cid)
5437 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5439 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5440 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5442 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
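/* bnx2x_eq_int() below walks the event queue from the last seen
 * consumer (bp->eq_cons) up to the hw consumer in the status block,
 * dispatches each ramrod completion, returns the consumed credits to
 * eq_spq_left and finally publishes the new producer to the FW.
 */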
5445 static void bnx2x_eq_int(struct bnx2x *bp)
5447 u16 hw_cons, sw_cons, sw_prod;
5448 union event_ring_elem *elem;
u8 echo;
u32 cid;
u8 opcode;
int rc, spqe_cnt = 0;
5453 struct bnx2x_queue_sp_obj *q_obj;
5454 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5455 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5457 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
* When we get the next page we need to adjust so the loop
* condition below will be met. The next element is the size of a
* regular element and hence incrementing by 1.
*/
if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
hw_cons++;
5467 /* This function may never run in parallel with itself for a
* specific bp, so there is no need for "paired" read memory
* barriers here.
*/
5471 sw_cons = bp->eq_cons;
5472 sw_prod = bp->eq_prod;
5474 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5475 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5477 for (; sw_cons != hw_cons;
5478 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5480 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
rc = bnx2x_iov_eq_sp_event(bp, elem);
if (!rc) {
DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
rc);
goto next_spqe;
}
5489 opcode = elem->message.opcode;
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_VF_PF_CHANNEL:
bnx2x_vf_mbx_schedule(bp,
&elem->message.data.vf_pf_event);
continue;

case EVENT_RING_OPCODE_STAT_QUERY:
DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
"got statistics comp event %d\n",
bp->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
case EVENT_RING_OPCODE_CFC_DEL:
/* handle according to cid range */
/*
* we may want to verify here that the bp state is
* BNX2X_STATE_OPEN
*/
/* elem CID originates from FW; actually LE */
cid = SW_CID(elem->message.data.cfc_del_event.cid);

DP(BNX2X_MSG_SP,
"got delete ramrod for MULTI[%d]\n", cid);

if (CNIC_LOADED(bp) &&
!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
goto next_spqe;

q_obj = bnx2x_cid_to_q_obj(bp, cid);

if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
break;

goto next_spqe;
case EVENT_RING_OPCODE_STOP_TRAFFIC:
DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_STOP))
break;

goto next_spqe;

case EVENT_RING_OPCODE_START_TRAFFIC:
DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_START))
break;

goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_UPDATE:
echo = elem->message.data.function_update_event.echo;
if (echo == SWITCH_UPDATE) {
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"got FUNC_SWITCH_UPDATE ramrod\n");
if (f_obj->complete_cmd(
bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
break;
} else {
int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;

DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
"AFEX: ramrod completed FUNCTION_UPDATE\n");
f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_AFEX_UPDATE);

/* We will perform the Queues update from
* sp_rtnl task as all Queue SP operations
* should run under rtnl_lock.
*/
bnx2x_schedule_sp_rtnl(bp, cmd, 0);
}

goto next_spqe;
case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_AFEX_VIFLISTS);
bnx2x_after_afex_vif_lists(bp, elem);
break;
case EVENT_RING_OPCODE_FUNCTION_START:
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"got FUNC_START ramrod\n");
if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
break;

goto next_spqe;

case EVENT_RING_OPCODE_FUNCTION_STOP:
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"got FUNC_STOP ramrod\n");
if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
break;

goto next_spqe;

case EVENT_RING_OPCODE_SET_TIMESYNC:
DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
"got set_timesync ramrod completion\n");
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_SET_TIMESYNC))
break;

goto next_spqe;
}
switch (opcode | bp->state) {
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
BNX2X_STATE_OPENING_WAIT4_PORT):
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
SW_CID(elem->message.data.eth_event.echo));
rss_raw->clear_pending(rss_raw);
break;

case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_SET_MAC |
BNX2X_STATE_CLOSING_WAIT4_HALT):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
bnx2x_handle_classification_eqe(bp, elem);
break;

case (EVENT_RING_OPCODE_MULTICAST_RULES |
BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_MULTICAST_RULES |
BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_MULTICAST_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
DP(BNX2X_MSG_SP, "got mcast ramrod\n");
bnx2x_handle_mcast_eqe(bp);
break;

case (EVENT_RING_OPCODE_FILTERS_RULES |
BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_FILTERS_RULES |
BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_FILTERS_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
bnx2x_handle_rx_mode_eqe(bp);
break;

default:
/* unknown event log error and continue */
BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
elem->message.opcode, bp->state);
}

next_spqe:
spqe_cnt++;
} /* for */
5655 smp_mb__before_atomic();
5656 atomic_add(spqe_cnt, &bp->eq_spq_left);
5658 bp->eq_cons = sw_cons;
5659 bp->eq_prod = sw_prod;
/* Make sure that above mem writes were issued towards the memory */
smp_wmb();
5663 /* update producer */
5664 bnx2x_update_eq_prod(bp, bp->eq_prod);
5667 static void bnx2x_sp_task(struct work_struct *work)
5669 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5671 DP(BNX2X_MSG_SP, "sp task invoked\n");
/* make sure the atomic interrupt_occurred has been written */
smp_rmb();
5675 if (atomic_read(&bp->interrupt_occurred)) {
5677 /* what work needs to be performed? */
5678 u16 status = bnx2x_update_dsb_idx(bp);
5680 DP(BNX2X_MSG_SP, "status %x\n", status);
5681 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5682 atomic_set(&bp->interrupt_occurred, 0);
5685 if (status & BNX2X_DEF_SB_ATT_IDX) {
bnx2x_attn_int(bp);
status &= ~BNX2X_DEF_SB_ATT_IDX;
}
5690 /* SP events: STAT_QUERY and others */
5691 if (status & BNX2X_DEF_SB_IDX) {
5692 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5694 if (FCOE_INIT(bp) &&
5695 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5696 /* Prevent local bottom-halves from running as
* we are going to change the local NAPI list.
*/
local_bh_disable();
napi_schedule(&bnx2x_fcoe(bp, napi));
local_bh_enable();
}
/* Handle EQ completions */
bnx2x_eq_int(bp);
5706 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5707 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5709 status &= ~BNX2X_DEF_SB_IDX;
5712 /* if status is non zero then perhaps something went wrong */
5713 if (unlikely(status))
5715 "got an unknown interrupt! (status 0x%x)\n", status);
5717 /* ack status block only if something was actually handled */
5718 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5719 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5722 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5723 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5725 bnx2x_link_report(bp);
5726 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5730 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5732 struct net_device *dev = dev_instance;
5733 struct bnx2x *bp = netdev_priv(dev);
5735 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5736 IGU_INT_DISABLE, 0);
5738 #ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return IRQ_HANDLED;
#endif
5743 if (CNIC_LOADED(bp)) {
5744 struct cnic_ops *c_ops;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
if (c_ops)
c_ops->cnic_handler(bp->cnic_data, NULL);
rcu_read_unlock();
}
5753 /* schedule sp task to perform default status block work, ack
5754 * attentions and enable interrupts.
bnx2x_schedule_sp_task(bp);

return IRQ_HANDLED;
}
5761 /* end of slow path */
5763 void bnx2x_drv_pulse(struct bnx2x *bp)
5765 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5766 bp->fw_drv_pulse_wr_seq);
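/* The timer below implements the driver/MCP heartbeat: every tick the
 * driver advances its pulse sequence in shmem (bnx2x_drv_pulse) and
 * compares it with the pulse the MCP writes back; a gap of more than
 * 5 ticks is taken as a sign of a stuck management FW.
 */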
5769 static void bnx2x_timer(unsigned long data)
5771 struct bnx2x *bp = (struct bnx2x *) data;
if (!netif_running(bp->dev))
return;
if (IS_PF(bp) && !BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
u16 drv_pulse;
u16 mcp_pulse;
5782 ++bp->fw_drv_pulse_wr_seq;
5783 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5784 drv_pulse = bp->fw_drv_pulse_wr_seq;
5785 bnx2x_drv_pulse(bp);
5787 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5788 MCP_PULSE_SEQ_MASK);
5789 /* The delta between driver pulse and mcp response
5790 * should not get too big. If the MFW is more than 5 pulses
* behind, we should worry about it enough to generate an error
* log.
*/
5794 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5795 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
drv_pulse, mcp_pulse);
}
5799 if (bp->state == BNX2X_STATE_OPEN)
5800 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5802 /* sample pf vf bulletin board for new posts from pf */
if (IS_VF(bp))
bnx2x_timer_sriov(bp);
5806 mod_timer(&bp->timer, jiffies + bp->current_interval);
5809 /* end of Statistics */
/*
* nic init service functions
*/
5817 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
u32 i;

if (!(len%4) && !(addr%4))
5821 for (i = 0; i < len; i += 4)
5822 REG_WR(bp, addr + i, fill);
else
for (i = 0; i < len; i++)
5825 REG_WR8(bp, addr + i, fill);
5828 /* helper: writes FP SP data to FW - data_size in dwords */
static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, int fw_sb_id,
u32 *sb_data_p, u32 data_size)
{
int index;

5835 for (index = 0; index < data_size; index++)
5836 REG_WR(bp, BAR_CSTRORM_INTMEM +
5837 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
sizeof(u32)*index,
*(sb_data_p + index));
5842 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
u32 *sb_data_p;
u32 data_size = 0;
struct hc_status_block_data_e2 sb_data_e2;
5847 struct hc_status_block_data_e1x sb_data_e1x;
5849 /* disable the function first */
5850 if (!CHIP_IS_E1x(bp)) {
5851 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5852 sb_data_e2.common.state = SB_DISABLED;
5853 sb_data_e2.common.p_func.vf_valid = false;
5854 sb_data_p = (u32 *)&sb_data_e2;
5855 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5857 memset(&sb_data_e1x, 0,
5858 sizeof(struct hc_status_block_data_e1x));
5859 sb_data_e1x.common.state = SB_DISABLED;
5860 sb_data_e1x.common.p_func.vf_valid = false;
5861 sb_data_p = (u32 *)&sb_data_e1x;
5862 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5864 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5866 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5867 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5868 CSTORM_STATUS_BLOCK_SIZE);
5869 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5870 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5871 CSTORM_SYNC_BLOCK_SIZE);
5874 /* helper: writes SP SB data to FW */
5875 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5876 struct hc_sp_status_block_data *sp_sb_data)
int func = BP_FUNC(bp);
int i;

5880 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5881 REG_WR(bp, BAR_CSTRORM_INTMEM +
5882 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
i*sizeof(u32),
*((u32 *)sp_sb_data + i));
5887 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5889 int func = BP_FUNC(bp);
5890 struct hc_sp_status_block_data sp_sb_data;
5891 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5893 sp_sb_data.state = SB_DISABLED;
5894 sp_sb_data.p_func.vf_valid = false;
5896 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5898 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5899 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5900 CSTORM_SP_STATUS_BLOCK_SIZE);
5901 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5902 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5903 CSTORM_SP_SYNC_BLOCK_SIZE);
5906 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5907 int igu_sb_id, int igu_seg_id)
5909 hc_sm->igu_sb_id = igu_sb_id;
5910 hc_sm->igu_seg_id = igu_seg_id;
5911 hc_sm->timer_value = 0xFF;
5912 hc_sm->time_to_expire = 0xFFFFFFFF;
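/* The mapping below ties every RX completion-queue index to the
 * SM_RX_ID state machine and every TX CQ index (the OOO ring and all
 * CoS rings) to SM_TX_ID, so each index is driven by the matching
 * coalescing timer set up in bnx2x_setup_ndsb_state_machine().
 */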
5915 /* allocates state machine ids. */
5916 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5918 /* zero out state machine indices */
5920 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5923 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5924 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5925 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5926 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5930 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5931 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5934 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5935 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5936 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5937 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5938 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5939 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5940 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5941 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5944 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5945 u8 vf_valid, int fw_sb_id, int igu_sb_id)
int igu_seg_id;

struct hc_status_block_data_e2 sb_data_e2;
5950 struct hc_status_block_data_e1x sb_data_e1x;
struct hc_status_block_sm *hc_sm_p;
int data_size;
u32 *sb_data_p;
5955 if (CHIP_INT_MODE_IS_BC(bp))
5956 igu_seg_id = HC_SEG_ACCESS_NORM;
else
igu_seg_id = IGU_SEG_ACCESS_NORM;
5960 bnx2x_zero_fp_sb(bp, fw_sb_id);
5962 if (!CHIP_IS_E1x(bp)) {
5963 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5964 sb_data_e2.common.state = SB_ENABLED;
5965 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5966 sb_data_e2.common.p_func.vf_id = vfid;
5967 sb_data_e2.common.p_func.vf_valid = vf_valid;
5968 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5969 sb_data_e2.common.same_igu_sb_1b = true;
5970 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5971 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5972 hc_sm_p = sb_data_e2.common.state_machine;
5973 sb_data_p = (u32 *)&sb_data_e2;
5974 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5975 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5977 memset(&sb_data_e1x, 0,
5978 sizeof(struct hc_status_block_data_e1x));
5979 sb_data_e1x.common.state = SB_ENABLED;
5980 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5981 sb_data_e1x.common.p_func.vf_id = 0xff;
5982 sb_data_e1x.common.p_func.vf_valid = false;
5983 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5984 sb_data_e1x.common.same_igu_sb_1b = true;
5985 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5986 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5987 hc_sm_p = sb_data_e1x.common.state_machine;
5988 sb_data_p = (u32 *)&sb_data_e1x;
5989 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5990 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5993 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5994 igu_sb_id, igu_seg_id);
5995 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5996 igu_sb_id, igu_seg_id);
5998 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
6000 /* write indices to HW - PCI guarantees endianity of regpairs */
6001 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
6004 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
6005 u16 tx_usec, u16 rx_usec)
bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
false, rx_usec);
bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
tx_usec);
bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
tx_usec);
bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
tx_usec);
}
6020 static void bnx2x_init_def_sb(struct bnx2x *bp)
6022 struct host_sp_status_block *def_sb = bp->def_status_blk;
6023 dma_addr_t mapping = bp->def_status_blk_mapping;
int igu_sp_sb_index;
int igu_seg_id;
6026 int port = BP_PORT(bp);
6027 int func = BP_FUNC(bp);
6028 int reg_offset, reg_offset_en5;
u64 section;
int index, sindex;
struct hc_sp_status_block_data sp_sb_data;
6032 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6034 if (CHIP_INT_MODE_IS_BC(bp)) {
6035 igu_sp_sb_index = DEF_SB_IGU_ID;
6036 igu_seg_id = HC_SEG_ACCESS_DEF;
} else {
igu_sp_sb_index = bp->igu_dsb_id;
igu_seg_id = IGU_SEG_ACCESS_DEF;
}
6043 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6044 atten_status_block);
def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

bp->attn_state = 0;
6049 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6050 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6051 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6052 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6053 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6055 /* take care of sig[0]..sig[4] */
6056 for (sindex = 0; sindex < 4; sindex++)
6057 bp->attn_group[index].sig[sindex] =
6058 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
if (!CHIP_IS_E1x(bp))
/*
* enable5 is separate from the rest of the registers,
* and therefore the address skip is 4
* and not 16 between the different groups
*/
bp->attn_group[index].sig[4] = REG_RD(bp,
reg_offset_en5 + 0x4*index);
else
bp->attn_group[index].sig[4] = 0;
}
6072 if (bp->common.int_block == INT_BLOCK_HC) {
6073 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6074 HC_REG_ATTN_MSG0_ADDR_L);
6076 REG_WR(bp, reg_offset, U64_LO(section));
6077 REG_WR(bp, reg_offset + 4, U64_HI(section));
6078 } else if (!CHIP_IS_E1x(bp)) {
6079 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6080 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
section = ((u64)mapping) + offsetof(struct host_sp_status_block,
sp_sb);
6086 bnx2x_zero_sp_sb(bp);
6088 /* PCI guarantees endianity of regpairs */
6089 sp_sb_data.state = SB_ENABLED;
6090 sp_sb_data.host_sb_addr.lo = U64_LO(section);
6091 sp_sb_data.host_sb_addr.hi = U64_HI(section);
6092 sp_sb_data.igu_sb_id = igu_sp_sb_index;
6093 sp_sb_data.igu_seg_id = igu_seg_id;
6094 sp_sb_data.p_func.pf_id = func;
6095 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6096 sp_sb_data.p_func.vf_id = 0xff;
6098 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6100 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6103 void bnx2x_update_coalesce(struct bnx2x *bp)
int i;

for_each_eth_queue(bp, i)
6108 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6109 bp->tx_ticks, bp->rx_ticks);
6112 static void bnx2x_init_sp_ring(struct bnx2x *bp)
6114 spin_lock_init(&bp->spq_lock);
6115 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6117 bp->spq_prod_idx = 0;
6118 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6119 bp->spq_prod_bd = bp->spq;
6120 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
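/* bnx2x_init_eq_ring() below chains the event-queue pages: the last
 * element of each page holds a next-page pointer to the following
 * page, with the final page wrapping back to the first, and
 * eq_spq_left is seeded one element short so exhaustion is flagged
 * before the ring is actually full.
 */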
6123 static void bnx2x_init_eq_ring(struct bnx2x *bp)
int i;

for (i = 1; i <= NUM_EQ_PAGES; i++) {
6127 union event_ring_elem *elem =
6128 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6130 elem->next_page.addr.hi =
6131 cpu_to_le32(U64_HI(bp->eq_mapping +
6132 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6133 elem->next_page.addr.lo =
6134 cpu_to_le32(U64_LO(bp->eq_mapping +
6135 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
}
bp->eq_cons = 0;
bp->eq_prod = NUM_EQ_DESC;
6139 bp->eq_cons_sb = BNX2X_EQ_INDEX;
/* we want a warning message before the EQ gets exhausted */
6141 atomic_set(&bp->eq_spq_left,
6142 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6145 /* called with netif_addr_lock_bh() */
6146 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6147 unsigned long rx_mode_flags,
6148 unsigned long rx_accept_flags,
6149 unsigned long tx_accept_flags,
6150 unsigned long ramrod_flags)
6152 struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;

memset(&ramrod_param, 0, sizeof(ramrod_param));
6157 /* Prepare ramrod parameters */
6158 ramrod_param.cid = 0;
6159 ramrod_param.cl_id = cl_id;
6160 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6161 ramrod_param.func_id = BP_FUNC(bp);
6163 ramrod_param.pstate = &bp->sp_state;
6164 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6166 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6167 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6169 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6171 ramrod_param.ramrod_flags = ramrod_flags;
6172 ramrod_param.rx_mode_flags = rx_mode_flags;
6174 ramrod_param.rx_accept_flags = rx_accept_flags;
6175 ramrod_param.tx_accept_flags = tx_accept_flags;
6177 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
if (rc < 0) {
BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
return rc;
}

return 0;
}
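/* bnx2x_fill_accept_flags() below maps the requested rx_mode onto
 * rx/tx accept-flag bitmaps: NONE accepts nothing, NORMAL accepts
 * uni/multi/broadcast, ALLMULTI widens multicast to "all", and PROMISC
 * additionally accepts unmatched unicast; ANY_VLAN is added whenever
 * bp->accept_any_vlan is set.
 */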
6186 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6187 unsigned long *rx_accept_flags,
6188 unsigned long *tx_accept_flags)
6190 /* Clear the flags first */
6191 *rx_accept_flags = 0;
6192 *tx_accept_flags = 0;
switch (rx_mode) {
case BNX2X_RX_MODE_NONE:
/*
* 'drop all' supersedes any accept flags that may have been
* passed to the function.
*/
break;
6201 case BNX2X_RX_MODE_NORMAL:
6202 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6203 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6204 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6206 /* internal switching mode */
6207 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6208 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6209 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6211 if (bp->accept_any_vlan) {
6212 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6213 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}

break;
case BNX2X_RX_MODE_ALLMULTI:
6218 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6219 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6220 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6222 /* internal switching mode */
6223 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6224 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6225 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6227 if (bp->accept_any_vlan) {
6228 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6229 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}

break;
case BNX2X_RX_MODE_PROMISC:
/* According to definition of SI mode, iface in promisc mode
* should receive matched and unmatched (in resolution of port)
* packets.
*/
__set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6239 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6240 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6241 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6243 /* internal switching mode */
6244 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6245 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
if (IS_MF_SI(bp))
__set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
else
__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6252 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6253 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
break;
default:
BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
return -EINVAL;
}

return 0;
}
6264 /* called with netif_addr_lock_bh() */
6265 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6267 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6268 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
int rc;

if (!NO_FCOE(bp))
/* Configure rx_mode of FCoE Queue */
6273 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
&tx_accept_flags);
if (rc)
return rc;
6280 __set_bit(RAMROD_RX, &ramrod_flags);
6281 __set_bit(RAMROD_TX, &ramrod_flags);
6283 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
rx_accept_flags, tx_accept_flags,
ramrod_flags);
}
6288 static void bnx2x_init_internal_common(struct bnx2x *bp)
int i;

/* Zero this manually as its initialization is
6293 currently missing in the initTool */
6294 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6295 REG_WR(bp, BAR_USTRORM_INTMEM +
6296 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6297 if (!CHIP_IS_E1x(bp)) {
6298 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6299 CHIP_INT_MODE_IS_BC(bp) ?
6300 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6304 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6306 switch (load_code) {
6307 case FW_MSG_CODE_DRV_LOAD_COMMON:
6308 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6309 bnx2x_init_internal_common(bp);
6312 case FW_MSG_CODE_DRV_LOAD_PORT:
6316 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6317 /* internal memory per function is
6318 initialized inside bnx2x_pf_init */
break;

default:
BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
break;
}
}
6327 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6329 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6332 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6334 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6337 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6339 if (CHIP_IS_E1x(fp->bp))
6340 return BP_L_ID(fp->bp) + fp->index;
6341 else /* We want Client ID to be the same as IGU SB ID for 57712 */
6342 return bnx2x_fp_igu_sb_id(fp);
6345 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6347 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
u8 cos;
unsigned long q_type = 0;
6350 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
fp->rx_queue = fp_idx;
fp->cid = fp_idx;
6353 fp->cl_id = bnx2x_fp_cl_id(fp);
6354 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6355 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6356 /* qZone id equals to FW (per path) client id */
6357 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
6360 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6362 /* Setup SB indices */
6363 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6365 /* Configure Queue State object */
6366 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6367 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6369 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6372 for_each_cos_in_tx_queue(fp, cos) {
6373 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6374 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6375 FP_COS_TO_TXQ(fp, cos, bp),
6376 BNX2X_TX_SB_INDEX_BASE + cos, fp);
6377 cids[cos] = fp->txdata_ptr[cos]->cid;
/* nothing more for vf to do here */
if (IS_VF(bp))
return;
6384 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6385 fp->fw_sb_id, fp->igu_sb_id);
6386 bnx2x_update_fpsb_idx(fp);
6387 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6388 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6389 bnx2x_sp_mapping(bp, q_rdata), q_type);
/* Configure classification DBs: Always enable Tx switching */
6394 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6397 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6398 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6402 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
int i;

for (i = 1; i <= NUM_TX_RINGS; i++) {
6407 struct eth_tx_next_bd *tx_next_bd =
6408 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6410 tx_next_bd->addr_hi =
6411 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6412 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6413 tx_next_bd->addr_lo =
6414 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6415 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
}

*txdata->tx_cons_sb = cpu_to_le16(0);
6420 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6421 txdata->tx_db.data.zero_fill1 = 0;
6422 txdata->tx_db.data.prod = 0;
6424 txdata->tx_pkt_prod = 0;
6425 txdata->tx_pkt_cons = 0;
6426 txdata->tx_bd_prod = 0;
6427 txdata->tx_bd_cons = 0;
6431 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
int i;

for_each_tx_queue_cnic(bp, i)
6436 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6439 static void bnx2x_init_tx_rings(struct bnx2x *bp)
int i;
u8 cos;

for_each_eth_queue(bp, i)
6445 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6446 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6449 static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6451 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6452 unsigned long q_type = 0;
6454 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6455 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6456 BNX2X_FCOE_ETH_CL_ID_IDX);
6457 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6458 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6459 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6460 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6461 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
fp);
6465 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6467 /* qZone id equals to FW (per path) client id */
6468 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6470 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6471 bnx2x_rx_ustorm_prods_offset(fp);
6473 /* Configure Queue State object */
6474 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6475 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6477 /* No multi-CoS for FCoE L2 client */
6478 BUG_ON(fp->max_cos != 1);
6480 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6481 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6482 bnx2x_sp_mapping(bp, q_rdata), q_type);
6485 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6486 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);
6495 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6496 BNX2X_VF_ID_INVALID, false,
6497 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings_cnic(bp);
	bnx2x_init_tx_rings_cnic(bp);

	/* flush all */
	mb();
}
void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
	int i;

	/* Setup NIC internals and enable interrupts */
6514 for_each_eth_queue(bp, i)
6515 bnx2x_init_eth_fp(bp, i);
	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	if (IS_PF(bp)) {
		/* Initialize MOD_ABS interrupts */
6524 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6525 bp->common.shmem_base,
6526 bp->common.shmem2_base, BP_PORT(bp));
		/* initialize the default status block and sp ring */
		bnx2x_init_def_sb(bp);
		bnx2x_update_dsb_idx(bp);
		bnx2x_init_sp_ring(bp);
	} else {
		bnx2x_memset_stats(bp);
	}
}
void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
{
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_stats_init(bp);
	/* flush all before enabling interrupts */
	mb();

	bnx2x_int_enable(bp);
6550 /* Check for SPIO5 */
6551 bnx2x_attn_int_deasserted0(bp,
6552 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
		AEU_INPUTS_ATTN_BITS_SPIO5);
}
6556 /* gzip service functions */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	if (bp->strm) {
		vfree(bp->strm->workspace);
		kfree(bp->strm);
		bp->strm = NULL;
	}

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

6617 if (zbuf[3] & FNAME)
6618 while ((zbuf[n++] != 0) && (n < len));
6620 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6621 bp->strm->avail_in = len - n;
6622 bp->strm->next_out = bp->gunzip_buf;
6623 bp->strm->avail_out = FW_BUF_SIZE;
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
6630 if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev,
			   "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;
6641 zlib_inflateEnd(bp->strm);
	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
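/* The header handling above follows RFC 1952: bytes 0-1 are the gzip
 * magic (0x1f 0x8b), byte 2 the compression method (8 = deflate) and
 * byte 3 the flags. When FNAME (0x8) is set, a NUL-terminated original
 * file name follows the fixed 10-byte member header, which is why n is
 * advanced past the first zero byte before the raw deflate stream is fed
 * to zlib (negative windowBits = no zlib/gzip framing). gunzip_outlen
 * ends up in dwords (>>= 2) since the decompressed firmware is
 * subsequently written to the chip as 32-bit words.
 */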
6649 /* nic load/unload */
/*
 * General service functions
 */
6655 /* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
6661 wb_write[0] = 0x55555555;
6662 wb_write[1] = 0x55555555;
6663 wb_write[2] = 0x20; /* SOP */
6664 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6666 /* NON-IP protocol */
6667 wb_write[0] = 0x09000000;
6668 wb_write[1] = 0x55555555;
6669 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
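/* Each debug packet is one fixed-size 0x10-byte frame: the two DMAE
 * writes above each carry 8 bytes of data in wb_write[0]/wb_write[1],
 * with wb_write[2] holding the control bits (0x20 = SOP, 0x10 = EOP).
 * bnx2x_int_mem_test() below relies on this fixed size when it polls the
 * NIG octet counter for 0x10 (one packet) and 0xb0 (11 * 0x10).
 */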
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;
6690 /* Disable inputs of parser neighbor blocks */
6691 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6692 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6693 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6694 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6696 /* Write 0 to parser credits for CFC search request */
6697 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}
	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}
	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
6740 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6741 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6743 DP(NETIF_MSG_HW, "part2\n");
6745 /* Disable inputs of parser neighbor blocks */
6746 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6747 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6748 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6749 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6751 /* Write 0 to parser credits for CFC search request */
6752 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}
	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6781 /* Write 1 to parser credits for CFC search request */
6782 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6784 /* Wait until PRS register shows 3 packets */
6785 msleep(10 * factor);
6786 /* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6791 /* clear NIG EOP FIFO */
6792 for (i = 0; i < 11; i++)
6793 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}
	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
6805 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6806 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);
6811 /* Enable inputs of parser neighbor blocks */
6812 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6813 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6814 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6815 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	u32 val;

	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6827 if (!CHIP_IS_E1x(bp))
6828 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6831 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6832 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
6839 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6840 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6841 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6842 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6843 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6844 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6845 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6846 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6847 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6848 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6849 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6850 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6851 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6852 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6853 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6854 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6855 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6856 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6857 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6859 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6860 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6861 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6862 if (!CHIP_IS_E1x(bp))
6863 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6864 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6865 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6867 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6868 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6869 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6870 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6872 if (!CHIP_IS_E1x(bp))
6873 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6874 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6876 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6877 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6878 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
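/* A mask value of 0 in the *_INT_MASK registers above leaves every
 * attention source of that block enabled; set bits keep known benign
 * sources masked - 0xFC00 in BRB1 hides the documented read-length
 * errors from the parser units, and 0x18 in PBF masks bits 3 and 4 as
 * noted in the trailing comment.
 */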
static void bnx2x_reset_common(struct bnx2x *bp)
{
	u32 val = 0x1400;

	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);

	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
}
static void bnx2x_setup_dmae(struct bnx2x *bp)
{
	bp->dmae_ready = 0;
	spin_lock_init(&bp->dmae_lock);
}
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;
	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
6943 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6944 for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}
6953 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
	if (is_required == 0)
		return;
6958 /* Fan failure is indicated by SPIO 5 */
6959 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6961 /* set to active low mode */
6962 val = REG_RD(bp, MISC_REG_SPIO_INT);
6963 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6964 REG_WR(bp, MISC_REG_SPIO_INT, val);
6966 /* enable interrupt to signal the IGU */
6967 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6968 val |= MISC_SPIO_SPIO5;
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}
static void bnx2x__common_init_phy(struct bnx2x *bp)
{
	u32 shmem_base[2], shmem2_base[2];

	/* Avoid common init in case MFW supports LFA */
	if (SHMEM2_RD(bp, size) >
	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
		return;

	shmem_base[0] = bp->common.shmem_base;
	shmem2_base[0] = bp->common.shmem2_base;

	if (!CHIP_IS_E1x(bp)) {
		shmem_base[1] =
			SHMEM2_RD(bp, other_shmem_base_addr);
		shmem2_base[1] =
			SHMEM2_RD(bp, other_shmem2_base_addr);
	}

	bnx2x_acquire_phy_lock(bp);
	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
			      bp->common.chip_id);
	bnx2x_release_phy_lock(bp);
}
static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
{
7005 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
7006 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7007 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7008 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7009 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7011 /* make sure this value is 0 */
7012 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7014 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7015 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7016 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
}
static void bnx2x_set_endianity(struct bnx2x *bp)
{
#ifdef __BIG_ENDIAN
	bnx2x_config_endianity(bp, 1);
#else
	bnx2x_config_endianity(bp, 0);
#endif
}

static void bnx2x_reset_endianity(struct bnx2x *bp)
{
	bnx2x_config_endianity(bp, 0);
}
/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
	u32 val;
7043 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
	/* take the RESET lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
7049 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7051 bnx2x_reset_common(bp);
7052 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	val = 0xfffc;
	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7061 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7063 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
	if (!CHIP_IS_E1x(bp)) {
		u8 abs_func_id;

		/**
		 * 4-port mode or 2-port mode we need to turn off master-enable
		 * for everyone, after that, turn it back on for self.
		 * so, we disregard multi-function or not, and always disable
		 * for all functions on the given path, this means 0,2,4,6 for
		 * path 0 and 1,3,5,7 for path 1
		 */
7075 for (abs_func_id = BP_PATH(bp);
7076 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
			if (abs_func_id == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, abs_func_id);
7085 /* clear pf enable */
7086 bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}
7091 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7092 if (CHIP_IS_E1(bp)) {
7093 /* enable HW interrupt from PXP on USDM overflow
7094 bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}
7098 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7100 bnx2x_set_endianity(bp);
7101 bnx2x_ilt_init_page_size(bp, INITOP_SET);
7103 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7104 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}
	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
7125 if (!CHIP_IS_E1x(bp)) {
7126 /* In E2 there is a bug in the timers block that can cause function 6 / 7
7127 * (i.e. vnic3) to start even if it is marked as "scan-off".
7128 * This occurs when a different function (func2,3) is being marked
7129 * as "scan-off". Real-life scenario for example: if a driver is being
7130 * load-unloaded while func6,7 are down. This will cause the timer to access
7131 * the ilt, translate to a logical address and send a request to read/write.
7132 * Since the ilt for the function that is down is not valid, this will cause
7133 * a translation error which is unrecoverable.
7134 * The Workaround is intended to make sure that when this happens nothing fatal
7135 * will occur. The workaround:
7136 * 1. First PF driver which loads on a path will:
7137 * a. After taking the chip out of reset, by using pretend,
 * it will write "0" to the following registers of
 * the other vnics.
 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
7141 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
7142 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
7143 * And for itself it will write '1' to
7144 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
7145 * dmae-operations (writing to pram for example.)
 * note: can be done for only function 6,7 but cleaner this
 * way.
 * b. Write zero+valid to the entire ILT.
7149 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
7150 * VNIC3 (of that port). The range allocated will be the
7151 * entire ILT. This is needed to prevent ILT range error.
7152 * 2. Any PF driver load flow:
 * a. ILT update with the physical addresses of the allocated
 *    logical pages.
 * b. Wait 20msec. - note that this timeout is needed to make
7156 * sure there are no requests in one of the PXP internal
7157 * queues with "old" ILT addresses.
7158 * c. PF enable in the PGLC.
7159 * d. Clear the was_error of the PF in the PGLC. (could have
7160 * occurred while driver was down)
7161 * e. PF enable in the CFC (WEAK + STRONG)
7162 * f. Timers scan enable
7163 * 3. PF driver unload flow:
7164 * a. Clear the Timers scan_en.
7165 * b. Polling for scan_on=0 for that PF.
7166 * c. Clear the PF enable bit in the PXP.
7167 * d. Clear the PF enable in the CFC (WEAK + STRONG)
 * e. Write zero+valid to all ILT entries (The valid bit must
 *    stay set)
 * f. If this is VNIC 3 of a port then also init
7171 * first_timers_ilt_entry to zero and last_timers_ilt_entry
 *    to the last entry in the ILT.
 *
 * Notes:
 * Currently the PF error in the PGLC is non recoverable.
 * In the future there will be a recovery routine for this error.
7177 * Currently attention is masked.
7178 * Having an MCP lock on the load/unload process does not guarantee that
7179 * there is no Timer disable during Func6/7 enable. This is because the
7180 * Timers scan is currently being cleared by the MCP on FLR.
7181 * Step 2.d can be done only for PF6/7 and the driver can also check if
 * there is error before clearing it. But the flow above is simpler and
 * more general.
 * All ILT entries are written by zero+valid and not just PF6/7
7185 * ILT entries since in the future the ILT entries allocation for
 * PF-s might be dynamic.
 */
7188 struct ilt_client_info ilt_cli;
7189 struct bnx2x_ilt ilt;
7190 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7191 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7196 ilt_cli.client_num = ILT_CLIENT_TM;
7198 /* Step 1: set zeroes to all ilt page entries with valid bit on
7199 * Step 2: set the timers first/last ilt entry to point
7200 * to the entire range to prevent ILT range error for 3rd/4th
7201 * vnic (this code assumes existence of the vnic)
7203 * both steps performed by call to bnx2x_ilt_client_init_op()
7204 * with dummy TM client
7206 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and his brother are split registers
		 */
7209 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7210 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7211 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7213 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7214 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}
7218 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7219 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7221 if (!CHIP_IS_E1x(bp)) {
7222 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7223 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7224 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7226 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
		/* let the HW do its magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}
7240 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7242 bnx2x_iov_init_dmae(bp);
	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7248 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7250 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7252 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7254 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7256 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7257 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7258 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7259 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7261 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7263 /* QM queues pointers table */
7264 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7266 /* soft reset pulse */
7267 REG_WR(bp, QM_REG_SOFT_RESET, 1);
7268 REG_WR(bp, QM_REG_SOFT_RESET, 0);
7270 if (CNIC_SUPPORT(bp))
7271 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7273 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7275 if (!CHIP_REV_IS_SLOW(bp))
7276 /* enable hw interrupt from doorbell Q */
7277 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7279 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7281 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7282 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7284 if (!CHIP_IS_E1(bp))
7285 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7287 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7288 if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * received in afex mode
			 */
7292 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7293 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7294 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7295 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
		} else {
			/* Bit-map indicating which L2 hdrs may appear
			 * after the basic Ethernet header
			 */
7301 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}
7306 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7307 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7308 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7309 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7311 if (!CHIP_IS_E1x(bp)) {
7312 /* reset VFC memories */
7313 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7314 VFC_MEMORIES_RST_REG_CAM_RST |
7315 VFC_MEMORIES_RST_REG_RAM_RST);
7316 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7317 VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);

		msleep(20);
	}
7323 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7324 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7325 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7326 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);
7334 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7335 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7336 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7338 if (!CHIP_IS_E1x(bp)) {
7339 if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * sent in afex mode
			 */
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7344 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7345 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7346 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
		} else {
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}
7354 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7356 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7358 if (CNIC_SUPPORT(bp)) {
7359 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7360 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7361 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7362 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7363 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7364 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7365 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7366 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7367 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7368 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
	}
	REG_WR(bp, SRC_REG_SOFT_RST, 0);
7372 if (sizeof(union cdu_context) != 1024)
7373 /* we currently assume that a context is 1024 bytes */
7374 dev_alert(&bp->pdev->dev,
7375 "please adjust the size of cdu_context(%ld)\n",
7376 (long)sizeof(union cdu_context));
7378 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7379 val = (4 << 24) + (0 << 12) + 1024;
7380 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7382 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7383 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7384 /* enable context validation interrupt from CFC */
7385 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7387 /* set the thresholds to prevent CFC/CDU race */
7388 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7390 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7392 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7393 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7395 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7396 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7398 /* Reset PCIE errors for debug */
7399 REG_WR(bp, 0x2814, 0xffffffff);
7400 REG_WR(bp, 0x3820, 0xffffffff);
7402 if (!CHIP_IS_E1x(bp)) {
7403 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7404 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7405 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7406 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7407 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7408 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7409 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7410 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7411 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7412 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7413 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7416 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7417 if (!CHIP_IS_E1(bp)) {
7418 /* in E3 this done in per-port section */
7419 if (!CHIP_IS_E3(bp))
			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}

	if (CHIP_IS_E1H(bp))
7423 /* not applicable for E2 (and above ...) */
7424 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);
7447 if (CHIP_IS_E1(bp)) {
7448 /* read NIG statistic
7449 to see if this is our first up since powerup */
7450 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7451 val = *bnx2x_sp(bp, wb_data[0]);
7453 /* do internal memory self test */
7454 if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}
7460 bnx2x_setup_fan_failure_detection(bp);
7462 /* clear PXP2 attentions */
7463 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7465 bnx2x_enable_blocks_attention(bp);
7466 bnx2x_enable_blocks_parity(bp);
7468 if (!BP_NOMCP(bp)) {
7469 if (CHIP_IS_E1x(bp))
			bnx2x__common_init_phy(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7474 if (SHMEM2_HAS(bp, netproc_fw_ver))
		SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));

	return 0;
}
/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
{
	int rc = bnx2x_init_hw_common(bp);

	if (rc)
		return rc;

	/* In E2 2-PORT mode, same ext phy is used for the two paths */
	if (!BP_NOMCP(bp))
		bnx2x__common_init_phy(bp);

	return 0;
}
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
	u32 low, high;
	u32 val, reg;

	DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
7508 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7510 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7511 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7512 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
7519 if (!CHIP_IS_E1x(bp))
7520 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7522 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7523 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7524 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7525 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7527 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7528 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7529 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7530 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7532 /* QM cid (connection) count */
7533 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7535 if (CNIC_SUPPORT(bp)) {
7536 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7537 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7538 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7541 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7543 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		if (IS_MF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) +
				      ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}
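	/* Worked example for the non-MF jumbo branch above: with mtu = 9000
	 * the target (24*1024 + val*4)/256 is (24576 + 36000)/256 = 236.6
	 * 256-byte BRB blocks, and the integer form 96 + val/64 +
	 * ((val % 64) ? 1 : 0) rounds up to 96 + 140 + 1 = 237; high then
	 * becomes 237 + 56 = 293 blocks (56 = 14*1024/256).
	 */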
7565 if (CHIP_MODE_IS_4_PORT(bp))
7566 REG_WR(bp, (BP_PORT(bp) ?
7567 BRB1_REG_MAC_GUARANTIED_1 :
7568 BRB1_REG_MAC_GUARANTIED_0), 40);
7570 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7571 if (CHIP_IS_E3B0(bp)) {
7572 if (IS_MF_AFEX(bp)) {
7573 /* configure headers for AFEX mode */
7574 REG_WR(bp, BP_PORT(bp) ?
7575 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7576 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7577 REG_WR(bp, BP_PORT(bp) ?
7578 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7579 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7580 REG_WR(bp, BP_PORT(bp) ?
7581 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
			       PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
		} else {
			/* Ovlan exists only if we are in multi-function +
			 * switch-dependent mode, in switch-independent there
			 * is no ovlan headers
			 */
7588 REG_WR(bp, BP_PORT(bp) ?
7589 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7590 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
			       (bp->path_has_ovlan ? 7 : 6));
		}
	}
7595 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7596 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7597 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7598 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7600 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7601 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7602 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7603 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7605 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7606 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7608 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7610 if (CHIP_IS_E1x(bp)) {
7611 /* configure PBF to work without PAUSE mtu 9000 */
7612 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7614 /* update threshold */
7615 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7616 /* update init credit */
7617 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}
7625 if (CNIC_SUPPORT(bp))
7626 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7628 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7629 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7631 if (CHIP_IS_E1(bp)) {
7632 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, BLOCK_HC, init_phase);
7637 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7639 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
7644 val = IS_MF(bp) ? 0xF7 : 0x7;
7645 /* Enable DCBX attention for all but E1 */
7646 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7647 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7649 /* SCPAD_PARITY should NOT trigger close the gates */
7650 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
	REG_WR(bp, reg,
	       REG_RD(bp, reg) &
	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7655 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
	REG_WR(bp, reg,
	       REG_RD(bp, reg) &
	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7660 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7662 if (!CHIP_IS_E1x(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		 * basic Ethernet header
		 */
		if (IS_MF_AFEX(bp))
7667 REG_WR(bp, BP_PORT(bp) ?
7668 NIG_REG_P1_HDRS_AFTER_BASIC :
7669 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
		else
			REG_WR(bp, BP_PORT(bp) ?
7672 NIG_REG_P1_HDRS_AFTER_BASIC :
7673 NIG_REG_P0_HDRS_AFTER_BASIC,
7674 IS_MF_SD(bp) ? 7 : 6);
		if (CHIP_IS_E3(bp))
			REG_WR(bp, BP_PORT(bp) ?
7678 NIG_REG_LLH1_MF_MODE :
			       NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
7681 if (!CHIP_IS_E3(bp))
7682 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7684 if (!CHIP_IS_E1(bp)) {
7685 /* 0x2 disable mf_ov, 0x1 enable */
7686 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7687 (IS_MF_SD(bp) ? 0x1 : 0x2));
7689 if (!CHIP_IS_E1x(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
			case MULTI_FUNCTION_AFEX:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
	}
7711 /* If SPIO5 is set to generate interrupts, enable it for this port */
7712 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7713 if (val & MISC_SPIO_SPIO5) {
7714 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7715 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7716 val = REG_RD(bp, reg_addr);
7717 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}

	return 0;
}
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;
	u32 wb_write[2];

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7734 wb_write[0] = ONCHIP_ADDR1(addr);
7735 wb_write[1] = ONCHIP_ADDR2(addr);
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
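/* An ILT line is a 64-bit entry (hence the index*8 addressing above);
 * ONCHIP_ADDR1() and ONCHIP_ADDR2() split the DMA address into the two
 * 32-bit words of the line, and the single two-word REG_WR_DMAE() updates
 * both halves in one DMAE transaction.
 */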
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
{
	u32 data, ctl, cnt = 100;
7742 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7743 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7744 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7745 u32 sb_bit = 1 << (idu_sb_id%32);
7746 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7747 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7749 /* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;
7753 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7754 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7755 IGU_REGULAR_CLEANUP_SET |
7756 IGU_REGULAR_BCLEANUP;
7758 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7759 func_encode << IGU_CTRL_REG_FID_SHIFT |
7760 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7762 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7763 data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			  ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);
7777 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7779 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7780 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
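/* The cleanup command is issued in two halves: the data word programs the
 * cleanup type and the set/bclean bits, while the control word encodes
 * the target producer-update address, the (PF/VF) function id and a write
 * command. Completion is signalled by the per-SB bit in the cstorm
 * cleanup register, polled above with a budget of 100 * 20 ms.
 */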
static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
}
static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
static void bnx2x_init_searcher(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
}
static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
{
	int rc;
	struct bnx2x_func_state_params func_params = {NULL};
7808 struct bnx2x_func_switch_update_params *switch_update_params =
7809 &func_params.params.switch_update;
7811 /* Prepare parameters for function state transitions */
7812 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7813 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7815 func_params.f_obj = &bp->func_obj;
7816 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7818 /* Function parameters */
7819 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7820 &switch_update_params->changes);
	if (suspend)
		__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7823 &switch_update_params->changes);
	rc = bnx2x_func_state_change(bp, &func_params);

	return rc;
}
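/* RAMROD_COMP_WAIT makes the state change synchronous - the caller blocks
 * until the FW completion arrives - and RAMROD_RETRY re-issues the
 * command if the function object is momentarily busy. Both matter here:
 * callers toggle Tx switching around NIC_MODE changes and must not touch
 * PRS_REG_NIC_MODE before the ramrod has completed.
 */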
static int bnx2x_reset_nic_mode(struct bnx2x *bp)
{
	int rc, i, port = BP_PORT(bp);
7833 int vlan_en = 0, mac_en[NUM_MACS];
7835 /* Close input from network */
7836 if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 0);
	} else {
		vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7840 NIG_REG_LLH0_FUNC_EN);
7841 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7842 NIG_REG_LLH0_FUNC_EN, 0);
		for (i = 0; i < NUM_MACS; i++) {
			mac_en[i] = REG_RD(bp, port ?
					     (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					      4 * i) :
					     (NIG_REG_LLH0_FUNC_MEM_ENABLE +
					      4 * i));
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
		}
	}

	/* Close BMC to host */
	REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
	       NIG_REG_P0_TX_MNG_HOST_ENABLE, 0);
7859 /* Suspend Tx switching to the PF. Completion of this ramrod
7860 * further guarantees that all the packets of that PF / child
7861 * VFs in BRB were processed by the Parser, so it is safe to
	 * change the NIC_MODE register.
	 */
	rc = bnx2x_func_switch_update(bp, 1);
	if (rc) {
		BNX2X_ERR("Can't suspend tx-switching!\n");
		return rc;
	}
7870 /* Change NIC_MODE register */
7871 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7873 /* Open input from network */
7874 if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 1);
	} else {
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7878 NIG_REG_LLH0_FUNC_EN, vlan_en);
7879 for (i = 0; i < NUM_MACS; i++) {
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
				   mac_en[i]);
		}
	}

	/* Enable BMC to host */
	REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
	       NIG_REG_P0_TX_MNG_HOST_ENABLE, 1);
7891 /* Resume Tx switching to the PF */
	rc = bnx2x_func_switch_update(bp, 0);
	if (rc) {
		BNX2X_ERR("Can't resume tx-switching!\n");
		return rc;
	}

	DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	return 0;
}
int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
{
	int rc;

	bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7908 if (CONFIGURE_NIC_MODE(bp)) {
7909 /* Configure searcher as part of function hw init */
7910 bnx2x_init_searcher(bp);
7912 /* Reset NIC mode */
		rc = bnx2x_reset_nic_mode(bp);
		if (rc)
			BNX2X_ERR("Can't change NIC mode!\n");
		return rc;
	}

	return 0;
}
/* previous driver DMAE transaction may have occurred when pre-boot stage ended
 * and boot began, or when kdump kernel was loaded. Either case would invalidate
 * the addresses of the transaction, resulting in was-error bit set in the pci
 * causing all hw-to-host pcie transactions to timeout. If this happened we want
 * to clear the interrupt which detected this from the pglueb and the was done
 * bit
 */
static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
{
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
		       1 << BP_ABS_FUNC(bp));
}
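/* Per the register naming, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR is a
 * write-one-to-clear register with one bit per absolute PF id, so the
 * write above clears only this function's latched was-error bit and
 * leaves the other PFs untouched.
 */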
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
7939 int func = BP_FUNC(bp);
7940 int init_phase = PHASE_PF0 + func;
7941 struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7945 int i, main_mem_width, rc;
7947 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7949 /* FLR cleanup - hmmm */
7950 if (!CHIP_IS_E1x(bp)) {
		rc = bnx2x_pf_flr_clnup(bp);
		if (rc) {
			bnx2x_fw_dump(bp);
			return rc;
		}
	}
7958 /* set MSI reconfigure capability */
7959 if (bp->common.int_block == INT_BLOCK_HC) {
7960 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7961 val = REG_RD(bp, addr);
7962 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}
7966 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7967 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7970 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
	if (IS_SRIOV(bp))
		cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7974 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7976 /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
	 * those of the VFs, so start line should be reset
	 */
7979 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7980 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7981 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7982 ilt->lines[cdu_ilt_start + i].page_mapping =
7983 bp->context[i].cxt_mapping;
7984 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
	}

	bnx2x_ilt_init_op(bp, INITOP_SET);
7989 if (!CONFIGURE_NIC_MODE(bp)) {
7990 bnx2x_init_searcher(bp);
7991 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7992 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	} else {
		/* Set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);
		DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
	}
7999 if (!CHIP_IS_E1x(bp)) {
8000 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
8006 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8020 /* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;
8026 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8028 bnx2x_clean_pglue_errors(bp);
8030 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8031 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8032 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8033 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8034 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8035 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8036 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8037 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8038 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8039 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8040 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8041 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8042 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8044 if (!CHIP_IS_E1x(bp))
8045 REG_WR(bp, QM_REG_PF_EN, 1);
8047 if (!CHIP_IS_E1x(bp)) {
8048 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8049 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8050 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8051 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8053 bnx2x_init_block(bp, BLOCK_QM, init_phase);
8055 bnx2x_init_block(bp, BLOCK_TM, init_phase);
8056 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8057 REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
8059 bnx2x_iov_init_dq(bp);
8061 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8062 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8063 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8064 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8065 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8066 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8067 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8068 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8069 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8070 if (!CHIP_IS_E1x(bp))
8071 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8073 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8075 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8077 if (!CHIP_IS_E1x(bp))
8078 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
	if (IS_MF(bp)) {
		if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8082 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
			REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
			       bp->mf_ov);
		}
	}

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8090 /* HC init per function */
8091 if (bp->common.int_block == INT_BLOCK_HC) {
8092 if (CHIP_IS_E1H(bp)) {
8093 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8095 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8096 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, BLOCK_HC, init_phase);
	} else {
		int num_segs, sb_idx, prod_offset;
8103 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8105 if (!CHIP_IS_E1x(bp)) {
8106 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8112 if (!CHIP_IS_E1x(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
8117 * 136 - PF0 default prod; 137 - PF1 default prod;
8118 * 138 - PF2 default prod; 139 - PF3 default prod;
8119 * 140 - PF0 attn prod; 141 - PF1 attn prod;
8120 * 142 - PF2 attn prod; 143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
8124 * for non default SB; each even line in the memory
8125 * holds the U producer and each odd line hold
8126 * the C producer. The first 128 producers are for
8127 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
8128 * producers are for the DSB for each PF.
8129 * Each PF has five segments: (the order inside each
8130 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
8131 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
8134 /* non-default-status-blocks */
8135 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8136 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8137 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
8142 addr = IGU_REG_PROD_CONS_MEMORY +
8143 (prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
8147 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8148 USTORM_ID, 0, IGU_INT_NOP, 1);
8149 bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}
8153 /* default-status-blocks */
8154 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8155 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8157 if (CHIP_MODE_IS_4_PORT(bp))
8158 dsb_idx = BP_FUNC(bp);
8160 dsb_idx = BP_VN(bp);
8162 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8163 IGU_BC_BASE_DSB_PROD + dsb_idx :
8164 IGU_NORM_BASE_DSB_PROD + dsb_idx);
			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * it does not matter what the current chip mode is
			 */
			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
8172 addr = IGU_REG_PROD_CONS_MEMORY +
8173 (prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
8176 /* send consumer update with 0 */
8177 if (CHIP_INT_MODE_IS_BC(bp)) {
8178 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8179 USTORM_ID, 0, IGU_INT_NOP, 1);
8180 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8181 CSTORM_ID, 0, IGU_INT_NOP, 1);
8182 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8183 XSTORM_ID, 0, IGU_INT_NOP, 1);
8184 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8185 TSTORM_ID, 0, IGU_INT_NOP, 1);
8186 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8187 ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
8190 USTORM_ID, 0, IGU_INT_NOP, 1);
8191 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8192 ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8196 /* !!! These should become driver const once
8197 rf-tool supports split-68 const */
8198 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8199 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8200 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8201 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8202 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}
8207 /* Reset PCIE errors for debug */
8208 REG_WR(bp, 0x2114, 0xffffffff);
8209 REG_WR(bp, 0x2120, 0xffffffff);
8211 if (CHIP_IS_E1x(bp)) {
8212 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
8213 main_mem_base = HC_REG_MAIN_MEMORY +
8214 BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(NETIF_MSG_HW,
			   "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
8224 /* Clear "false" parity errors in MSI-X table */
8225 for (i = main_mem_base;
8226 i < main_mem_base + main_mem_size * 4;
8227 i += main_mem_width) {
8228 bnx2x_read_dmae(bp, i, main_mem_width / 4);
8229 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8230 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}
8236 #ifdef BNX2X_STOP_ON_ERROR
8237 /* Enable STORMs SP logging */
8238 REG_WR8(bp, BAR_USTRORM_INTMEM +
8239 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8240 REG_WR8(bp, BAR_TSTRORM_INTMEM +
8241 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8242 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8243 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8244 REG_WR8(bp, BAR_XSTRORM_INTMEM +
8245 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
#endif

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}
void bnx2x_free_mem_cnic(struct bnx2x *bp)
{
	bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8257 if (!CHIP_IS_E1x(bp))
8258 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8259 sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8262 sizeof(struct host_hc_status_block_e1x));
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
}
void bnx2x_free_mem(struct bnx2x *bp)
{
	int i;

	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	if (IS_VF(bp))
		return;

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8278 sizeof(struct host_sp_status_block));
8280 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8281 sizeof(struct bnx2x_slowpath));
8283 for (i = 0; i < L2_ILT_LINES(bp); i++)
8284 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8285 bp->context[i].size);
8286 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8288 BNX2X_FREE(bp->ilt->lines);
8290 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8292 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8293 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8295 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
	bnx2x_iov_free_mem(bp);
}
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
	if (!CHIP_IS_E1x(bp)) {
8303 /* size = the status block + ramrod buffers */
8304 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8305 sizeof(struct host_hc_status_block_e2));
		if (!bp->cnic_sb.e2_sb)
			goto alloc_mem_err;
	} else {
		bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8310 sizeof(struct host_hc_status_block_e1x));
		if (!bp->cnic_sb.e1x_sb)
			goto alloc_mem_err;
	}

	if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8316 /* allocate searcher T2 table, as it wasn't allocated before */
		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
		if (!bp->t2)
			goto alloc_mem_err;
	}

	/* write address to which L5 should insert its values */
8323 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8324 &bp->slowpath->drv_info_to_mcp;
	if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2x_free_mem_cnic(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}
int bnx2x_alloc_mem(struct bnx2x *bp)
{
	int i, allocated, context_size;

	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8342 /* allocate searcher T2 table */
		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
		if (!bp->t2)
			goto alloc_mem_err;
	}

	bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8349 sizeof(struct host_sp_status_block));
	if (!bp->def_status_blk)
		goto alloc_mem_err;

	bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
				       sizeof(struct bnx2x_slowpath));
	if (!bp->slowpath)
		goto alloc_mem_err;
8358 /* Allocate memory for CDU context:
8359 * This memory is allocated separately and not in the generic ILT
8360 * functions because CDU differs in few aspects:
8361 * 1. There are multiple entities allocating memory for context -
8362 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
8363 * its own ILT lines.
8364 * 2. Since CDU page-size is not a single 4KB page (which is the case
8365 * for the other ILT clients), to be efficient we want to support
8366 * allocation of sub-page-size in the last entry.
8367 * 3. Context pointers are used by the driver to pass to FW / update
8368 * the context (for the other ILT clients the pointers are used just to
	 * free the memory during unload).
	 */
8371 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8373 for (i = 0, allocated = 0; allocated < context_size; i++) {
8374 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8375 (context_size - allocated));
8376 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8377 bp->context[i].size);
		if (!bp->context[i].vcxt)
			goto alloc_mem_err;
		allocated += bp->context[i].size;
	}
8382 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
	if (!bp->ilt->lines)
		goto alloc_mem_err;

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	if (bnx2x_iov_alloc_mem(bp))
		goto alloc_mem_err;
8393 /* Slow path ring */
	bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
	if (!bp->spq)
		goto alloc_mem_err;

	/* EQ */
	bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
				      BCM_PAGE_SIZE * NUM_EQ_PAGES);
	if (!bp->eq_ring)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}
/*
 * Init service functions
 */
8416 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8417 struct bnx2x_vlan_mac_obj *obj, bool set,
		      int mac_type, unsigned long *ramrod_flags)
{
	int rc;
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8423 memset(&ramrod_param, 0, sizeof(ramrod_param));
8425 /* Fill general parameters */
8426 ramrod_param.vlan_mac_obj = obj;
8427 ramrod_param.ramrod_flags = *ramrod_flags;
8429 /* Fill a user request section if needed */
8430 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8431 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8433 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
		/* Set the command: ADD or DEL */
		if (set)
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		else
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
	}
8442 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8444 if (rc == -EEXIST) {
8445 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
		/* do not treat adding same MAC as error */
		rc = 0;
	} else if (rc < 0)
		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));

	return rc;
}
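/* -EEXIST from bnx2x_config_vlan_mac() means the MAC is already in the
 * object's registry; it is folded into success above so that re-adding
 * the same address (e.g. across an ifdown/ifup cycle) stays idempotent.
 */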
8454 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8455 struct bnx2x_vlan_mac_obj *obj, bool set,
		       unsigned long *ramrod_flags)
{
	int rc;
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8461 memset(&ramrod_param, 0, sizeof(ramrod_param));
8463 /* Fill general parameters */
8464 ramrod_param.vlan_mac_obj = obj;
8465 ramrod_param.ramrod_flags = *ramrod_flags;
8467 /* Fill a user request section if needed */
8468 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8469 ramrod_param.user_req.u.vlan.vlan = vlan;
8470 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
		/* Set the command: ADD or DEL */
		if (set)
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		else
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
	}
8478 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8480 if (rc == -EEXIST) {
8481 /* Do not treat adding same vlan as error. */
		DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
		rc = 0;
	} else if (rc < 0) {
		BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
	}

	return rc;
}
static int bnx2x_del_all_vlans(struct bnx2x *bp)
{
	struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8494 unsigned long ramrod_flags = 0, vlan_flags = 0;
8495 struct bnx2x_vlan_entry *vlan;
8498 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8499 __set_bit(BNX2X_VLAN, &vlan_flags);
	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
	if (rc)
		return rc;

	/* Mark that hw forgot all entries */
	list_for_each_entry(vlan, &bp->vlan_reg, link)
		vlan->hw = false;
	bp->vlan_cnt = 0;

	return 0;
}
8512 int bnx2x_del_all_macs(struct bnx2x *bp,
8513 struct bnx2x_vlan_mac_obj *mac_obj,
		       int mac_type, bool wait_for_comp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	/* Wait for completion of requested */
	if (wait_for_comp)
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8523 /* Set the mac type of addresses we want to clear */
8524 __set_bit(mac_type, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
	if (rc < 0)
		BNX2X_ERR("Failed to delete MACs: %d\n", rc);

	return rc;
}
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
	if (IS_PF(bp)) {
		unsigned long ramrod_flags = 0;
8538 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8539 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8540 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8541 &bp->sp_objs->mac_obj, set,
8542 BNX2X_ETH_MAC, &ramrod_flags);
8544 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8545 bp->fp->index, set);
8549 int bnx2x_setup_leading(struct bnx2x *bp)
8552 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8554 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8558 * bnx2x_set_int_mode - configure interrupt mode
8560 * @bp: driver handle
8562 * In case of MSI-X it will also try to enable MSI-X.
8564 int bnx2x_set_int_mode(struct bnx2x *bp)
8568 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8569 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
8574 case BNX2X_INT_MODE_MSIX:
8575 /* attempt to enable msix */
8576 rc = bnx2x_enable_msix(bp);
8582 /* vfs use only msix */
8583 if (rc && IS_VF(bp))
8586 /* failed to enable multiple MSI-X */
8587 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8589 1 + bp->num_cnic_queues);
8591 /* falling through... */
8592 case BNX2X_INT_MODE_MSI:
8593 bnx2x_enable_msi(bp);
8595 /* falling through... */
8596 case BNX2X_INT_MODE_INTX:
8597 bp->num_ethernet_queues = 1;
8598 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8599 BNX2X_DEV_INFO("set number of queues to 1\n");
8602 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8608 /* must be called prior to any HW initializations */
8609 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8612 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8613 return L2_ILT_LINES(bp);
8616 void bnx2x_ilt_set_info(struct bnx2x *bp)
8618 struct ilt_client_info *ilt_client;
8619 struct bnx2x_ilt *ilt = BP_ILT(bp);
8622 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8623 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8626 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8627 ilt_client->client_num = ILT_CLIENT_CDU;
8628 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8629 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8630 ilt_client->start = line;
8631 line += bnx2x_cid_ilt_lines(bp);
8633 if (CNIC_SUPPORT(bp))
8634 line += CNIC_ILT_LINES;
8635 ilt_client->end = line - 1;
8637 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8640 ilt_client->page_size,
8642 ilog2(ilt_client->page_size >> 12));
8645 if (QM_INIT(bp->qm_cid_count)) {
8646 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8647 ilt_client->client_num = ILT_CLIENT_QM;
8648 ilt_client->page_size = QM_ILT_PAGE_SZ;
8649 ilt_client->flags = 0;
8650 ilt_client->start = line;
8652 /* 4 bytes for each cid */
8653 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8656 ilt_client->end = line - 1;
8659 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8662 ilt_client->page_size,
8664 ilog2(ilt_client->page_size >> 12));
8667 if (CNIC_SUPPORT(bp)) {
8669 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8670 ilt_client->client_num = ILT_CLIENT_SRC;
8671 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8672 ilt_client->flags = 0;
8673 ilt_client->start = line;
8674 line += SRC_ILT_LINES;
8675 ilt_client->end = line - 1;
8678 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8681 ilt_client->page_size,
8683 ilog2(ilt_client->page_size >> 12));
8686 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8687 ilt_client->client_num = ILT_CLIENT_TM;
8688 ilt_client->page_size = TM_ILT_PAGE_SZ;
8689 ilt_client->flags = 0;
8690 ilt_client->start = line;
8691 line += TM_ILT_LINES;
8692 ilt_client->end = line - 1;
8695 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8698 ilt_client->page_size,
8700 ilog2(ilt_client->page_size >> 12));
8703 BUG_ON(line > ILT_MAX_LINES);
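/* A simplified view of the line accounting above (not driver code): each
 * client claims a contiguous [start, end] range of ILT lines and then
 * advances 'line', so the clients end up packed back-to-back:
 *
 *	|-- CDU --|-- QM --|-- SRC --|-- TM --|
 *	                                      ^ line
 *
 * The BUG_ON() guards the accumulated total against ILT_MAX_LINES.
 */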
8707 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8709 * @bp: driver handle
8710 * @fp: pointer to fastpath
8711 * @init_params: pointer to parameters structure
8713 * parameters configured:
8714 * - HC configuration
8715 * - Queue's CDU context
8717 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8718 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8721 int cxt_index, cxt_offset;
8723 /* FCoE Queue uses Default SB, thus has no HC capabilities */
8724 if (!IS_FCOE_FP(fp)) {
8725 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8726 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8728 /* If HC is supported, enable host coalescing in the transition
8731 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8732 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8735 init_params->rx.hc_rate = bp->rx_ticks ?
8736 (1000000 / bp->rx_ticks) : 0;
8737 init_params->tx.hc_rate = bp->tx_ticks ?
8738 (1000000 / bp->tx_ticks) : 0;
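/* A worked example of the coalescing-rate conversion above, assuming
 * rx_ticks/tx_ticks hold the coalescing interval in microseconds (as
 * configured via ethtool -C): rx_ticks = 25 gives
 * hc_rate = 1000000 / 25 = 40000 status-block updates per second at most,
 * while rx_ticks = 0 disables timeout-based coalescing (hc_rate = 0).
 */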
8741 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8745 * CQ index among the SB indices: FCoE clients use the default
8746 * SB, therefore it's different.
8748 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8749 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8752 /* set maximum number of COSs supported by this queue */
8753 init_params->max_cos = fp->max_cos;
8755 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8756 fp->index, init_params->max_cos);
8758 /* set the context pointers in the queue object */
8759 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8760 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8761 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8763 init_params->cxts[cos] =
8764 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8768 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8769 struct bnx2x_queue_state_params *q_params,
8770 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8771 int tx_index, bool leading)
8773 memset(tx_only_params, 0, sizeof(*tx_only_params));
8775 /* Set the command */
8776 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8778 /* Set tx-only QUEUE flags: don't zero statistics */
8779 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8781 /* choose the index of the cid to send the slow path on */
8782 tx_only_params->cid_index = tx_index;
8784 /* Set general TX_ONLY_SETUP parameters */
8785 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8787 /* Set Tx TX_ONLY_SETUP parameters */
8788 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8791 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8792 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8793 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8794 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8796 /* send the ramrod */
8797 return bnx2x_queue_state_change(bp, q_params);
8801 * bnx2x_setup_queue - setup queue
8803 * @bp: driver handle
8804 * @fp: pointer to fastpath
8805 * @leading: is leading
8807 * This function performs 2 steps in a Queue state machine
8808 * actually: 1) RESET->INIT 2) INIT->SETUP
8811 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8814 struct bnx2x_queue_state_params q_params = {NULL};
8815 struct bnx2x_queue_setup_params *setup_params =
8816 &q_params.params.setup;
8817 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8818 &q_params.params.tx_only;
8822 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8824 /* reset IGU state, skipping the FCoE L2 queue */
8825 if (!IS_FCOE_FP(fp))
8826 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8829 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8830 /* We want to wait for completion in this context */
8831 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8833 /* Prepare the INIT parameters */
8834 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8836 /* Set the command */
8837 q_params.cmd = BNX2X_Q_CMD_INIT;
8839 /* Change the state to INIT */
8840 rc = bnx2x_queue_state_change(bp, &q_params);
8842 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8846 DP(NETIF_MSG_IFUP, "init complete\n");
8848 /* Now move the Queue to the SETUP state... */
8849 memset(setup_params, 0, sizeof(*setup_params));
8851 /* Set QUEUE flags */
8852 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8854 /* Set general SETUP parameters */
8855 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8856 FIRST_TX_COS_INDEX);
8858 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8859 &setup_params->rxq_params);
8861 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8862 FIRST_TX_COS_INDEX);
8864 /* Set the command */
8865 q_params.cmd = BNX2X_Q_CMD_SETUP;
8868 bp->fcoe_init = true;
8870 /* Change the state to SETUP */
8871 rc = bnx2x_queue_state_change(bp, &q_params);
8873 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8877 /* loop through the relevant tx-only indices */
8878 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8879 tx_index < fp->max_cos;
8882 /* prepare and send tx-only ramrod */
8883 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8884 tx_only_params, tx_index, leading);
8886 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8887 fp->index, tx_index);
8895 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8897 struct bnx2x_fastpath *fp = &bp->fp[index];
8898 struct bnx2x_fp_txdata *txdata;
8899 struct bnx2x_queue_state_params q_params = {NULL};
8902 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8904 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8905 /* We want to wait for completion in this context */
8906 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8908 /* close tx-only connections */
8909 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8910 tx_index < fp->max_cos;
8913 /* ascertain this is a normal queue */
8914 txdata = fp->txdata_ptr[tx_index];
8916 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8919 /* send terminate ramrod on tx-only connection */
8920 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8921 memset(&q_params.params.terminate, 0,
8922 sizeof(q_params.params.terminate));
8923 q_params.params.terminate.cid_index = tx_index;
8925 rc = bnx2x_queue_state_change(bp, &q_params);
8929 /* send cfc del ramrod on tx-only connection */
8930 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8931 memset(&q_params.params.cfc_del, 0,
8932 sizeof(q_params.params.cfc_del));
8933 q_params.params.cfc_del.cid_index = tx_index;
8934 rc = bnx2x_queue_state_change(bp, &q_params);
8938 /* Stop the primary connection: */
8939 /* ...halt the connection */
8940 q_params.cmd = BNX2X_Q_CMD_HALT;
8941 rc = bnx2x_queue_state_change(bp, &q_params);
8945 /* ...terminate the connection */
8946 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8947 memset(&q_params.params.terminate, 0,
8948 sizeof(q_params.params.terminate));
8949 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8950 rc = bnx2x_queue_state_change(bp, &q_params);
8953 /* ...delete cfc entry */
8954 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8955 memset(&q_params.params.cfc_del, 0,
8956 sizeof(q_params.params.cfc_del));
8957 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8958 return bnx2x_queue_state_change(bp, &q_params);
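/* The teardown above mirrors queue setup in reverse. A condensed sketch of
 * the ramrod sequence for a queue with one tx-only cos:
 *
 *	TERMINATE(tx-only cid), CFC_DEL(tx-only cid)
 *	HALT(primary cid), TERMINATE(primary cid), CFC_DEL(primary cid)
 *
 * i.e. every tx-only connection is terminated and its CFC entry deleted
 * before the primary connection is halted and torn down.
 */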
8961 static void bnx2x_reset_func(struct bnx2x *bp)
8963 int port = BP_PORT(bp);
8964 int func = BP_FUNC(bp);
8967 /* Disable the function in the FW */
8968 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8969 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8970 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8971 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8974 for_each_eth_queue(bp, i) {
8975 struct bnx2x_fastpath *fp = &bp->fp[i];
8976 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8977 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8981 if (CNIC_LOADED(bp))
8983 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8984 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8985 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8988 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8989 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8992 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8993 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8997 if (bp->common.int_block == INT_BLOCK_HC) {
8998 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8999 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
9001 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
9002 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
9005 if (CNIC_LOADED(bp)) {
9006 /* Disable Timer scan */
9007 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9009 * Wait for at least 10ms and up to 2 seconds for the timers
9012 for (i = 0; i < 200; i++) {
9013 usleep_range(10000, 20000);
9014 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
9019 bnx2x_clear_func_ilt(bp, func);
9021 /* Timers workaround for an E2 bug: if this is vnic-3,
9022 * we need to set the entire ILT range for the timers client.
9024 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9025 struct ilt_client_info ilt_cli;
9026 /* use dummy TM client */
9027 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
9029 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9030 ilt_cli.client_num = ILT_CLIENT_TM;
9032 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9035 /* this assumes that reset_port() was called before reset_func() */
9036 if (!CHIP_IS_E1x(bp))
9037 bnx2x_pf_disable(bp);
9042 static void bnx2x_reset_port(struct bnx2x *bp)
9044 int port = BP_PORT(bp);
9047 /* Reset physical Link */
9048 bnx2x__link_reset(bp);
9050 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9052 /* Do not rcv packets to BRB */
9053 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9054 /* Do not direct rcv packets that are not for MCP to the BRB */
9055 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9056 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9059 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9062 /* Check for BRB port occupancy */
9063 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9065 DP(NETIF_MSG_IFDOWN,
9066 "BRB1 is not empty %d blocks are occupied\n", val);
9068 /* TODO: Close Doorbell port? */
9071 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9073 struct bnx2x_func_state_params func_params = {NULL};
9075 /* Prepare parameters for function state transitions */
9076 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9078 func_params.f_obj = &bp->func_obj;
9079 func_params.cmd = BNX2X_F_CMD_HW_RESET;
9081 func_params.params.hw_init.load_phase = load_code;
9083 return bnx2x_func_state_change(bp, &func_params);
9086 static int bnx2x_func_stop(struct bnx2x *bp)
9088 struct bnx2x_func_state_params func_params = {NULL};
9091 /* Prepare parameters for function state transitions */
9092 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9093 func_params.f_obj = &bp->func_obj;
9094 func_params.cmd = BNX2X_F_CMD_STOP;
9097 * Try to stop the function the 'good way'. If it fails (in case
9098 * of a parity error during bnx2x_chip_cleanup()) and we are
9099 * not in a debug mode, perform a state transaction in order to
9100 * enable further HW_RESET transactions.
9102 rc = bnx2x_func_state_change(bp, &func_params);
9104 #ifdef BNX2X_STOP_ON_ERROR
9107 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9108 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9109 return bnx2x_func_state_change(bp, &func_params);
9117 * bnx2x_send_unload_req - request unload mode from the MCP.
9119 * @bp: driver handle
9120 * @unload_mode: requested function's unload mode
9122 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
9124 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9127 int port = BP_PORT(bp);
9129 /* Select the UNLOAD request mode */
9130 if (unload_mode == UNLOAD_NORMAL)
9131 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9133 else if (bp->flags & NO_WOL_FLAG)
9134 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9137 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9138 u8 *mac_addr = bp->dev->dev_addr;
9139 struct pci_dev *pdev = bp->pdev;
9143 /* The mac address is written to entries 1-4 to
9144 * preserve entry 0 which is used by the PMF
9146 u8 entry = (BP_VN(bp) + 1)*8;
9148 val = (mac_addr[0] << 8) | mac_addr[1];
9149 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9151 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9152 (mac_addr[4] << 8) | mac_addr[5];
9153 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
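/* MAC-match packing illustrated with a made-up address 00:11:22:33:44:55:
 * the first register receives the two high bytes, 0x00000011, and the
 * second receives the remaining four, 0x22334455 - a 2/4 big-endian split
 * of the 6-byte MAC across the register pair.
 */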
9155 /* Enable the PME and clear the status */
9156 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9157 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9158 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9160 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9163 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9165 /* Send the request to the MCP */
9167 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9169 int path = BP_PATH(bp);
9171 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
9172 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9173 bnx2x_load_count[path][2]);
9174 bnx2x_load_count[path][0]--;
9175 bnx2x_load_count[path][1 + port]--;
9176 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
9177 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9178 bnx2x_load_count[path][2]);
9179 if (bnx2x_load_count[path][0] == 0)
9180 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9181 else if (bnx2x_load_count[path][1 + port] == 0)
9182 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9184 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
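/* Example of the NO-MCP bookkeeping above: load_count[path] holds
 * { global, port0, port1 }. With one function loaded on each port the
 * counts are {2, 1, 1}; unloading the port-0 function yields {1, 0, 1}
 * and answers UNLOAD_PORT, and unloading the last function yields
 * {0, 0, 0} and answers UNLOAD_COMMON. Any other state answers
 * UNLOAD_FUNCTION.
 */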
9191 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
9193 * @bp: driver handle
9194 * @keep_link: true iff link should be kept up
9196 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9198 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9200 /* Report UNLOAD_DONE to MCP */
9202 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9205 static int bnx2x_func_wait_started(struct bnx2x *bp)
9208 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9214 * (assumption: no attention from the MCP at this stage)
9215 * The PMF is probably in the middle of a TX disable/enable transaction:
9216 * 1. Sync the IRQ for the default SB
9217 * 2. Sync the SP queue - this guarantees that attention handling started
9218 * 3. Wait until the TX disable/enable transaction completes
9220 * 1+2 guarantee that if a DCBx attention was scheduled it already changed
9221 * the pending bit of the transaction from STARTED-->TX_STOPPED; if we
9222 * already received the completion, the state is TX_STOPPED.
9223 * The state will return to STARTED after completion of TX_STOPPED-->STARTED
9227 /* make sure default SB ISR is done */
9229 synchronize_irq(bp->msix_table[0].vector);
9231 synchronize_irq(bp->pdev->irq);
9233 flush_workqueue(bnx2x_wq);
9234 flush_workqueue(bnx2x_iov_wq);
9236 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9237 BNX2X_F_STATE_STARTED && tout--)
9240 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9241 BNX2X_F_STATE_STARTED) {
9242 #ifdef BNX2X_STOP_ON_ERROR
9243 BNX2X_ERR("Wrong function state\n");
9247 * Failed to complete the transaction in a "good way";
9248 * force both transactions with the CLR bit.
9250 struct bnx2x_func_state_params func_params = {NULL};
9252 DP(NETIF_MSG_IFDOWN,
9253 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9255 func_params.f_obj = &bp->func_obj;
9256 __set_bit(RAMROD_DRV_CLR_ONLY,
9257 &func_params.ramrod_flags);
9259 /* STARTED-->TX_STOPPED */
9260 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9261 bnx2x_func_state_change(bp, &func_params);
9263 /* TX_STOPPED-->STARTED */
9264 func_params.cmd = BNX2X_F_CMD_TX_START;
9265 return bnx2x_func_state_change(bp, &func_params);
9272 static void bnx2x_disable_ptp(struct bnx2x *bp)
9274 int port = BP_PORT(bp);
9276 /* Disable sending PTP packets to host */
9277 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9278 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9280 /* Reset PTP event detection rules */
9281 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9282 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9283 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9284 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9285 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9286 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9287 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9288 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9290 /* Disable the PTP feature */
9291 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9292 NIG_REG_P0_PTP_EN, 0x0);
9295 /* Called during unload, to stop PTP-related stuff */
9296 static void bnx2x_stop_ptp(struct bnx2x *bp)
9298 /* Cancel PTP work queue. Should be done after the Tx queues are
9299 * drained to prevent additional scheduling.
9301 cancel_work_sync(&bp->ptp_task);
9303 if (bp->ptp_tx_skb) {
9304 dev_kfree_skb_any(bp->ptp_tx_skb);
9305 bp->ptp_tx_skb = NULL;
9308 /* Disable PTP in HW */
9309 bnx2x_disable_ptp(bp);
9311 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9314 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9316 int port = BP_PORT(bp);
9319 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9322 /* Wait until tx fastpath tasks complete */
9323 for_each_tx_queue(bp, i) {
9324 struct bnx2x_fastpath *fp = &bp->fp[i];
9326 for_each_cos_in_tx_queue(fp, cos)
9327 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9328 #ifdef BNX2X_STOP_ON_ERROR
9334 /* Give HW time to discard old tx messages */
9335 usleep_range(1000, 2000);
9337 /* Clean all ETH MACs */
9338 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9341 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9343 /* Clean up UC list */
9344 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9347 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9350 /* The whole *vlan_obj structure may not be initialized if VLAN
9351 * filtering offload is not supported by hardware. Currently this is
9352 * true for all hardware covered by CHIP_IS_E1x().
9354 if (!CHIP_IS_E1x(bp)) {
9355 /* Remove all currently configured VLANs */
9356 rc = bnx2x_del_all_vlans(bp);
9358 BNX2X_ERR("Failed to delete all VLANs\n");
9362 if (!CHIP_IS_E1(bp))
9363 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9365 /* Set "drop all" (stop Rx).
9366 * We need to take a netif_addr_lock() here in order to prevent
9367 * a race between the completion code and this code.
9369 netif_addr_lock_bh(bp->dev);
9370 /* Schedule the rx_mode command */
9371 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9372 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9374 bnx2x_set_storm_rx_mode(bp);
9376 /* Cleanup multicast configuration */
9377 rparam.mcast_obj = &bp->mcast_obj;
9378 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9380 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9382 netif_addr_unlock_bh(bp->dev);
9384 bnx2x_iov_chip_cleanup(bp);
9387 * Send the UNLOAD_REQUEST to the MCP. This will return if
9388 * this function should perform FUNC, PORT or COMMON HW
9391 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9394 * (assumption: no attention from the MCP at this stage)
9395 * The PMF is probably in the middle of a TX disable/enable transaction
9397 rc = bnx2x_func_wait_started(bp);
9399 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9400 #ifdef BNX2X_STOP_ON_ERROR
9405 /* Close multi and leading connections
9406 * Completions for ramrods are collected in a synchronous way
9408 for_each_eth_queue(bp, i)
9409 if (bnx2x_stop_queue(bp, i))
9410 #ifdef BNX2X_STOP_ON_ERROR
9416 if (CNIC_LOADED(bp)) {
9417 for_each_cnic_queue(bp, i)
9418 if (bnx2x_stop_queue(bp, i))
9419 #ifdef BNX2X_STOP_ON_ERROR
9426 /* If the SP settings didn't get completed so far - something
9427 * has gone very wrong.
9429 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9430 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9432 #ifndef BNX2X_STOP_ON_ERROR
9435 rc = bnx2x_func_stop(bp);
9437 BNX2X_ERR("Function stop failed!\n");
9438 #ifdef BNX2X_STOP_ON_ERROR
9443 /* stop_ptp should be after the Tx queues are drained to prevent
9444 * scheduling to the cancelled PTP work queue. It should also be after
9445 * the function stop ramrod is sent, since as part of this ramrod the FW accesses
9448 if (bp->flags & PTP_SUPPORTED)
9451 /* Disable HW interrupts, NAPI */
9452 bnx2x_netif_stop(bp, 1);
9453 /* Delete all NAPI objects */
9454 bnx2x_del_all_napi(bp);
9455 if (CNIC_LOADED(bp))
9456 bnx2x_del_all_napi_cnic(bp);
9461 /* Reset the chip, unless the PCI function is offline. If we reach
9462 * this point following PCI error handling, the device is really in a
9463 * bad state and we're about to remove it, so resetting the chip
9464 * is not a good idea.
9466 if (!pci_channel_offline(bp->pdev)) {
9467 rc = bnx2x_reset_hw(bp, reset_code);
9469 BNX2X_ERR("HW_RESET failed\n");
9472 /* Report UNLOAD_DONE to MCP */
9473 bnx2x_send_unload_done(bp, keep_link);
9476 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9480 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9482 if (CHIP_IS_E1(bp)) {
9483 int port = BP_PORT(bp);
9484 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9485 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9487 val = REG_RD(bp, addr);
9489 REG_WR(bp, addr, val);
9491 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9492 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9493 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9494 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9498 /* Close gates #2, #3 and #4: */
9499 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9503 /* Gates #2 and #4a are closed/opened for "not E1" only */
9504 if (!CHIP_IS_E1(bp)) {
9506 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9508 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9512 if (CHIP_IS_E1x(bp)) {
9513 /* Prevent interrupts from HC on both ports */
9514 val = REG_RD(bp, HC_REG_CONFIG_1);
9515 REG_WR(bp, HC_REG_CONFIG_1,
9516 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9517 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9519 val = REG_RD(bp, HC_REG_CONFIG_0);
9520 REG_WR(bp, HC_REG_CONFIG_0,
9521 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9522 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9524 /* Prevent incoming interrupts in IGU */
9525 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9527 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9529 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9530 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9533 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9534 close ? "closing" : "opening");
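/* A rough map of the "gates" handled above: gate #2 is the doorbell path
 * (PXP discards doorbells), #3 is interrupt delivery (HC config on E1x,
 * the IGU block enable on newer chips) and #4a is PXP internal writes.
 * Closing all of them isolates the host from the chip before a
 * process-kill reset.
 */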
9538 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
9540 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9542 /* Do some magic... */
9543 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9544 *magic_val = val & SHARED_MF_CLP_MAGIC;
9545 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9549 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
9551 * @bp: driver handle
9552 * @magic_val: old value of the `magic' bit.
9554 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9556 /* Restore the `magic' bit value... */
9557 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9558 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9559 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9563 * bnx2x_reset_mcp_prep - prepare for MCP reset.
9565 * @bp: driver handle
9566 * @magic_val: old value of 'magic' bit.
9568 * Takes care of CLP configurations.
9570 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9573 u32 validity_offset;
9575 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9577 /* Set `magic' bit in order to save MF config */
9578 if (!CHIP_IS_E1(bp))
9579 bnx2x_clp_reset_prep(bp, magic_val);
9581 /* Get shmem offset */
9582 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9584 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9586 /* Clear validity map flags */
9588 REG_WR(bp, shmem + validity_offset, 0);
9591 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
9592 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
9595 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9597 * @bp: driver handle
9599 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9601 /* special handling for emulation and FPGA:
9602 * wait 10 times longer */
9603 if (CHIP_REV_IS_SLOW(bp))
9604 msleep(MCP_ONE_TIMEOUT*10);
9606 msleep(MCP_ONE_TIMEOUT);
9610 * initializes bp->common.shmem_base and waits for validity signature to appear
9612 static int bnx2x_init_shmem(struct bnx2x *bp)
9618 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9620 /* If we read all 0xFFs, it means we are in a PCI error state and
9621 * should bail out to avoid crashes on adapter's FW reads.
9623 if (bp->common.shmem_base == 0xFFFFFFFF) {
9624 bp->flags |= NO_MCP_FLAG;
9628 if (bp->common.shmem_base) {
9629 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9630 if (val & SHR_MEM_VALIDITY_MB)
9634 bnx2x_mcp_wait_one(bp);
9636 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9638 BNX2X_ERR("BAD MCP validity signature\n");
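/* With the constants above, the loop polls for up to
 * MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50 iterations of 100ms
 * each (or 1s each on slow emulation/FPGA parts), i.e. roughly 5s (50s
 * when slow) before the validity signature is declared bad.
 */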
9643 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9645 int rc = bnx2x_init_shmem(bp);
9647 /* Restore the `magic' bit value */
9648 if (!CHIP_IS_E1(bp))
9649 bnx2x_clp_reset_done(bp, magic_val);
9654 static void bnx2x_pxp_prep(struct bnx2x *bp)
9656 if (!CHIP_IS_E1(bp)) {
9657 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9658 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9664 * Reset the whole chip except for:
9666 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9669 * - MISC (including AEU)
9673 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9675 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9676 u32 global_bits2, stay_reset2;
9679 * Bits that have to be set in reset_mask2 if we want to reset 'global'
9680 * (per chip) blocks.
9683 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9684 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9686 /* Don't reset the following blocks.
9687 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
9688 * reset, as in a 4-port device they might still be owned
9689 * by the MCP (there is only one leader per path).
9692 MISC_REGISTERS_RESET_REG_1_RST_HC |
9693 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9694 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9697 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9698 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9699 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9700 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9701 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9702 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9703 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9704 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9705 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9706 MISC_REGISTERS_RESET_REG_2_PGLC |
9707 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9708 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9709 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9710 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9711 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9712 MISC_REGISTERS_RESET_REG_2_UMAC1;
9715 * Keep the following blocks in reset:
9716 * - all xxMACs are handled by the bnx2x_link code.
9719 MISC_REGISTERS_RESET_REG_2_XMAC |
9720 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9722 /* Full reset masks according to the chip */
9723 reset_mask1 = 0xffffffff;
9726 reset_mask2 = 0xffff;
9727 else if (CHIP_IS_E1H(bp))
9728 reset_mask2 = 0x1ffff;
9729 else if (CHIP_IS_E2(bp))
9730 reset_mask2 = 0xfffff;
9731 else /* CHIP_IS_E3 */
9732 reset_mask2 = 0x3ffffff;
9734 /* Don't reset global blocks unless we need to */
9736 reset_mask2 &= ~global_bits2;
9739 * In case of attention in the QM, we need to reset PXP
9740 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
9741 * because otherwise QM reset would release 'close the gates' shortly
9742 * before resetting the PXP, then the PSWRQ would send a write
9743 * request to PGLUE. Then when PXP is reset, PGLUE would try to
9744 * read the payload data from PSWWR, but PSWWR would not
9745 * respond. The write queue in PGLUE would get stuck, and DMAE commands
9746 * would not return. Therefore it's important to reset the second
9747 * reset register (containing the
9748 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
9749 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
9752 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9753 reset_mask2 & (~not_reset_mask2));
9755 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9756 reset_mask1 & (~not_reset_mask1));
9761 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9762 reset_mask2 & (~stay_reset2));
9767 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
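/* Condensed order of the register writes above, assuming the usual MISC
 * reset-register semantics (a write to the _CLEAR side asserts reset, a
 * write to the _SET side deasserts it): first put blocks into reset,
 * REG_2 before REG_1 per the PXP/QM note, then release them, keeping the
 * stay_reset2 blocks (the xxMACs) in reset for the link code:
 *
 *	RESET_REG_2_CLEAR <- reset_mask2 & ~not_reset_mask2
 *	RESET_REG_1_CLEAR <- reset_mask1 & ~not_reset_mask1
 *	RESET_REG_2_SET   <- reset_mask2 & ~stay_reset2
 *	RESET_REG_1_SET   <- reset_mask1
 */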
9772 * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
9775 * @bp: driver handle
9777 * It should get cleared in no more than 1s. Returns 0 if
9778 * pending writes bit gets cleared.
9780 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9786 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9791 usleep_range(1000, 2000);
9792 } while (cnt-- > 0);
9795 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9803 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9807 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9810 /* Empty the Tetris buffer, wait for 1s */
9812 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9813 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9814 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9815 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9816 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9818 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9820 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9821 ((port_is_idle_0 & 0x1) == 0x1) &&
9822 ((port_is_idle_1 & 0x1) == 0x1) &&
9823 (pgl_exp_rom2 == 0xffffffff) &&
9824 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9826 usleep_range(1000, 2000);
9827 } while (cnt-- > 0);
9830 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9831 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9832 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9839 /* Close gates #2, #3 and #4 */
9840 bnx2x_set_234_gates(bp, true);
9842 /* Poll for IGU VQs for 57712 and newer chips */
9843 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9846 /* TBD: Indicate that "process kill" is in progress to MCP */
9848 /* Clear "unprepared" bit */
9849 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9852 /* Make sure all is written to the chip before the reset */
9855 /* Wait for 1ms to empty GLUE and PCI-E core queues,
9856 * PSWHST, GRC and PSWRD Tetris buffer.
9858 usleep_range(1000, 2000);
9860 /* Prepare for chip reset: */
9863 bnx2x_reset_mcp_prep(bp, &val);
9869 /* reset the chip */
9870 bnx2x_process_kill_chip_reset(bp, global);
9873 /* clear errors in PGB */
9874 if (!CHIP_IS_E1x(bp))
9875 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9877 /* Recover after reset: */
9879 if (global && bnx2x_reset_mcp_comp(bp, val))
9882 /* TBD: Add resetting the NO_MCP mode DB here */
9884 /* Open the gates #2, #3 and #4 */
9885 bnx2x_set_234_gates(bp, false);
9887 /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
9888 * reset state, re-enable attentions. */
9893 static int bnx2x_leader_reset(struct bnx2x *bp)
9896 bool global = bnx2x_reset_is_global(bp);
9899 /* if not going to reset the MCP - load a "fake" driver to reset the HW
9900 * while the driver is the owner of the HW
9902 if (!global && !BP_NOMCP(bp)) {
9903 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9904 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9906 BNX2X_ERR("MCP response failure, aborting\n");
9908 goto exit_leader_reset;
9910 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9911 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9912 BNX2X_ERR("MCP unexpected resp, aborting\n");
9914 goto exit_leader_reset2;
9916 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9918 BNX2X_ERR("MCP response failure, aborting\n");
9920 goto exit_leader_reset2;
9924 /* Try to recover after the failure */
9925 if (bnx2x_process_kill(bp, global)) {
9926 BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
9929 goto exit_leader_reset2;
9933 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
9936 bnx2x_set_reset_done(bp);
9938 bnx2x_clear_reset_global(bp);
9941 /* unload "fake driver" if it was loaded */
9942 if (!global && !BP_NOMCP(bp)) {
9943 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9944 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9948 bnx2x_release_leader_lock(bp);
9953 static void bnx2x_recovery_failed(struct bnx2x *bp)
9955 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9957 /* Disconnect this device */
9958 netif_device_detach(bp->dev);
9961 * Block ifup for all functions on this engine until "process kill"
9964 bnx2x_set_reset_in_progress(bp);
9966 /* Shut down the power */
9967 bnx2x_set_power_state(bp, PCI_D3hot);
9969 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9975 * Assumption: runs under rtnl lock. This together with the fact
9976 * that it's called only from bnx2x_sp_rtnl() ensures that it
9977 * will never be called when netif_running(bp->dev) is false.
9979 static void bnx2x_parity_recover(struct bnx2x *bp)
9981 bool global = false;
9982 u32 error_recovered, error_unrecovered;
9985 DP(NETIF_MSG_HW, "Handling parity\n");
9987 switch (bp->recovery_state) {
9988 case BNX2X_RECOVERY_INIT:
9989 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9990 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9991 WARN_ON(!is_parity);
9993 /* Try to get a LEADER_LOCK HW lock */
9994 if (bnx2x_trylock_leader_lock(bp)) {
9995 bnx2x_set_reset_in_progress(bp);
9997 * Check if there is a global attention and if
9998 * there was a global attention, set the global
10003 bnx2x_set_reset_global(bp);
10008 /* Stop the driver */
10009 /* If interface has been removed - break */
10010 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
10013 bp->recovery_state = BNX2X_RECOVERY_WAIT;
10015 /* Ensure "is_leader", MCP command sequence and
10016 * "recovery_state" update values are seen on other
10022 case BNX2X_RECOVERY_WAIT:
10023 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
10024 if (bp->is_leader) {
10025 int other_engine = BP_PATH(bp) ? 0 : 1;
10026 bool other_load_status =
10027 bnx2x_get_load_status(bp, other_engine);
10029 bnx2x_get_load_status(bp, BP_PATH(bp));
10030 global = bnx2x_reset_is_global(bp);
10033 * In case of a parity in a global block, let
10034 * the first leader that performs a
10035 * leader_reset() reset the global blocks in
10036 * order to clear global attentions. Otherwise
10037 * the gates will remain closed for that
10041 (global && other_load_status)) {
10042 /* Wait until all other functions get
10045 schedule_delayed_work(&bp->sp_rtnl_task,
10049 /* If all other functions are down -
10050 * try to bring the chip back to
10051 * normal. In any case it's an exit
10052 * point for a leader.
10054 if (bnx2x_leader_reset(bp)) {
10055 bnx2x_recovery_failed(bp);
10059 /* If we are here, it means that the
10060 * leader has succeeded and doesn't
10061 * want to be a leader any more. Try
10062 * to continue as a non-leader.
10066 } else { /* non-leader */
10067 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10068 /* Try to get a LEADER_LOCK HW lock as
10069 * long as a former leader may have
10070 * been unloaded by the user or
10071 * released a leadership by another
10074 if (bnx2x_trylock_leader_lock(bp)) {
10075 /* I'm a leader now! Restart a
10082 schedule_delayed_work(&bp->sp_rtnl_task,
10088 * If there was a global attention, wait
10089 * for it to be cleared.
10091 if (bnx2x_reset_is_global(bp)) {
10092 schedule_delayed_work(
10099 bp->eth_stats.recoverable_error;
10100 error_unrecovered =
10101 bp->eth_stats.unrecoverable_error;
10102 bp->recovery_state =
10103 BNX2X_RECOVERY_NIC_LOADING;
10104 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10105 error_unrecovered++;
10106 netdev_err(bp->dev,
10107 "Recovery failed. Power cycle needed\n");
10108 /* Disconnect this device */
10109 netif_device_detach(bp->dev);
10110 /* Shut down the power */
10111 bnx2x_set_power_state(
10115 bp->recovery_state =
10116 BNX2X_RECOVERY_DONE;
10120 bp->eth_stats.recoverable_error =
10122 bp->eth_stats.unrecoverable_error =
10134 static int bnx2x_udp_port_update(struct bnx2x *bp)
10136 struct bnx2x_func_switch_update_params *switch_update_params;
10137 struct bnx2x_func_state_params func_params = {NULL};
10138 struct bnx2x_udp_tunnel *udp_tunnel;
10139 u16 vxlan_port = 0, geneve_port = 0;
10142 switch_update_params = &func_params.params.switch_update;
10144 /* Prepare parameters for function state transitions */
10145 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10146 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10148 func_params.f_obj = &bp->func_obj;
10149 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10151 /* Function parameters */
10152 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10153 &switch_update_params->changes);
10155 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
10156 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10157 geneve_port = udp_tunnel->dst_port;
10158 switch_update_params->geneve_dst_port = geneve_port;
10161 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
10162 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10163 vxlan_port = udp_tunnel->dst_port;
10164 switch_update_params->vxlan_dst_port = vxlan_port;
10167 /* Re-enable inner-rss for the offloaded UDP tunnels */
10168 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10169 &switch_update_params->changes);
10171 rc = bnx2x_func_state_change(bp, &func_params);
10173 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10174 vxlan_port, geneve_port, rc);
10177 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10178 vxlan_port, geneve_port);
10183 static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
10184 enum bnx2x_udp_port_type type)
10186 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10188 if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp))
10191 if (udp_port->count && udp_port->dst_port == port) {
10196 if (udp_port->count) {
10198 "UDP tunnel [%d] - destination port limit reached\n",
10203 udp_port->dst_port = port;
10204 udp_port->count = 1;
10205 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10208 static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
10209 enum bnx2x_udp_port_type type)
10211 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10213 if (!IS_PF(bp) || CHIP_IS_E1x(bp))
10216 if (!udp_port->count || udp_port->dst_port != port) {
10217 DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
10222 /* Remove reference, and make certain it's no longer in use */
10224 if (udp_port->count)
10226 udp_port->dst_port = 0;
10228 if (netif_running(bp->dev))
10229 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10231 DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
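/* Reference-count semantics of the add/del pair above: re-adding the port
 * that is already configured just bumps 'count'; a different port while
 * one is in use is rejected (one destination port per tunnel type);
 * deletion decrements 'count' and only clears dst_port - and schedules
 * the HW update - once the last user is gone.
 */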
10235 static void bnx2x_udp_tunnel_add(struct net_device *netdev,
10236 struct udp_tunnel_info *ti)
10238 struct bnx2x *bp = netdev_priv(netdev);
10239 u16 t_port = ntohs(ti->port);
10241 switch (ti->type) {
10242 case UDP_TUNNEL_TYPE_VXLAN:
10243 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10245 case UDP_TUNNEL_TYPE_GENEVE:
10246 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10253 static void bnx2x_udp_tunnel_del(struct net_device *netdev,
10254 struct udp_tunnel_info *ti)
10256 struct bnx2x *bp = netdev_priv(netdev);
10257 u16 t_port = ntohs(ti->port);
10259 switch (ti->type) {
10260 case UDP_TUNNEL_TYPE_VXLAN:
10261 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10263 case UDP_TUNNEL_TYPE_GENEVE:
10264 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10271 static int bnx2x_close(struct net_device *dev);
10273 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
10274 * scheduled on a general queue in order to prevent a deadlock.
10276 static void bnx2x_sp_rtnl_task(struct work_struct *work)
10278 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10282 if (!netif_running(bp->dev)) {
10287 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10288 #ifdef BNX2X_STOP_ON_ERROR
10289 BNX2X_ERR("recovery flow called but STOP_ON_ERROR is defined, so the reset was skipped to allow a debug dump;\n"
10290 "you will need to reboot when done\n");
10291 goto sp_rtnl_not_reset;
10294 * Clear all pending SP commands as we are going to reset the
10297 bp->sp_rtnl_state = 0;
10300 bnx2x_parity_recover(bp);
10306 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10307 #ifdef BNX2X_STOP_ON_ERROR
10308 BNX2X_ERR("recovery flow called but STOP_ON_ERROR is defined, so the reset was skipped to allow a debug dump;\n"
10309 "you will need to reboot when done\n");
10310 goto sp_rtnl_not_reset;
10314 * Clear all pending SP commands as we are going to reset the
10317 bp->sp_rtnl_state = 0;
10320 /* Immediately indicate link as down */
10321 bp->link_vars.link_up = 0;
10322 bp->force_link_down = true;
10323 netif_carrier_off(bp->dev);
10324 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10326 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10327 bnx2x_nic_load(bp, LOAD_NORMAL);
10332 #ifdef BNX2X_STOP_ON_ERROR
10335 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10336 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10337 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10338 bnx2x_after_function_update(bp);
10340 * in case of a fan failure we need to reset it even if the "stop on error"
10341 * debug flag is set, since we are trying to prevent permanent overheating
10344 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10345 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10346 netif_device_detach(bp->dev);
10347 bnx2x_close(bp->dev);
10352 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10354 "sending set mcast vf pf channel message from rtnl sp-task\n");
10355 bnx2x_vfpf_set_mcast(bp->dev);
10357 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10358 &bp->sp_rtnl_state)){
10359 if (netif_carrier_ok(bp->dev)) {
10360 bnx2x_tx_disable(bp);
10361 BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10365 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10366 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10367 bnx2x_set_rx_mode_inner(bp);
10370 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10371 &bp->sp_rtnl_state))
10372 bnx2x_pf_set_vfs_vlan(bp);
10374 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10375 bnx2x_dcbx_stop_hw_tx(bp);
10376 bnx2x_dcbx_resume_hw_tx(bp);
10379 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10380 &bp->sp_rtnl_state))
10381 bnx2x_update_mng_version(bp);
10383 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10384 bnx2x_handle_update_svid_cmd(bp);
10386 if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
10387 &bp->sp_rtnl_state)) {
10388 if (bnx2x_udp_port_update(bp)) {
10389 /* On error, forget configuration */
10390 memset(bp->udp_tunnel_ports, 0,
10391 sizeof(struct bnx2x_udp_tunnel) *
10392 BNX2X_UDP_PORT_MAX);
10394 /* Since we don't store additional port information,
10395 * if no ports are configured for any feature ask for
10396 * information about currently configured ports.
10398 if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
10399 !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
10400 udp_tunnel_get_rx_info(bp->dev);
10404 /* work which needs the rtnl lock not taken (as it takes the lock itself
10405 * and can be called from other contexts as well)
10409 /* enable SR-IOV if applicable */
10410 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10411 &bp->sp_rtnl_state)) {
10412 bnx2x_disable_sriov(bp);
10413 bnx2x_enable_sriov(bp);
10417 static void bnx2x_period_task(struct work_struct *work)
10419 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10421 if (!netif_running(bp->dev))
10422 goto period_task_exit;
10424 if (CHIP_REV_IS_SLOW(bp)) {
10425 BNX2X_ERR("period task called on emulation, ignoring\n");
10426 goto period_task_exit;
10429 bnx2x_acquire_phy_lock(bp);
10431 * The barrier is needed to ensure the ordering between the writing to
10432 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
10433 * the reading here.
10436 if (bp->port.pmf) {
10437 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10439 /* Re-queue task in 1 sec */
10440 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10443 bnx2x_release_phy_lock(bp);
10449 * Init service functions
10452 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10454 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10455 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10456 return base + (BP_ABS_FUNC(bp)) * stride;
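/* Pretend-register lookup illustrated: the per-function pretend registers
 * sit at a fixed stride from the F0 register, so for absolute function 3
 * this returns PXP2_REG_PGL_PRETEND_FUNC_F0 + 3 * (F1 - F0), i.e. the F3
 * register, without enumerating all eight addresses.
 */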
10459 static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10460 u8 port, u32 reset_reg,
10461 struct bnx2x_mac_vals *vals)
10463 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10466 if (!(mask & reset_reg))
10469 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10470 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10471 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10472 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10473 REG_WR(bp, vals->umac_addr[port], 0);
10478 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10479 struct bnx2x_mac_vals *vals)
10481 u32 val, base_addr, offset, mask, reset_reg;
10482 bool mac_stopped = false;
10483 u8 port = BP_PORT(bp);
10485 /* reset addresses as they also mark which values were changed */
10486 memset(vals, 0, sizeof(*vals));
10488 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10490 if (!CHIP_IS_E3(bp)) {
10491 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10492 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10493 if ((mask & reset_reg) && val) {
10495 BNX2X_DEV_INFO("Disable bmac Rx\n");
10496 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10497 : NIG_REG_INGRESS_BMAC0_MEM;
10498 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10499 : BIGMAC_REGISTER_BMAC_CONTROL;
10502 * use rd/wr since we cannot use dmae. This is safe
10503 * since MCP won't access the bus due to the request
10504 * to unload, and no function on the path can be
10505 * loaded at this time.
10507 wb_data[0] = REG_RD(bp, base_addr + offset);
10508 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10509 vals->bmac_addr = base_addr + offset;
10510 vals->bmac_val[0] = wb_data[0];
10511 vals->bmac_val[1] = wb_data[1];
10512 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10513 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10514 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10516 BNX2X_DEV_INFO("Disable emac Rx\n");
10517 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10518 vals->emac_val = REG_RD(bp, vals->emac_addr);
10519 REG_WR(bp, vals->emac_addr, 0);
10520 mac_stopped = true;
10522 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10523 BNX2X_DEV_INFO("Disable xmac Rx\n");
10524 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10525 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10526 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10528 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10530 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10531 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10532 REG_WR(bp, vals->xmac_addr, 0);
10533 mac_stopped = true;
10536 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10538 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10546 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10547 #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10548 0x1848 + ((f) << 4))
10549 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10550 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10551 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
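/* Producer packing used by the macros above: one 32-bit word holds the BD
 * producer in the high 16 bits and the RCQ producer in the low 16, so
 * e.g. val = 0x00120034 unpacks to bd = 0x12 and rcq = 0x34, and
 * BNX2X_PREV_UNDI_PROD(0x34, 0x12) packs it back to 0x00120034.
 */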
10553 #define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10554 #define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10555 #define BCM_5710_UNDI_FW_MF_VERS (0x05)
10557 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10559 /* UNDI marks its presence in DORQ -
10560 * it initializes CID offset for normal bell to 0x7
10562 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10563 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10566 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10567 BNX2X_DEV_INFO("UNDI previously loaded\n");
10574 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10579 if (BP_FUNC(bp) < 2)
10580 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10582 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10584 tmp_reg = REG_RD(bp, addr);
10585 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10586 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10588 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10589 REG_WR(bp, addr, tmp_reg);
10591 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10592 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10595 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10597 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10598 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10600 BNX2X_ERR("MCP response failure, aborting\n");
10607 static struct bnx2x_prev_path_list *
10608 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10610 struct bnx2x_prev_path_list *tmp_list;
10612 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10613 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10614 bp->pdev->bus->number == tmp_list->bus &&
10615 BP_PATH(bp) == tmp_list->path)
10621 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10623 struct bnx2x_prev_path_list *tmp_list;
10626 rc = down_interruptible(&bnx2x_prev_sem);
10628 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
10632 tmp_list = bnx2x_prev_path_get_entry(bp);
10637 BNX2X_ERR("path %d: Entry does not exist for EEH; does this flow occur before the initial insmod is over?\n",
10641 up(&bnx2x_prev_sem);
10646 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10648 struct bnx2x_prev_path_list *tmp_list;
10651 if (down_trylock(&bnx2x_prev_sem))
10654 tmp_list = bnx2x_prev_path_get_entry(bp);
10656 if (tmp_list->aer) {
10657 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10661 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10666 up(&bnx2x_prev_sem);
10671 bool bnx2x_port_after_undi(struct bnx2x *bp)
10673 struct bnx2x_prev_path_list *entry;
10676 down(&bnx2x_prev_sem);
10678 entry = bnx2x_prev_path_get_entry(bp);
10679 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10681 up(&bnx2x_prev_sem);
10686 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10688 struct bnx2x_prev_path_list *tmp_list;
10691 rc = down_interruptible(&bnx2x_prev_sem);
10693 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
10697 /* Check whether the entry for this path already exists */
10698 tmp_list = bnx2x_prev_path_get_entry(bp);
10700 if (!tmp_list->aer) {
10701 BNX2X_ERR("Re-Marking the path.\n");
10703 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10707 up(&bnx2x_prev_sem);
10710 up(&bnx2x_prev_sem);
10712 /* Create an entry for this path and add it */
10713 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10715 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10719 tmp_list->bus = bp->pdev->bus->number;
10720 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10721 tmp_list->path = BP_PATH(bp);
10723 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10725 rc = down_interruptible(&bnx2x_prev_sem);
10727 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
10730 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10732 list_add(&tmp_list->list, &bnx2x_prev_list);
10733 up(&bnx2x_prev_sem);
10739 static int bnx2x_do_flr(struct bnx2x *bp)
10741 struct pci_dev *dev = bp->pdev;
10743 if (CHIP_IS_E1x(bp)) {
10744 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10748 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support FLR */
10749 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10750 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10751 bp->common.bc_ver);
10755 if (!pci_wait_for_pending_transaction(dev))
10756 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10758 BNX2X_DEV_INFO("Initiating FLR\n");
10759 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
{
	int rc;

	BNX2X_DEV_INFO("Uncommon unload Flow\n");

	/* Test if previous unload process was already finished for this path */
	if (bnx2x_prev_is_path_marked(bp))
		return bnx2x_prev_mcp_done(bp);

	BNX2X_DEV_INFO("Path is unmarked\n");

	/* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
	if (bnx2x_prev_is_after_undi(bp))
		goto out;

	/* If function has FLR capabilities, and existing FW version matches
	 * the one required, then FLR will be sufficient to clean any residue
	 * left by previous driver
	 */
	rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);

	if (!rc) {
		/* fw version is good */
		BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
		rc = bnx2x_do_flr(bp);
	}

	if (!rc) {
		/* FLR was performed */
		BNX2X_DEV_INFO("FLR successful\n");
		return 0;
	}

	BNX2X_DEV_INFO("Could not FLR\n");

out:
	/* Close the MCP request, return failure */
	rc = bnx2x_prev_mcp_done(bp);
	if (!rc)
		rc = BNX2X_PREV_WAIT_NEEDED;

	return rc;
}
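
/* Note: the uncommon flow above resolves in one of three ways - the path was
 * already cleaned (finish the MCP handshake and return its status), an FLR
 * sufficed (return 0), or residue remains and the caller must wait and retry
 * (BNX2X_PREV_WAIT_NEEDED).
 */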
static int bnx2x_prev_unload_common(struct bnx2x *bp)
{
	u32 reset_reg, tmp_reg = 0, rc;
	bool prev_undi = false;
	struct bnx2x_mac_vals mac_vals;

	/* It is possible a previous function received 'common' answer,
	 * but hasn't loaded yet, therefore creating a scenario of
	 * multiple functions receiving 'common' on the same path.
	 */
	BNX2X_DEV_INFO("Common unload Flow\n");

	memset(&mac_vals, 0, sizeof(mac_vals));

	if (bnx2x_prev_is_path_marked(bp))
		return bnx2x_prev_mcp_done(bp);

	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);

	/* Reset should be performed after BRB is emptied */
	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
		u32 timer_count = 1000;

		/* Close the MAC Rx to prevent BRB from filling up */
		bnx2x_prev_unload_close_mac(bp, &mac_vals);

		/* close LLH filters for both ports towards the BRB */
		bnx2x_set_rx_filter(&bp->link_params, 0);
		bp->link_params.port ^= 1;
		bnx2x_set_rx_filter(&bp->link_params, 0);
		bp->link_params.port ^= 1;

		/* Check if the UNDI driver was previously loaded */
		if (bnx2x_prev_is_after_undi(bp)) {
			prev_undi = true;
			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
			/* clear possible idle check errors */
			REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
		}
		if (!CHIP_IS_E1x(bp))
			/* block FW from writing to host */
			REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);

		/* wait until BRB is empty */
		tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
		while (timer_count) {
			u32 prev_brb = tmp_reg;

			tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
			if (!tmp_reg)
				break;

			BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);

			/* reset timer as long as BRB actually gets emptied */
			if (prev_brb > tmp_reg)
				timer_count = 1000;
			else
				timer_count--;

			/* If UNDI resides in memory, manually increment it */
			if (prev_undi)
				bnx2x_prev_unload_undi_inc(bp, 1);

			udelay(10);
		}

		if (!timer_count)
			BNX2X_ERR("Failed to empty BRB, hope for the best\n");
	}

	/* No packets are in the pipeline, path is ready for reset */
	bnx2x_reset_common(bp);

	if (mac_vals.xmac_addr)
		REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
	if (mac_vals.umac_addr[0])
		REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
	if (mac_vals.umac_addr[1])
		REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
	if (mac_vals.emac_addr)
		REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
	if (mac_vals.bmac_addr) {
		REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
		REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
	}

	rc = bnx2x_prev_mark_path(bp, prev_undi);
	if (rc) {
		bnx2x_prev_mcp_done(bp);
		return rc;
	}

	return bnx2x_prev_mcp_done(bp);
}
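
/* The save/restore dance above is deliberate: the MAC control registers are
 * captured and the MACs closed before draining the BRB, and they are only
 * restored once bnx2x_reset_common() has completed, so no traffic can refill
 * the BRB mid-reset.
 */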
static int bnx2x_prev_unload(struct bnx2x *bp)
{
	int time_counter = 10;
	u32 rc, fw, hw_lock_reg, hw_lock_val;

	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");

	/* clear hw from errors which may have resulted from an interrupted
	 * dmae transaction.
	 */
	bnx2x_clean_pglue_errors(bp);

	/* Release previously held locks */
	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);

	hw_lock_val = REG_RD(bp, hw_lock_reg);
	if (hw_lock_val) {
		if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
			BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
			REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
			       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
		}

		BNX2X_DEV_INFO("Release Previously held hw lock\n");
		REG_WR(bp, hw_lock_reg, 0xffffffff);
	} else
		BNX2X_DEV_INFO("No need to release hw/nvram locks\n");

	if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
		BNX2X_DEV_INFO("Release previously held alr\n");
		bnx2x_release_alr(bp);
	}

	do {
		int aer = 0;
		/* Lock MCP using an unload request */
		fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
		if (!fw) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			break;
		}

		rc = down_interruptible(&bnx2x_prev_sem);
		if (rc) {
			BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
				  rc);
		} else {
			/* If Path is marked by EEH, ignore unload status */
			aer = !!(bnx2x_prev_path_get_entry(bp) &&
				 bnx2x_prev_path_get_entry(bp)->aer);
			up(&bnx2x_prev_sem);
		}

		if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
			rc = bnx2x_prev_unload_common(bp);
			break;
		}

		/* non-common reply from MCP might require looping */
		rc = bnx2x_prev_unload_uncommon(bp);
		if (rc != BNX2X_PREV_WAIT_NEEDED)
			break;

		msleep(20);
	} while (--time_counter);

	if (!time_counter || rc) {
		BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
		rc = -EPROBE_DEFER;
	}

	/* Mark function if its port was used to boot from SAN */
	if (bnx2x_port_after_undi(bp))
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BOOT_FROM_SAN;

	BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);

	return rc;
}
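
/* Returning -EPROBE_DEFER above is intentional: the driver core will requeue
 * this probe, giving the previous driver (e.g. MF UNDI) more time to release
 * the device before we try again.
 */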
static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id, boot_mode;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);

	/* Metal is read from PCI regs, but we can't access >=0x400 from
	 * the configuration space (so we need to reg_rd)
	 */
	val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
	id |= (((val >> 24) & 0xf) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
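
	/* Worked example (hypothetical register values): chip num 0x168e,
	 * rev 0x1, metal nibble 0x2 and bond_id 0x0 would assemble into
	 * id = (0x168e << 16) | (0x1 << 12) | (0x2 << 4) | 0x0 = 0x168e1020.
	 */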
	/* force 57811 according to MISC register */
	if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
		if (CHIP_IS_57810(bp))
			bp->common.chip_id = (CHIP_NUM_57811 << 16) |
				(bp->common.chip_id & 0x0000FFFF);
		else if (CHIP_IS_57810_MF(bp))
			bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
				(bp->common.chip_id & 0x0000FFFF);
		bp->common.chip_id |= 0x1;
	}

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (!CHIP_IS_E1x(bp)) {
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						  CHIP_2_PORT_MODE;

		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	BNX2X_DEV_INFO("pf_id: %x", bp->pfid);

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);
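
	/* Illustrative decode (hypothetical field value): a FLASH_SIZE field
	 * of 2 would yield BNX2X_NVRAM_1MB_SIZE << 2, i.e. a 4MB flash.
	 */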
	bnx2x_init_shmem(bp);

	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));

	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	if (SHMEM2_RD(bp, size) >
	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
		bp->link_params.lfa_base =
			REG_RD(bp, bp->common.shmem2_base +
			       (u32)offsetof(struct shmem2_region,
					     lfa_host_addr[BP_PORT(bp)]));
	else
		bp->link_params.lfa_base = 0;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn; later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
			  BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
		FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
		FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_MT_SUPPORTED) ?
		FEATURE_CONFIG_MT_SUPPORT : 0;

	bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
			BC_SUPPORTS_PFC_STATS : 0;

	bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
			BC_SUPPORTS_FCOE_FEATURES : 0;

	bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
			BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;

	bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
			BC_SUPPORTS_RMMOD_CMD : 0;

	boot_mode = SHMEM_RD(bp,
			dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
			PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
	switch (boot_mode) {
	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
		break;
	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
		break;
	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
		break;
	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
		break;
	}

	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;

	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int igu_sb_id;
	u32 val;
	u8 fid, igu_sb_cnt = 0;

	bp->igu_base_sb = 0xff;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		int vn = BP_VN(bp);
		igu_sb_cnt = bp->igu_sb_cnt;
		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return 0;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				igu_sb_cnt++;
			}
		}
	}

#ifdef CONFIG_PCI_MSI
	/* Due to new PF resource allocation by MFW T7.4 and above, the
	 * number of CAM entries may differ from the value advertised in
	 * PCI. The driver should use the minimum of the two as the actual
	 * status block count.
	 */
	bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
#endif

	if (igu_sb_cnt == 0) {
		BNX2X_ERR("CAM configuration error\n");
		return -EINVAL;
	}

	return 0;
}
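
/* Each IGU CAM word decodes as a valid bit, a function id (FID) whose top
 * bit flags PF vs. VF entries, and a vector number - vector 0 of a PF is
 * that PF's default status block, and the remaining vectors map to its
 * fast-path status blocks (see IGU_FID()/IGU_VEC() above).
 */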
static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
			  SHMEM_RD(bp,
			  dev_info.port_hw_config[port].external_phy_config),
			  SHMEM_RD(bp,
			  dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	if (CHIP_IS_E3(bp))
		bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
	else {
		switch (switch_cfg) {
		case SWITCH_CFG_1G:
			bp->port.phy_addr = REG_RD(
				bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
			break;
		case SWITCH_CFG_10G:
			bp->port.phy_addr = REG_RD(
				bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
			break;
		default:
			BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
				  bp->port.link_config[0]);
			return;
		}
	}
	BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
			bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
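
/* Example of the masking above (hypothetical NVRAM contents): a
 * speed_cap_mask[idx] with the D0_10G bit cleared strips
 * SUPPORTED_10000baseT_Full from bp->port.supported[idx], so ethtool will
 * never offer 10G on that configuration even if the PHY itself could.
 */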
static void bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
				if (bp->link_params.phy[EXT_PHY1].type ==
				    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
					bp->port.advertising[idx] |=
					(SUPPORTED_100baseT_Half |
					 SUPPORTED_100baseT_Full);
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
					  link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
					  link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
					  link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
					  link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else if (bp->port.supported[idx] &
				   SUPPORTED_1000baseKX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					ADVERTISED_1000baseKX_Full;
			} else {
				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
					  link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
					  link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else if (bp->port.supported[idx] &
				   SUPPORTED_10000baseKR_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseKR_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
					  link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;
		case PORT_FEATURE_LINK_SPEED_20G:
			bp->link_params.req_line_speed[idx] = SPEED_20000;
			break;
		default:
			BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
				  link_config);
			bp->link_params.req_line_speed[idx] =
				SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
				bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					PORT_FEATURE_FLOW_CONTROL_MASK);
		if (bp->link_params.req_flow_ctrl[idx] ==
		    BNX2X_FLOW_CTRL_AUTO) {
			if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
				bp->link_params.req_flow_ctrl[idx] =
							BNX2X_FLOW_CTRL_NONE;
			else
				bnx2x_set_requested_fc(bp);
		}

		BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	__be16 mac_hi_be = cpu_to_be16(mac_hi);
	__be32 mac_lo_be = cpu_to_be32(mac_lo);

	memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
	memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
}
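
/* Worked example (hypothetical values): mac_hi = 0x0050 and
 * mac_lo = 0xc22d1234 are byte-swapped to big endian and concatenated,
 * producing the on-wire MAC 00:50:c2:2d:12:34.
 */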
static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config, eee_mode;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask) &
		PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2) &
		PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
	    PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
		bp->flags |= NO_ISCSI_FLAG;
	if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
	    PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
		bp->flags |= NO_FCOE_FLAG;

	BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	/* Configure link feature according to nvram value */
	eee_mode = (((SHMEM_RD(bp, dev_info.
			       port_feature_config[port].eee_power_mode)) &
		     PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
		    PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
	if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
		bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
					   EEE_MODE_ENABLE_LPI |
					   EEE_MODE_OUTPUT_TIME;
	} else {
		bp->link_params.eee_mode = 0;
	}
}
void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
	u32 no_flags = NO_ISCSI_FLAG;
	int port = BP_PORT(bp);
	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[port].max_iscsi_conn);

	if (!CNIC_SUPPORT(bp)) {
		bp->flags |= no_flags;
		return;
	}

	/* Get the number of maximum allowed iSCSI connections */
	bp->cnic_eth_dev.max_iscsi_conn =
		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;

	BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
		       bp->cnic_eth_dev.max_iscsi_conn);

	/*
	 * If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_iscsi_conn)
		bp->flags |= no_flags;
}
static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
{
	/* Port info */
	bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
	bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);

	/* Node info */
	bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
	bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
}
static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
{
	u8 count = 0;

	if (IS_MF(bp)) {
		u8 fid;

		/* iterate over absolute function ids for this path: */
		for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
			if (IS_MF_SD(bp)) {
				u32 cfg = MF_CFG_RD(bp,
						    func_mf_config[fid].config);

				if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
				    ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
				     FUNC_MF_CFG_PROTOCOL_FCOE))
					count++;
			} else {
				u32 cfg = MF_CFG_RD(bp,
						    func_ext_config[fid].
						    func_cfg);

				if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
				    (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
					count++;
			}
		}
	} else { /* SF */
		int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;

		for (port = 0; port < port_cnt; port++) {
			u32 lic = SHMEM_RD(bp,
					   drv_lic_key[port].max_fcoe_conn) ^
				  FW_ENCODE_32BIT_PATTERN;
			if (lic)
				count++;
		}
	}

	return count;
}
static void bnx2x_get_fcoe_info(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_ABS_FUNC(bp);
	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[port].max_fcoe_conn);
	u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);

	if (!CNIC_SUPPORT(bp)) {
		bp->flags |= NO_FCOE_FLAG;
		return;
	}

	/* Get the number of maximum allowed FCoE connections */
	bp->cnic_eth_dev.max_fcoe_conn =
		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;

	/* Calculate the number of maximum allowed FCoE tasks */
	bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;

	/* check if FCoE resources must be shared between different functions */
	if (num_fcoe_func)
		bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;

	/* Read the WWN: */
	if (!IS_MF(bp)) {
		/* Port info */
		bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
			SHMEM_RD(bp,
				 dev_info.port_hw_config[port].
				 fcoe_wwn_port_name_upper);
		bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
			SHMEM_RD(bp,
				 dev_info.port_hw_config[port].
				 fcoe_wwn_port_name_lower);

		/* Node info */
		bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
			SHMEM_RD(bp,
				 dev_info.port_hw_config[port].
				 fcoe_wwn_node_name_upper);
		bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
			SHMEM_RD(bp,
				 dev_info.port_hw_config[port].
				 fcoe_wwn_node_name_lower);
	} else if (!IS_MF_SD(bp)) {
		/* Read the WWN info only if the FCoE feature is enabled for
		 * this function.
		 */
		if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
			bnx2x_get_ext_wwn_info(bp, func);
	} else {
		if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
			bnx2x_get_ext_wwn_info(bp, func);
	}

	BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);

	/*
	 * If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_fcoe_conn) {
		bp->flags |= NO_FCOE_FLAG;
		eth_zero_addr(bp->fip_mac);
	}
}
static void bnx2x_get_cnic_info(struct bnx2x *bp)
{
	/*
	 * iSCSI may be dynamically disabled, but by reading the info here
	 * the driver can decrease its memory usage if the feature is
	 * disabled for good.
	 */
	bnx2x_get_iscsi_info(bp);
	bnx2x_get_fcoe_info(bp);
}
static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);
	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
	u8 *fip_mac = bp->fip_mac;

	if (IS_MF(bp)) {
		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
		 * MAC is missing, the corresponding feature should be
		 * disabled. In non SD mode features configuration comes from
		 * struct func_ext_config.
		 */
		if (!IS_MF_SD(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						iscsi_mac_addr_lower);
				bnx2x_set_mac_buf(iscsi_mac, val, val2);
				BNX2X_DEV_INFO
					("Read iSCSI MAC: %pM\n", iscsi_mac);
			} else {
				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
			}

			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 fcoe_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						fcoe_mac_addr_lower);
				bnx2x_set_mac_buf(fip_mac, val, val2);
				BNX2X_DEV_INFO
					("Read FCoE L2 MAC: %pM\n", fip_mac);
			} else {
				bp->flags |= NO_FCOE_FLAG;
			}

			bp->mf_ext_config = cfg;

		} else { /* SD MODE */
			if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
				/* use primary mac as iscsi mac */
				memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);

				BNX2X_DEV_INFO("SD ISCSI MODE\n");
				BNX2X_DEV_INFO
					("Read iSCSI MAC: %pM\n", iscsi_mac);
			} else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
				/* use primary mac as fip mac */
				memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
				BNX2X_DEV_INFO("SD FCoE MODE\n");
				BNX2X_DEV_INFO
					("Read FIP MAC: %pM\n", fip_mac);
			}
		}

		/* If this is a storage-only interface, use SAN mac as
		 * primary MAC. Notice that for SD this is already the case,
		 * as the SAN mac was copied from the primary MAC.
		 */
		if (IS_MF_FCOE_AFEX(bp))
			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
	} else {
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       iscsi_mac_lower);
		bnx2x_set_mac_buf(iscsi_mac, val, val2);

		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				fcoe_fip_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       fcoe_fip_mac_lower);
		bnx2x_set_mac_buf(fip_mac, val, val2);
	}

	/* Disable iSCSI OOO if MAC configuration is invalid. */
	if (!is_valid_ether_addr(iscsi_mac)) {
		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
		eth_zero_addr(iscsi_mac);
	}

	/* Disable FCoE if MAC configuration is invalid. */
	if (!is_valid_ether_addr(fip_mac)) {
		bp->flags |= NO_FCOE_FLAG;
		eth_zero_addr(bp->fip_mac);
	}
}
static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);

	/* Zero primary MAC configuration */
	eth_zero_addr(bp->dev->dev_addr);

	if (BP_NOMCP(bp)) {
		BNX2X_ERROR("warning: random MAC workaround active\n");
		eth_hw_addr_random(bp->dev);
	} else if (IS_MF(bp)) {
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

		if (CNIC_SUPPORT(bp))
			bnx2x_get_cnic_mac_hwinfo(bp);
	} else {
		/* in SF read MACs from port configuration */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

		if (CNIC_SUPPORT(bp))
			bnx2x_get_cnic_mac_hwinfo(bp);
	}

	if (!BP_NOMCP(bp)) {
		/* Read physical port identifier from shmem */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
		bp->flags |= HAS_PHYS_PORT_ID;
	}

	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);

	if (!is_valid_ether_addr(bp->dev->dev_addr))
		dev_err(&bp->pdev->dev,
			"bad Ethernet MAC address configuration: %pM\n"
			"change it manually before bringing up the appropriate network interface\n",
			bp->dev->dev_addr);
}
static bool bnx2x_get_dropless_info(struct bnx2x *bp)
{
	int tmp;
	u32 cfg;

	if (IS_VF(bp))
		return false;

	if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
		/* Take function: tmp = func */
		tmp = BP_ABS_FUNC(bp);
		cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
		cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
	} else {
		/* Take port: tmp = port */
		tmp = BP_PORT(bp);
		cfg = SHMEM_RD(bp,
			       dev_info.port_hw_config[tmp].generic_features);
		cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
	}

	return cfg;
}
static void validate_set_si_mode(struct bnx2x *bp)
{
	u8 func = BP_ABS_FUNC(bp);
	u32 val;

	val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);

	/* check for legal mac (upper bytes) */
	if (val != 0xffff) {
		bp->mf_mode = MULTI_FUNCTION_SI;
		bp->mf_config[BP_VN(bp)] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	} else
		BNX2X_DEV_INFO("illegal MAC address for SI\n");
}
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int /*abs*/func = BP_ABS_FUNC(bp);
	int vn, mfw_vn;
	u32 val = 0, val2 = 0;
	int rc = 0;

	/* Validate that chip access is feasible */
	if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
		dev_err(&bp->pdev->dev,
			"Chip read returns all Fs. Preventing probe from continuing\n");
		return -EINVAL;
	}

	bnx2x_get_common_hwinfo(bp);

	/*
	 * initialize IGU parameters
	 */
	if (CHIP_IS_E1x(bp)) {
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
	} else {
		bp->common.int_block = INT_BLOCK_IGU;

		/* do not allow device reset during IGU info processing */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			int tout = 5000;

			BNX2X_DEV_INFO("FORCING Normal Mode\n");

			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
			REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
			REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);

			while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
				tout--;
				usleep_range(1000, 2000);
			}

			if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
				dev_err(&bp->pdev->dev,
					"FORCING Normal Mode failed!!!\n");
				bnx2x_release_hw_lock(bp,
						      HW_LOCK_RESOURCE_RESET);
				return -EPERM;
			}
		}

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			BNX2X_DEV_INFO("IGU Normal Mode\n");

		rc = bnx2x_get_igu_cam_info(bp);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
		if (rc)
			return rc;
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
	else /*
	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
	      * the same queue are indicated on the same IGU SB). So we prefer
	      * FW and IGU SBs to be the same value.
	      */
		bp->base_fw_ndsb = bp->igu_base_sb;

	BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
		       bp->igu_sb_cnt, bp->base_fw_ndsb);

	/*
	 * Initialize MF configuration
	 */
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->mf_sub_mode = 0;
	vn = BP_VN(bp);
	mfw_vn = BP_FW_MB_IDX(bp);

	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
			       (u32)offsetof(struct shmem2_region, mf_cfg_addr));

		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		/*
		 * get mf configuration:
		 * 1. Existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
			val = SHMEM_RD(bp,
				       dev_info.shared_feature_config.config);
			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

			switch (val) {
			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
				validate_set_si_mode(bp);
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
				if ((!CHIP_IS_E1x(bp)) &&
				    (MF_CFG_RD(bp, func_mf_config[func].
					       mac_upper) != 0xffff) &&
				    (SHMEM2_HAS(bp,
						afex_driver_support))) {
					bp->mf_mode = MULTI_FUNCTION_AFEX;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else {
					BNX2X_DEV_INFO("can not configure afex mode\n");
				}
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
				val = MF_CFG_RD(bp,
					func_mf_config[FUNC_0].e1hov_tag);
				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;

				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
					bp->mf_mode = MULTI_FUNCTION_SD;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
					BNX2X_DEV_INFO("illegal OV for SD\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
				bp->mf_mode = MULTI_FUNCTION_SD;
				bp->mf_sub_mode = SUB_MF_MODE_BD;
				bp->mf_config[vn] =
					MF_CFG_RD(bp,
						  func_mf_config[func].config);

				if (SHMEM2_HAS(bp, mtu_size)) {
					int mtu_idx = BP_FW_MB_IDX(bp);
					u16 mtu_size;
					u32 mtu;

					mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
					mtu_size = (u16)mtu;
					DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
					   mtu_size, mtu);

					/* if valid: update device mtu */
					if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
					    (mtu_size <=
					     ETH_MAX_JUMBO_PACKET_SIZE))
						bp->dev->mtu = mtu_size;
				}
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
				bp->mf_mode = MULTI_FUNCTION_SD;
				bp->mf_sub_mode = SUB_MF_MODE_UFP;
				bp->mf_config[vn] =
					MF_CFG_RD(bp,
						  func_mf_config[func].config);
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
				bp->mf_config[vn] = 0;
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
				val2 = SHMEM_RD(bp,
					dev_info.shared_hw_config.config_3);
				val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
				switch (val2) {
				case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
					validate_set_si_mode(bp);
					bp->mf_sub_mode =
						SUB_MF_MODE_NPAR1_DOT_5;
					break;
				default:
					/* Unknown configuration */
					bp->mf_config[vn] = 0;
					BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
						       val2);
				}
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
			}
		}

		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				bp->path_has_ovlan = true;

				BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
				   (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
				dev_err(&bp->pdev->dev,
					"Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
					func);
				bp->path_has_ovlan = true;
			} else {
				dev_err(&bp->pdev->dev,
					"No valid MF OV for func %d, aborting\n",
					func);
				return -EPERM;
			}
			break;
		case MULTI_FUNCTION_AFEX:
			BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
			break;
		case MULTI_FUNCTION_SI:
			BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
				       func);
			break;
		default:
			if (vn) {
				dev_err(&bp->pdev->dev,
					"VN %d is in a single function mode, aborting\n",
					vn);
				return -EPERM;
			}
			break;
		}

		/* check if other port on the path needs ovlan:
		 * Since MF configuration is shared between ports
		 * Possible mixed modes are only
		 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
		 */
		if (CHIP_MODE_IS_4_PORT(bp) &&
		    !bp->path_has_ovlan &&
		    !IS_MF(bp) &&
		    bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			u8 other_port = !BP_PORT(bp);
			u8 other_func = BP_PATH(bp) + 2*other_port;
			val = MF_CFG_RD(bp,
					func_mf_config[other_func].e1hov_tag);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
				bp->path_has_ovlan = true;
		}
	}

	/* adjust igu_sb_cnt to MF for E1H */
	if (CHIP_IS_E1H(bp) && IS_MF(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);

	/* port info */
	bnx2x_get_port_hwinfo(bp);

	/* Get MAC addresses */
	bnx2x_get_mac_hwinfo(bp);

	bnx2x_get_cnic_info(bp);

	return rc;
}
static void bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_start[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	char *vpd_data;
	char *vpd_extended_data = NULL;
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* VPD RO tag should be first tag after identifier string, hence
	 * we should be able to find it in first BNX2X_VPD_LEN chars
	 */
	i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_start[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN) {
		vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
		if (vpd_extended_data == NULL)
			goto out_not_found;

		/* read rest of vpd image into vpd_extended_data */
		memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
		cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
				   block_end - BNX2X_VPD_LEN,
				   vpd_extended_data + BNX2X_VPD_LEN);
		if (cnt < (block_end - BNX2X_VPD_LEN))
			goto out_not_found;
		vpd_data = vpd_extended_data;
	} else
		vpd_data = vpd_start;

	/* now vpd_data holds full vpd content in both cases */

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		kfree(vpd_extended_data);
		return;
	}
out_not_found:
	kfree(vpd_extended_data);
	return;
}
static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
{
	u32 flags = 0;

	if (CHIP_REV_IS_FPGA(bp))
		SET_FLAGS(flags, MODE_FPGA);
	else if (CHIP_REV_IS_EMUL(bp))
		SET_FLAGS(flags, MODE_EMUL);
	else
		SET_FLAGS(flags, MODE_ASIC);

	if (CHIP_MODE_IS_4_PORT(bp))
		SET_FLAGS(flags, MODE_PORT4);
	else
		SET_FLAGS(flags, MODE_PORT2);

	if (CHIP_IS_E2(bp))
		SET_FLAGS(flags, MODE_E2);
	else if (CHIP_IS_E3(bp)) {
		SET_FLAGS(flags, MODE_E3);
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			SET_FLAGS(flags, MODE_E3_A0);
		else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
			SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
	}

	if (IS_MF(bp)) {
		SET_FLAGS(flags, MODE_MF);
		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			SET_FLAGS(flags, MODE_MF_SD);
			break;
		case MULTI_FUNCTION_SI:
			SET_FLAGS(flags, MODE_MF_SI);
			break;
		case MULTI_FUNCTION_AFEX:
			SET_FLAGS(flags, MODE_MF_AFEX);
			break;
		}
	} else
		SET_FLAGS(flags, MODE_SF);

#if defined(__LITTLE_ENDIAN)
	SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
#else /*(__BIG_ENDIAN)*/
	SET_FLAGS(flags, MODE_BIG_ENDIAN);
#endif
	INIT_MODE_FLAGS(bp) = flags;
}
static int bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int rc;

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	mutex_init(&bp->drv_info_mutex);
	sema_init(&bp->stats_lock, 1);
	bp->drv_info_mng_owner = false;
	INIT_LIST_HEAD(&bp->vlan_reg);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
	INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
	if (IS_PF(bp)) {
		rc = bnx2x_get_hwinfo(bp);
		if (rc)
			return rc;
	} else {
		eth_zero_addr(bp->dev->dev_addr);
	}

	bnx2x_set_modes_bitmap(bp);

	rc = bnx2x_alloc_mem_bp(bp);
	if (rc)
		return rc;

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		/* init fw_seq */
		bp->fw_seq =
			SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

		rc = bnx2x_prev_unload(bp);
		if (rc) {
			bnx2x_free_mem_bp(bp);
			return rc;
		}
	}

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");

	bp->disable_tpa = disable_tpa;
	bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
	/* Reduce memory usage in kdump environment by disabling TPA */
	bp->disable_tpa |= is_kdump_kernel();

	/* Set TPA flags */
	if (bp->disable_tpa) {
		bp->dev->hw_features &= ~NETIF_F_LRO;
		bp->dev->features &= ~NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);

	bp->mrrs = mrrs;

	bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
	if (IS_VF(bp))
		bp->rx_ring_size = MAX_RX_AVAIL;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
	    SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
	    SHMEM2_HAS(bp, dcbx_en) &&
	    SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
	    SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
	    SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
		bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
		bnx2x_dcbx_init_params(bp);
	} else
		bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);

	if (CHIP_IS_E1x(bp))
		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
	else
		bp->cnic_base_cl_id = FP_SB_MAX_E2;

	/* multiple tx priority */
	if (IS_VF(bp))
		bp->max_cos = 1;
	else if (CHIP_IS_E1x(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
	else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
	else if (CHIP_IS_E3B0(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
	else
		BNX2X_ERR("unknown chip %x revision %x\n",
			  CHIP_NUM(bp), CHIP_REV(bp));
	BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);

	/* We need at least one default status block for slow-path events,
	 * a second status block for the L2 queue, and a third status block
	 * for CNIC if supported.
	 */
	if (IS_VF(bp))
		bp->min_msix_vec_cnt = 1;
	else if (CNIC_SUPPORT(bp))
		bp->min_msix_vec_cnt = 3;
	else /* PF w/o cnic */
		bp->min_msix_vec_cnt = 2;
	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);

	bp->dump_preset_idx = 1;

	if (CHIP_IS_E3B0(bp))
		bp->flags |= PTP_SUPPORTED;

	return rc;
}
/****************************************************************************
* General service functions
****************************************************************************/

/*
 * net_device service functions
 */
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	bp->stats_init = true;

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	/* If parity had happened during the unload, then attentions
	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
	 * want the first function loaded on the current engine to
	 * complete the recovery.
	 * Parity recovery is only relevant for PF driver.
	 */
	if (IS_PF(bp)) {
		int other_engine = BP_PATH(bp) ? 0 : 1;
		bool other_load_status, load_status;
		bool global = false;

		other_load_status = bnx2x_get_load_status(bp, other_engine);
		load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
		if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
		    bnx2x_chk_parity_attn(bp, &global, true)) {
			do {
				/* If there are attentions and they are in a
				 * global blocks, set the GLOBAL_RESET bit
				 * regardless whether it will be this function
				 * that will complete the recovery or not.
				 */
				if (global)
					bnx2x_set_reset_global(bp);

				/* Only the first function on the current
				 * engine should try to recover in open. In case
				 * of attentions in global blocks only the first
				 * in the chip should try to recover.
				 */
				if ((!load_status &&
				     (!global || !other_load_status)) &&
				    bnx2x_trylock_leader_lock(bp) &&
				    !bnx2x_leader_reset(bp)) {
					netdev_info(bp->dev,
						    "Recovered in open\n");
					break;
				}

				/* recovery has failed... */
				bnx2x_set_power_state(bp, PCI_D3hot);
				bp->recovery_state = BNX2X_RECOVERY_FAILED;

				BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
					  "If you still see this message after a few retries then power cycle is required.\n");

				return -EAGAIN;
			} while (0);
		}
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);
	if (rc)
		return rc;

	if (IS_PF(bp))
		udp_tunnel_get_rx_info(dev);

	return 0;
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	return 0;
}
struct bnx2x_mcast_list_elem_group
{
	struct list_head mcast_group_link;
	struct bnx2x_mcast_list_elem mcast_elems[];
};

#define MCAST_ELEMS_PER_PG \
	((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
	sizeof(struct bnx2x_mcast_list_elem))
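
/* Illustrative sizing (assumed 64-bit layout, 4K pages): the group header is
 * a single list_head (16 bytes) and each bnx2x_mcast_list_elem is roughly a
 * list link plus a MAC pointer (~24 bytes), so one page batches on the order
 * of 170 multicast entries per allocation.
 */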
static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
{
	struct bnx2x_mcast_list_elem_group *current_mcast_group;

	while (!list_empty(mcast_group_list)) {
		current_mcast_group = list_first_entry(mcast_group_list,
				      struct bnx2x_mcast_list_elem_group,
				      mcast_group_link);
		list_del(&current_mcast_group->mcast_group_link);
		free_page((unsigned long)current_mcast_group);
	}
}
static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
				      struct bnx2x_mcast_ramrod_params *p,
				      struct list_head *mcast_group_list)
{
	struct bnx2x_mcast_list_elem *mc_mac;
	struct netdev_hw_addr *ha;
	struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
	int mc_count = netdev_mc_count(bp->dev);
	int offset = 0;

	INIT_LIST_HEAD(&p->mcast_list);
	netdev_for_each_mc_addr(ha, bp->dev) {
		if (!offset) {
			current_mcast_group =
				(struct bnx2x_mcast_list_elem_group *)
				__get_free_page(GFP_ATOMIC);
			if (!current_mcast_group) {
				bnx2x_free_mcast_macs_list(mcast_group_list);
				BNX2X_ERR("Failed to allocate mc MAC list\n");
				return -ENOMEM;
			}
			list_add(&current_mcast_group->mcast_group_link,
				 mcast_group_list);
		}
		mc_mac = &current_mcast_group->mcast_elems[offset];
		mc_mac->mac = bnx2x_mc_addr(ha);
		list_add_tail(&mc_mac->link, &p->mcast_list);
		offset++;
		if (offset == MCAST_ELEMS_PER_PG)
			offset = 0;
	}
	p->mcast_list_len = mc_count;

	return 0;
}
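
/* Design note: batching the list elements page-by-page keeps the GFP_ATOMIC
 * allocations (this runs under the netdev address lock) down to one per
 * MCAST_ELEMS_PER_PG addresses instead of one allocation per multicast
 * entry.
 */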
/**
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 *
 * We will use zero (0) as a MAC type for these MACs.
 */
static int bnx2x_set_uc_list(struct bnx2x *bp)
{
	int rc;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
	unsigned long ramrod_flags = 0;

	/* First schedule a cleanup of old configuration */
	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
	if (rc < 0) {
		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
		return rc;
	}

	netdev_for_each_uc_addr(ha, dev) {
		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
				       BNX2X_UC_LIST_MAC, &ramrod_flags);
		if (rc == -EEXIST) {
			DP(BNX2X_MSG_SP,
			   "Failed to schedule ADD operations: %d\n", rc);
			/* do not treat adding same MAC as error */
			rc = 0;
		} else if (rc < 0) {
			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
				  rc);
			return rc;
		}
	}

	/* Execute the pending commands */
	__set_bit(RAMROD_CONT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
				 BNX2X_UC_LIST_MAC, &ramrod_flags);
}
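
/* The adds above are only queued; the final bnx2x_set_mac_one() call with a
 * NULL MAC and RAMROD_CONT set is what actually drains the pending command
 * list to the device.
 */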
static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
{
	LIST_HEAD(mcast_group_list);
	struct net_device *dev = bp->dev;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc = 0;

	rparam.mcast_obj = &bp->mcast_obj;

	/* first, clear all configured multicast MACs */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0) {
		BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
		return rc;
	}

	/* then, configure a new MACs list */
	if (netdev_mc_count(dev)) {
		rc = bnx2x_init_mcast_macs_list(bp, &rparam,
						&mcast_group_list);
		if (rc)
			return rc;

		/* Now add the new MACs */
		rc = bnx2x_config_mcast(bp, &rparam,
					BNX2X_MCAST_CMD_ADD);
		if (rc < 0)
			BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
				  rc);

		bnx2x_free_mcast_macs_list(&mcast_group_list);
	}

	return rc;
}
static int bnx2x_set_mc_list(struct bnx2x *bp)
{
	LIST_HEAD(mcast_group_list);
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct net_device *dev = bp->dev;
	int rc = 0;

	/* On older adapters, we need to flush and re-add filters */
	if (CHIP_IS_E1x(bp))
		return bnx2x_set_mc_list_e1x(bp);

	rparam.mcast_obj = &bp->mcast_obj;

	if (netdev_mc_count(dev)) {
		rc = bnx2x_init_mcast_macs_list(bp, &rparam,
						&mcast_group_list);
		if (rc)
			return rc;

		/* Override the currently configured set of mc filters */
		rc = bnx2x_config_mcast(bp, &rparam,
					BNX2X_MCAST_CMD_SET);
		if (rc < 0)
			BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
				  rc);

		bnx2x_free_mcast_macs_list(&mcast_group_list);
	} else {
		/* If no mc addresses are required, flush the configuration */
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
		if (rc < 0)
			BNX2X_ERR("Failed to clear multicast configuration %d\n",
				  rc);
	}

	return rc;
}
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* Schedule an SP task to handle rest of change */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
			       NETIF_MSG_IFUP);
}
void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
{
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);

	netif_addr_lock_bh(bp->dev);

	if (bp->dev->flags & IFF_PROMISC) {
		rx_mode = BNX2X_RX_MODE_PROMISC;
	} else if ((bp->dev->flags & IFF_ALLMULTI) ||
		   ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
		    CHIP_IS_E1(bp))) {
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	} else {
		if (IS_PF(bp)) {
			/* some multicasts */
			if (bnx2x_set_mc_list(bp) < 0)
				rx_mode = BNX2X_RX_MODE_ALLMULTI;

			/* release bh lock, as bnx2x_set_uc_list might sleep */
			netif_addr_unlock_bh(bp->dev);
			if (bnx2x_set_uc_list(bp) < 0)
				rx_mode = BNX2X_RX_MODE_PROMISC;
			netif_addr_lock_bh(bp->dev);
		} else {
			/* configuring mcast to a vf involves sleeping (when we
			 * wait for the pf's response).
			 */
			bnx2x_schedule_sp_rtnl(bp,
					       BNX2X_SP_RTNL_VFPF_MCAST, 0);
		}
	}

	bp->rx_mode = rx_mode;
	/* handle ISCSI SD mode */
	if (IS_MF_ISCSI_ONLY(bp))
		bp->rx_mode = BNX2X_RX_MODE_NONE;

	/* Schedule the rx_mode command */
	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
		netif_addr_unlock_bh(bp->dev);
		return;
	}

	if (IS_PF(bp)) {
		bnx2x_set_storm_rx_mode(bp);
		netif_addr_unlock_bh(bp->dev);
	} else {
		/* VF will need to request the PF to make this change, and so
		 * the VF needs to release the bottom-half lock prior to the
		 * request (as it will likely require sleep on the VF side)
		 */
		netif_addr_unlock_bh(bp->dev);
		bnx2x_vfpf_storm_rx_mode(bp);
	}
}
12872 /* called with rtnl_lock */
12873 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12874 int devad, u16 addr)
12876 struct bnx2x *bp = netdev_priv(netdev);
12880 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12881 prtad, devad, addr);
12883 /* The HW expects different devad if CL22 is used */
12884 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12886 bnx2x_acquire_phy_lock(bp);
12887 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12888 bnx2x_release_phy_lock(bp);
12889 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12896 /* called with rtnl_lock */
12897 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12898 u16 addr, u16 value)
12900 struct bnx2x *bp = netdev_priv(netdev);
12904 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12905 prtad, devad, addr, value);
12907 /* The HW expects different devad if CL22 is used */
12908 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12910 bnx2x_acquire_phy_lock(bp);
12911 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12912 bnx2x_release_phy_lock(bp);
12916 /* called with rtnl_lock */
12917 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12919 struct bnx2x *bp = netdev_priv(dev);
12920 struct mii_ioctl_data *mdio = if_mii(ifr);
12922 if (!netif_running(dev))
12926 case SIOCSHWTSTAMP:
12927 return bnx2x_hwtstamp_ioctl(bp, ifr);
12929 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12930 mdio->phy_id, mdio->reg_num, mdio->val_in);
12931 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12935 #ifdef CONFIG_NET_POLL_CONTROLLER
12936 static void poll_bnx2x(struct net_device *dev)
12938 struct bnx2x *bp = netdev_priv(dev);
12941 for_each_eth_queue(bp, i) {
12942 struct bnx2x_fastpath *fp = &bp->fp[i];
12943 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12948 static int bnx2x_validate_addr(struct net_device *dev)
12950 struct bnx2x *bp = netdev_priv(dev);
12952 /* query the bulletin board for mac address configured by the PF */
12954 bnx2x_sample_bulletin(bp);
12956 if (!is_valid_ether_addr(dev->dev_addr)) {
12957 BNX2X_ERR("Invalid Ethernet address\n");
12958 return -EADDRNOTAVAIL;
12963 static int bnx2x_get_phys_port_id(struct net_device *netdev,
12964 struct netdev_phys_item_id *ppid)
12966 struct bnx2x *bp = netdev_priv(netdev);
12968 if (!(bp->flags & HAS_PHYS_PORT_ID))
12969 return -EOPNOTSUPP;
12971 ppid->id_len = sizeof(bp->phys_port_id);
12972 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12977 static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12978 struct net_device *dev,
12979 netdev_features_t features)
12981 features = vlan_features_check(skb, features);
12982 return vxlan_features_check(skb, features);
12985 static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12990 unsigned long ramrod_flags = 0;
12992 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12993 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
12994 add, &ramrod_flags);
12996 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
13002 static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
13004 struct bnx2x_vlan_entry *vlan;
13007 /* Configure all non-configured entries */
13008 list_for_each_entry(vlan, &bp->vlan_reg, link) {
13012 if (bp->vlan_cnt >= bp->vlan_credit)
13015 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
13017 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
13021 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
13029 static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
13031 bool need_accept_any_vlan;
13033 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
13035 if (bp->accept_any_vlan != need_accept_any_vlan) {
13036 bp->accept_any_vlan = need_accept_any_vlan;
13037 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
13038 bp->accept_any_vlan ? "raised" : "cleared");
13041 bnx2x_set_rx_mode_inner(bp);
13043 bnx2x_vfpf_storm_rx_mode(bp);
13048 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
13050 /* Don't set rx mode here. Our caller will do it. */
13051 bnx2x_vlan_configure(bp, false);
13056 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
13058 struct bnx2x *bp = netdev_priv(dev);
13059 struct bnx2x_vlan_entry *vlan;
13061 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
13063 vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
13069 list_add_tail(&vlan->link, &bp->vlan_reg);
13071 if (netif_running(dev))
13072 bnx2x_vlan_configure(bp, true);
13077 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
13079 struct bnx2x *bp = netdev_priv(dev);
13080 struct bnx2x_vlan_entry *vlan;
13081 bool found = false;
13084 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
13086 list_for_each_entry(vlan, &bp->vlan_reg, link)
13087 if (vlan->vid == vid) {
13093 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
13097 if (netif_running(dev) && vlan->hw) {
13098 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
13099 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
13103 list_del(&vlan->link);
13106 if (netif_running(dev))
13107 bnx2x_vlan_configure(bp, true);
13109 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
13114 static const struct net_device_ops bnx2x_netdev_ops = {
13115 .ndo_open = bnx2x_open,
13116 .ndo_stop = bnx2x_close,
13117 .ndo_start_xmit = bnx2x_start_xmit,
13118 .ndo_select_queue = bnx2x_select_queue,
13119 .ndo_set_rx_mode = bnx2x_set_rx_mode,
13120 .ndo_set_mac_address = bnx2x_change_mac_addr,
13121 .ndo_validate_addr = bnx2x_validate_addr,
13122 .ndo_do_ioctl = bnx2x_ioctl,
13123 .ndo_change_mtu = bnx2x_change_mtu,
13124 .ndo_fix_features = bnx2x_fix_features,
13125 .ndo_set_features = bnx2x_set_features,
13126 .ndo_tx_timeout = bnx2x_tx_timeout,
13127 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
13128 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
13129 #ifdef CONFIG_NET_POLL_CONTROLLER
13130 .ndo_poll_controller = poll_bnx2x,
13132 .ndo_setup_tc = __bnx2x_setup_tc,
13133 #ifdef CONFIG_BNX2X_SRIOV
13134 .ndo_set_vf_mac = bnx2x_set_vf_mac,
13135 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
13136 .ndo_get_vf_config = bnx2x_get_vf_config,
13138 #ifdef NETDEV_FCOE_WWNN
13139 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
13142 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
13143 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
13144 .ndo_features_check = bnx2x_features_check,
13145 .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add,
13146 .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del,
13149 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
13151 struct device *dev = &bp->pdev->dev;
13153 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
13154 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
13155 dev_err(dev, "System does not support DMA, aborting\n");
13162 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13164 if (bp->flags & AER_ENABLED) {
13165 pci_disable_pcie_error_reporting(bp->pdev);
13166 bp->flags &= ~AER_ENABLED;
13170 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13171 struct net_device *dev, unsigned long board_type)
13175 bool chip_is_e1x = (board_type == BCM57710 ||
13176 board_type == BCM57711 ||
13177 board_type == BCM57711E);
13179 SET_NETDEV_DEV(dev, &pdev->dev);
13184 rc = pci_enable_device(pdev);
13186 dev_err(&bp->pdev->dev,
13187 "Cannot enable PCI device, aborting\n");
13191 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13192 dev_err(&bp->pdev->dev,
13193 "Cannot find PCI device base address, aborting\n");
13195 goto err_out_disable;
13198 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13199 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13201 goto err_out_disable;
13204 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13205 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13206 PCICFG_REVESION_ID_ERROR_VAL) {
13207 pr_err("PCI device error, probably due to fan failure, aborting\n");
13209 goto err_out_disable;
13212 if (atomic_read(&pdev->enable_cnt) == 1) {
13213 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13215 dev_err(&bp->pdev->dev,
13216 "Cannot obtain PCI resources, aborting\n");
13217 goto err_out_disable;
13220 pci_set_master(pdev);
13221 pci_save_state(pdev);
13225 if (!pdev->pm_cap) {
13226 dev_err(&bp->pdev->dev,
13227 "Cannot find power management capability, aborting\n");
13229 goto err_out_release;
13233 if (!pci_is_pcie(pdev)) {
13234 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13236 goto err_out_release;
13239 rc = bnx2x_set_coherency_mask(bp);
13241 goto err_out_release;
13243 dev->mem_start = pci_resource_start(pdev, 0);
13244 dev->base_addr = dev->mem_start;
13245 dev->mem_end = pci_resource_end(pdev, 0);
13247 dev->irq = pdev->irq;
13249 bp->regview = pci_ioremap_bar(pdev, 0);
13250 if (!bp->regview) {
13251 dev_err(&bp->pdev->dev,
13252 "Cannot map register space, aborting\n");
13254 goto err_out_release;
13257 /* In E1/E1H use the PCI device function given by the kernel.
13258 * In E2/E3 read the physical function from the ME register, since these
13259 * chips support Physical Device Assignment where the kernel BDF may be
13260 * arbitrary (depending on the hypervisor).
13263 bp->pf_num = PCI_FUNC(pdev->devfn);
13266 pci_read_config_dword(bp->pdev,
13267 PCICFG_ME_REGISTER, &pci_cfg_dword);
13268 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13269 ME_REG_ABS_PF_NUM_SHIFT);
13271 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
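/* Worked example (illustrative, not from the original sources; the exact
 * field layout is an assumption): if ME_REG_ABS_PF_NUM were a 3-bit field
 * at ME_REG_ABS_PF_NUM_SHIFT == 8, an ME register value of 0x00000500
 * would decode as pf_num = (0x00000500 & (0x7 << 8)) >> 8 = 5.
 */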
13273 /* clean indirect addresses */
13274 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13275 PCICFG_VENDOR_ID_OFFSET);
13277 /* Set PCIe reset type to fundamental for EEH recovery */
13278 pdev->needs_freset = 1;
13280 /* AER (Advanced Error reporting) configuration */
13281 rc = pci_enable_pcie_error_reporting(pdev);
13283 bp->flags |= AER_ENABLED;
13285 BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);
13288 * Clean the following indirect addresses for all functions, since they
13289 * are not used by the driver.
13292 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13293 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13294 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13295 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13298 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13299 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13300 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13301 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13304 /* Enable internal target-read (in case we are probed after PF
13305 * FLR). Must be done prior to any BAR read access. Only for
13310 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13313 dev->watchdog_timeo = TX_TIMEOUT;
13315 dev->netdev_ops = &bnx2x_netdev_ops;
13316 bnx2x_set_ethtool_ops(bp, dev);
13318 dev->priv_flags |= IFF_UNICAST_FLT;
13320 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13321 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13322 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
13323 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
13324 if (!chip_is_e1x) {
13325 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13326 NETIF_F_GSO_IPXIP4 |
13327 NETIF_F_GSO_UDP_TUNNEL |
13328 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13329 NETIF_F_GSO_PARTIAL;
13331 dev->hw_enc_features =
13332 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13333 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13334 NETIF_F_GSO_IPXIP4 |
13335 NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13336 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
13337 NETIF_F_GSO_PARTIAL;
13339 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
13340 NETIF_F_GSO_UDP_TUNNEL_CSUM;
13343 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13344 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13348 bp->accept_any_vlan = true;
13350 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13352 /* For VF we'll know whether to enable VLAN filtering after
13353 * getting a response to CHANNEL_TLV_ACQUIRE from PF.
13356 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13357 dev->features |= NETIF_F_HIGHDMA;
13359 /* Add Loopback capability to the device */
13360 dev->hw_features |= NETIF_F_LOOPBACK;
13363 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13366 /* MTU range, 46 - 9600 */
13367 dev->min_mtu = ETH_MIN_PACKET_SIZE;
13368 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
13370 /* get_port_hwinfo() will set prtad and mmds properly */
13371 bp->mdio.prtad = MDIO_PRTAD_NONE;
13373 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13374 bp->mdio.dev = dev;
13375 bp->mdio.mdio_read = bnx2x_mdio_read;
13376 bp->mdio.mdio_write = bnx2x_mdio_write;
13381 if (atomic_read(&pdev->enable_cnt) == 1)
13382 pci_release_regions(pdev);
13385 pci_disable_device(pdev);
13391 static int bnx2x_check_firmware(struct bnx2x *bp)
13393 const struct firmware *firmware = bp->firmware;
13394 struct bnx2x_fw_file_hdr *fw_hdr;
13395 struct bnx2x_fw_file_section *sections;
13396 u32 offset, len, num_ops;
13397 __be16 *ops_offsets;
13401 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
13402 BNX2X_ERR("Wrong FW size\n");
13406 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13407 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13409 /* Make sure none of the offsets and sizes make us read beyond
13410 * the end of the firmware data */
13411 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13412 offset = be32_to_cpu(sections[i].offset);
13413 len = be32_to_cpu(sections[i].len);
13414 if (offset + len > firmware->size) {
13415 BNX2X_ERR("Section %d length is out of bounds\n", i);
13420 /* Likewise for the init_ops offsets */
13421 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13422 ops_offsets = (__force __be16 *)(firmware->data + offset);
13423 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13425 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13426 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13427 BNX2X_ERR("Section offset %d is out of bounds\n", i);
13432 /* Check FW version */
13433 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13434 fw_ver = firmware->data + offset;
13435 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13436 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13437 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13438 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13439 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13440 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
13441 BCM_5710_FW_MAJOR_VERSION,
13442 BCM_5710_FW_MINOR_VERSION,
13443 BCM_5710_FW_REVISION_VERSION,
13444 BCM_5710_FW_ENGINEERING_VERSION);
13451 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13453 const __be32 *source = (const __be32 *)_source;
13454 u32 *target = (u32 *)_target;
13457 for (i = 0; i < n/4; i++)
13458 target[i] = be32_to_cpu(source[i]);
13462 Ops array is stored in the following format:
13463 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13465 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13467 const __be32 *source = (const __be32 *)_source;
13468 struct raw_op *target = (struct raw_op *)_target;
13471 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13472 tmp = be32_to_cpu(source[j]);
13473 target[i].op = (tmp >> 24) & 0xff;
13474 target[i].offset = tmp & 0xffffff;
13475 target[i].raw_data = be32_to_cpu(source[j + 1]);
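/* Worked example (illustrative): the 8-byte big-endian record
 * 02 00 10 40 00 00 00 01 decodes, per the loop above, as
 * op = 0x02, offset = 0x001040, raw_data = 0x00000001.
 */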
13479 /* IRO array is stored in the following format:
13480 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
13482 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13484 const __be32 *source = (const __be32 *)_source;
13485 struct iro *target = (struct iro *)_target;
13488 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13489 target[i].base = be32_to_cpu(source[j]);
13491 tmp = be32_to_cpu(source[j]);
13492 target[i].m1 = (tmp >> 16) & 0xffff;
13493 target[i].m2 = tmp & 0xffff;
13495 tmp = be32_to_cpu(source[j]);
13496 target[i].m3 = (tmp >> 16) & 0xffff;
13497 target[i].size = tmp & 0xffff;
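/* Worked example (illustrative): the big-endian words 0x00006000,
 * 0x00400008 and 0x00100010 decode, per the loop above, as base = 0x6000,
 * m1 = 0x0040, m2 = 0x0008, m3 = 0x0010, size = 0x0010.
 */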
13502 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13504 const __be16 *source = (const __be16 *)_source;
13505 u16 *target = (u16 *)_target;
13508 for (i = 0; i < n/2; i++)
13509 target[i] = be16_to_cpu(source[i]);
13512 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13514 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13515 bp->arr = kmalloc(len, GFP_KERNEL); \
13518 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13519 (u8 *)bp->arr, len); \
13522 static int bnx2x_init_firmware(struct bnx2x *bp)
13524 const char *fw_file_name;
13525 struct bnx2x_fw_file_hdr *fw_hdr;
13531 if (CHIP_IS_E1(bp))
13532 fw_file_name = FW_FILE_NAME_E1;
13533 else if (CHIP_IS_E1H(bp))
13534 fw_file_name = FW_FILE_NAME_E1H;
13535 else if (!CHIP_IS_E1x(bp))
13536 fw_file_name = FW_FILE_NAME_E2;
13538 BNX2X_ERR("Unsupported chip revision\n");
13541 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13543 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13545 BNX2X_ERR("Can't load firmware file %s\n",
13547 goto request_firmware_exit;
13550 rc = bnx2x_check_firmware(bp);
13552 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13553 goto request_firmware_exit;
13556 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13558 /* Initialize the pointers to the init arrays */
13561 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13564 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13567 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13570 /* STORMs firmware */
13571 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13572 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13573 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13574 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13575 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13576 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13577 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13578 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13579 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13580 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13581 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13582 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13583 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13584 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13585 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13586 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13588 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13593 kfree(bp->init_ops_offsets);
13594 init_offsets_alloc_err:
13595 kfree(bp->init_ops);
13596 init_ops_alloc_err:
13597 kfree(bp->init_data);
13598 request_firmware_exit:
13599 release_firmware(bp->firmware);
13600 bp->firmware = NULL;
13605 static void bnx2x_release_firmware(struct bnx2x *bp)
13607 kfree(bp->init_ops_offsets);
13608 kfree(bp->init_ops);
13609 kfree(bp->init_data);
13610 release_firmware(bp->firmware);
13611 bp->firmware = NULL;
13614 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13615 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13616 .init_hw_cmn = bnx2x_init_hw_common,
13617 .init_hw_port = bnx2x_init_hw_port,
13618 .init_hw_func = bnx2x_init_hw_func,
13620 .reset_hw_cmn = bnx2x_reset_common,
13621 .reset_hw_port = bnx2x_reset_port,
13622 .reset_hw_func = bnx2x_reset_func,
13624 .gunzip_init = bnx2x_gunzip_init,
13625 .gunzip_end = bnx2x_gunzip_end,
13627 .init_fw = bnx2x_init_firmware,
13628 .release_fw = bnx2x_release_firmware,
13631 void bnx2x__init_func_obj(struct bnx2x *bp)
13633 /* Prepare DMAE related driver resources */
13634 bnx2x_setup_dmae(bp);
13636 bnx2x_init_func_obj(bp, &bp->func_obj,
13637 bnx2x_sp(bp, func_rdata),
13638 bnx2x_sp_mapping(bp, func_rdata),
13639 bnx2x_sp(bp, func_afex_rdata),
13640 bnx2x_sp_mapping(bp, func_afex_rdata),
13641 &bnx2x_func_sp_drv);
13644 /* must be called after sriov-enable */
13645 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13647 int cid_count = BNX2X_L2_MAX_CID(bp);
13650 cid_count += BNX2X_VF_CIDS;
13652 if (CNIC_SUPPORT(bp))
13653 cid_count += CNIC_CID_MAX;
13655 return roundup(cid_count, QM_CID_ROUND);
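/* Worked example (illustrative; the value of QM_CID_ROUND is assumed to
 * be 1024 here): a total cid_count of 1158 would be returned as
 * roundup(1158, 1024) = 2048.
 */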
13659 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
13664 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13670 * If MSI-X is not supported - return number of SBs needed to support
13671 * one fast path queue: one FP queue + SB for CNIC
13673 if (!pdev->msix_cap) {
13674 dev_info(&pdev->dev, "no msix capability found\n");
13675 return 1 + cnic_cnt;
13677 dev_info(&pdev->dev, "msix capability found\n");
13680 * The value in the PCI configuration space is the index of the last
13681 * entry, namely one less than the actual size of the table, which is
13682 * exactly what we want to return from this function: number of all SBs
13683 * without the default SB.
13684 * For VFs there is no default SB, so the caller adds one more (index + 1).
13686 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13688 index = control & PCI_MSIX_FLAGS_QSIZE;
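/* Worked example (illustrative): control = 0x003f yields index = 63,
 * i.e. a 64-entry MSI-X table. A PF thus gets 63 non-default SBs, and
 * for a VF (which has no default SB) the caller in bnx2x_init_one()
 * adds one more, giving index + 1.
 */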
13693 static int set_max_cos_est(int chip_id)
13699 return BNX2X_MULTI_TX_COS_E1X;
13702 return BNX2X_MULTI_TX_COS_E2_E3A0;
13707 case BCM57840_4_10:
13708 case BCM57840_2_20:
13714 return BNX2X_MULTI_TX_COS_E3B0;
13722 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13727 static int set_is_vf(int chip_id)
13741 /* nig_tsgen registers relative address */
13742 #define tsgen_ctrl 0x0
13743 #define tsgen_freecount 0x10
13744 #define tsgen_synctime_t0 0x20
13745 #define tsgen_offset_t0 0x28
13746 #define tsgen_drift_t0 0x30
13747 #define tsgen_synctime_t1 0x58
13748 #define tsgen_offset_t1 0x60
13749 #define tsgen_drift_t1 0x68
13751 /* FW workaround for setting drift */
13752 static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13753 int best_val, int best_period)
13755 struct bnx2x_func_state_params func_params = {NULL};
13756 struct bnx2x_func_set_timesync_params *set_timesync_params =
13757 &func_params.params.set_timesync;
13759 /* Prepare parameters for function state transitions */
13760 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13761 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13763 func_params.f_obj = &bp->func_obj;
13764 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13766 /* Function parameters */
13767 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13768 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13769 set_timesync_params->add_sub_drift_adjust_value =
13770 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13771 set_timesync_params->drift_adjust_value = best_val;
13772 set_timesync_params->drift_adjust_period = best_period;
13774 return bnx2x_func_state_change(bp, &func_params);
13777 static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13779 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13782 int val, period, period1, period2, dif, dif1, dif2;
13783 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13785 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13787 if (!netif_running(bp->dev)) {
13789 "PTP adjfreq called while the interface is down\n");
13800 best_period = 0x1FFFFFF;
13801 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13805 /* Changed not to allow val = 8, 16 or 24, as these values
13806 * are not supported by the workaround.
13808 for (val = 0; val <= 31; val++) {
13809 if ((val & 0x7) == 0)
13811 period1 = val * 1000000 / ppb;
13812 period2 = period1 + 1;
13814 dif1 = ppb - (val * 1000000 / period1);
13816 dif1 = BNX2X_MAX_PHC_DRIFT;
13819 dif2 = ppb - (val * 1000000 / period2);
13822 dif = (dif1 < dif2) ? dif1 : dif2;
13823 period = (dif1 < dif2) ? period1 : period2;
13824 if (dif < best_dif) {
13827 best_period = period;
13832 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13835 BNX2X_ERR("Failed to set drift\n");
13839 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
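/* Worked example (illustrative): for ppb = 100 the search above tries
 * val = 1, period1 = 1 * 1000000 / 100 = 10000 and
 * dif1 = 100 - (1 * 1000000 / 10000) = 0, a perfect match, so the
 * ramrod is sent with best_val = 1 and best_period = 10000.
 */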
13845 static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13847 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13849 if (!netif_running(bp->dev)) {
13851 "PTP adjtime called while the interface is down\n");
13855 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13857 timecounter_adjtime(&bp->timecounter, delta);
13862 static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13864 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13867 if (!netif_running(bp->dev)) {
13869 "PTP gettime called while the interface is down\n");
13873 ns = timecounter_read(&bp->timecounter);
13875 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13877 *ts = ns_to_timespec64(ns);
13882 static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13883 const struct timespec64 *ts)
13885 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13888 if (!netif_running(bp->dev)) {
13890 "PTP settime called while the interface is down\n");
13894 ns = timespec64_to_ns(ts);
13896 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13898 /* Re-init the timecounter */
13899 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13904 /* Enable (or disable) ancillary features of the phc subsystem */
13905 static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13906 struct ptp_clock_request *rq, int on)
13908 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13910 BNX2X_ERR("PHC ancillary features are not supported\n");
13914 static void bnx2x_register_phc(struct bnx2x *bp)
13916 /* Fill the ptp_clock_info struct and register the PTP clock */
13917 bp->ptp_clock_info.owner = THIS_MODULE;
13918 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13919 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
13920 bp->ptp_clock_info.n_alarm = 0;
13921 bp->ptp_clock_info.n_ext_ts = 0;
13922 bp->ptp_clock_info.n_per_out = 0;
13923 bp->ptp_clock_info.pps = 0;
13924 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13925 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13926 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13927 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13928 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13930 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13931 if (IS_ERR(bp->ptp_clock)) {
13932 bp->ptp_clock = NULL;
13933 BNX2X_ERR("PTP clock registration failed\n");
13937 static int bnx2x_init_one(struct pci_dev *pdev,
13938 const struct pci_device_id *ent)
13940 struct net_device *dev = NULL;
13942 enum pcie_link_width pcie_width;
13943 enum pci_bus_speed pcie_speed;
13944 int rc, max_non_def_sbs;
13945 int rx_count, tx_count, rss_count, doorbell_size;
13950 /* Management FW 'remembers' living interfaces. Allow it some time
13951 * to forget previously living interfaces, allowing a proper re-load.
13953 if (is_kdump_kernel()) {
13954 ktime_t now = ktime_get_boottime();
13955 ktime_t fw_ready_time = ktime_set(5, 0);
13957 if (ktime_before(now, fw_ready_time))
13958 msleep(ktime_ms_delta(fw_ready_time, now));
13961 /* An estimated maximum supported CoS number according to the chip version.
13963 * We will try to roughly estimate the maximum number of CoSes this chip
13964 * may support in order to minimize the memory allocated for Tx
13965 * netdev_queue's. This number will be accurately calculated during the
13966 * initialization of bp->max_cos based on the chip versions AND chip
13967 * revision in the bnx2x_init_bp().
13969 max_cos_est = set_max_cos_est(ent->driver_data);
13970 if (max_cos_est < 0)
13971 return max_cos_est;
13972 is_vf = set_is_vf(ent->driver_data);
13973 cnic_cnt = is_vf ? 0 : 1;
13975 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13977 /* add another SB for VF as it has no default SB */
13978 max_non_def_sbs += is_vf ? 1 : 0;
13980 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
13981 rss_count = max_non_def_sbs - cnic_cnt;
13986 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
13987 rx_count = rss_count + cnic_cnt;
13989 /* Maximum number of netdev Tx queues:
13990 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
13992 tx_count = rss_count * max_cos_est + cnic_cnt;
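/* Worked example (illustrative): with 16 non-default SBs and CNIC
 * present (cnic_cnt = 1), rss_count = 16 - 1 = 15 and
 * rx_count = 15 + 1 = 16; with max_cos_est = 3,
 * tx_count = 15 * 3 + 1 = 46.
 */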
13994 /* dev zeroed in init_etherdev */
13995 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13999 bp = netdev_priv(dev);
14003 bp->flags |= IS_VF_FLAG;
14005 bp->igu_sb_cnt = max_non_def_sbs;
14006 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
14007 bp->msg_enable = debug;
14008 bp->cnic_support = cnic_cnt;
14009 bp->cnic_probe = bnx2x_cnic_probe;
14011 pci_set_drvdata(pdev, dev);
14013 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
14019 BNX2X_DEV_INFO("This is a %s function\n",
14020 IS_PF(bp) ? "physical" : "virtual");
14021 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
14022 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
14023 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
14024 tx_count, rx_count);
14026 rc = bnx2x_init_bp(bp);
14028 goto init_one_exit;
14030 /* Map doorbells here as we need the real value of bp->max_cos which
14031 * is initialized in bnx2x_init_bp() to determine the number of
14035 bp->doorbells = bnx2x_vf_doorbells(bp);
14036 rc = bnx2x_vf_pci_alloc(bp);
14038 goto init_one_freemem;
14040 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
14041 if (doorbell_size > pci_resource_len(pdev, 2)) {
14042 dev_err(&bp->pdev->dev,
14043 "Cannot map doorbells, bar size too small, aborting\n");
14045 goto init_one_freemem;
14047 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
14050 if (!bp->doorbells) {
14051 dev_err(&bp->pdev->dev,
14052 "Cannot map doorbell space, aborting\n");
14054 goto init_one_freemem;
14058 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
14060 goto init_one_freemem;
14062 #ifdef CONFIG_BNX2X_SRIOV
14063 /* A VF with an old hypervisor or an old PF does not support filtering */
14064 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
14065 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14066 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14071 /* Enable SRIOV if capability found in configuration space */
14072 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
14074 goto init_one_freemem;
14076 /* calc qm_cid_count */
14077 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
14078 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
14080 /* disable FCOE L2 queue for E1x*/
14081 if (CHIP_IS_E1x(bp))
14082 bp->flags |= NO_FCOE_FLAG;
14084 /* Set bp->num_queues for MSI-X mode*/
14085 bnx2x_set_num_queues(bp);
14087 /* Configure interrupt mode: try to enable MSI-X/MSI if
14090 rc = bnx2x_set_int_mode(bp);
14092 dev_err(&pdev->dev, "Cannot set interrupts\n");
14093 goto init_one_freemem;
14095 BNX2X_DEV_INFO("set interrupts successfully\n");
14097 /* register the net device */
14098 rc = register_netdev(dev);
14100 dev_err(&pdev->dev, "Cannot register net device\n");
14101 goto init_one_freemem;
14103 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
14105 if (!NO_FCOE(bp)) {
14106 /* Add storage MAC address */
14108 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14111 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
14112 pcie_speed == PCI_SPEED_UNKNOWN ||
14113 pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
14114 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
14117 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
14118 board_info[ent->driver_data].name,
14119 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
14121 pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
14122 pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
14123 pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
14125 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14127 bnx2x_register_phc(bp);
14129 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14130 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14135 bnx2x_free_mem_bp(bp);
14138 bnx2x_disable_pcie_error_reporting(bp);
14141 iounmap(bp->regview);
14143 if (IS_PF(bp) && bp->doorbells)
14144 iounmap(bp->doorbells);
14148 if (atomic_read(&pdev->enable_cnt) == 1)
14149 pci_release_regions(pdev);
14151 pci_disable_device(pdev);
14156 static void __bnx2x_remove(struct pci_dev *pdev,
14157 struct net_device *dev,
14159 bool remove_netdev)
14161 if (bp->ptp_clock) {
14162 ptp_clock_unregister(bp->ptp_clock);
14163 bp->ptp_clock = NULL;
14166 /* Delete storage MAC address */
14167 if (!NO_FCOE(bp)) {
14169 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14174 /* Delete app tlvs from dcbnl */
14175 bnx2x_dcbnl_update_applist(bp, true);
14180 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14181 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14183 /* Close the interface - either directly or implicitly */
14184 if (remove_netdev) {
14185 unregister_netdev(dev);
14192 bnx2x_iov_remove_one(bp);
14194 /* Power on: we can't let PCI layer write to us while we are in D3 */
14196 bnx2x_set_power_state(bp, PCI_D0);
14197 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14199 /* Set endianness registers to reset values in case the next driver
14200 * boots in a different endianness environment.
14202 bnx2x_reset_endianity(bp);
14205 /* Disable MSI/MSI-X */
14206 bnx2x_disable_msi(bp);
14210 bnx2x_set_power_state(bp, PCI_D3hot);
14212 /* Make sure RESET task is not scheduled before continuing */
14213 cancel_delayed_work_sync(&bp->sp_rtnl_task);
14215 /* send message via vfpf channel to release the resources of this vf */
14217 bnx2x_vfpf_release(bp);
14219 /* Assumes no further PCIe PM changes will occur */
14220 if (system_state == SYSTEM_POWER_OFF) {
14221 pci_wake_from_d3(pdev, bp->wol);
14222 pci_set_power_state(pdev, PCI_D3hot);
14225 bnx2x_disable_pcie_error_reporting(bp);
14226 if (remove_netdev) {
14228 iounmap(bp->regview);
14230 /* For vfs, doorbells are part of the regview and were unmapped
14231 * along with it. FW is only loaded by PF.
14235 iounmap(bp->doorbells);
14237 bnx2x_release_firmware(bp);
14239 bnx2x_vf_pci_dealloc(bp);
14241 bnx2x_free_mem_bp(bp);
14245 if (atomic_read(&pdev->enable_cnt) == 1)
14246 pci_release_regions(pdev);
14248 pci_disable_device(pdev);
14252 static void bnx2x_remove_one(struct pci_dev *pdev)
14254 struct net_device *dev = pci_get_drvdata(pdev);
14258 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14261 bp = netdev_priv(dev);
14263 __bnx2x_remove(pdev, dev, bp, true);
14266 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14268 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14270 bp->rx_mode = BNX2X_RX_MODE_NONE;
14272 if (CNIC_LOADED(bp))
14273 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14276 bnx2x_tx_disable(bp);
14277 /* Delete all NAPI objects */
14278 bnx2x_del_all_napi(bp);
14279 if (CNIC_LOADED(bp))
14280 bnx2x_del_all_napi_cnic(bp);
14281 netdev_reset_tc(bp->dev);
14283 del_timer_sync(&bp->timer);
14284 cancel_delayed_work_sync(&bp->sp_task);
14285 cancel_delayed_work_sync(&bp->period_task);
14287 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14288 bp->stats_state = STATS_STATE_DISABLED;
14289 up(&bp->stats_lock);
14292 bnx2x_save_statistics(bp);
14294 netif_carrier_off(bp->dev);
14300 * bnx2x_io_error_detected - called when PCI error is detected
14301 * @pdev: Pointer to PCI device
14302 * @state: The current pci connection state
14304 * This function is called after a PCI bus error affecting
14305 * this device has been detected.
14307 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
14308 pci_channel_state_t state)
14310 struct net_device *dev = pci_get_drvdata(pdev);
14311 struct bnx2x *bp = netdev_priv(dev);
14315 BNX2X_ERR("IO error detected\n");
14317 netif_device_detach(dev);
14319 if (state == pci_channel_io_perm_failure) {
14321 return PCI_ERS_RESULT_DISCONNECT;
14324 if (netif_running(dev))
14325 bnx2x_eeh_nic_unload(bp);
14327 bnx2x_prev_path_mark_eeh(bp);
14329 pci_disable_device(pdev);
14333 /* Request a slot reset */
14334 return PCI_ERS_RESULT_NEED_RESET;
14338 * bnx2x_io_slot_reset - called after the PCI bus has been reset
14339 * @pdev: Pointer to PCI device
14341 * Restart the card from scratch, as if from a cold-boot.
14343 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14345 struct net_device *dev = pci_get_drvdata(pdev);
14346 struct bnx2x *bp = netdev_priv(dev);
14350 BNX2X_ERR("IO slot reset initializing...\n");
14351 if (pci_enable_device(pdev)) {
14352 dev_err(&pdev->dev,
14353 "Cannot re-enable PCI device after reset\n");
14355 return PCI_ERS_RESULT_DISCONNECT;
14358 pci_set_master(pdev);
14359 pci_restore_state(pdev);
14360 pci_save_state(pdev);
14362 if (netif_running(dev))
14363 bnx2x_set_power_state(bp, PCI_D0);
14365 if (netif_running(dev)) {
14366 BNX2X_ERR("IO slot reset --> driver unload\n");
14368 /* MCP should have been reset; need to wait for validity */
14369 if (bnx2x_init_shmem(bp)) {
14371 return PCI_ERS_RESULT_DISCONNECT;
14374 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14378 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14379 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14380 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14382 bnx2x_drain_tx_queues(bp);
14383 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14384 bnx2x_netif_stop(bp, 1);
14385 bnx2x_free_irq(bp);
14387 /* Report UNLOAD_DONE to MCP */
14388 bnx2x_send_unload_done(bp, true);
14393 bnx2x_prev_unload(bp);
14395 /* We should have reset the engine, so it's fair to
14396 * assume the FW will no longer write to the bnx2x driver.
14398 bnx2x_squeeze_objects(bp);
14399 bnx2x_free_skbs(bp);
14400 for_each_rx_queue(bp, i)
14401 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14402 bnx2x_free_fp_mem(bp);
14403 bnx2x_free_mem(bp);
14405 bp->state = BNX2X_STATE_CLOSED;
14410 /* If AER, perform cleanup of the PCIe registers */
14411 if (bp->flags & AER_ENABLED) {
14412 if (pci_cleanup_aer_uncorrect_error_status(pdev))
14413 BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
14415 DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
14418 return PCI_ERS_RESULT_RECOVERED;
14422 * bnx2x_io_resume - called when traffic can start flowing again
14423 * @pdev: Pointer to PCI device
14425 * This callback is called when the error recovery driver tells us that
14426 * it's OK to resume normal operation.
14428 static void bnx2x_io_resume(struct pci_dev *pdev)
14430 struct net_device *dev = pci_get_drvdata(pdev);
14431 struct bnx2x *bp = netdev_priv(dev);
14433 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14434 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14440 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14441 DRV_MSG_SEQ_NUMBER_MASK;
14443 if (netif_running(dev))
14444 bnx2x_nic_load(bp, LOAD_NORMAL);
14446 netif_device_attach(dev);
14451 static const struct pci_error_handlers bnx2x_err_handler = {
14452 .error_detected = bnx2x_io_error_detected,
14453 .slot_reset = bnx2x_io_slot_reset,
14454 .resume = bnx2x_io_resume,
14457 static void bnx2x_shutdown(struct pci_dev *pdev)
14459 struct net_device *dev = pci_get_drvdata(pdev);
14465 bp = netdev_priv(dev);
14470 netif_device_detach(dev);
14473 /* Don't remove the netdevice, as there are scenarios which will cause
14474 * the kernel to hang, e.g., when trying to remove bnx2i while the
14475 * rootfs is mounted from SAN.
14477 __bnx2x_remove(pdev, dev, bp, false);
14480 static struct pci_driver bnx2x_pci_driver = {
14481 .name = DRV_MODULE_NAME,
14482 .id_table = bnx2x_pci_tbl,
14483 .probe = bnx2x_init_one,
14484 .remove = bnx2x_remove_one,
14485 .suspend = bnx2x_suspend,
14486 .resume = bnx2x_resume,
14487 .err_handler = &bnx2x_err_handler,
14488 #ifdef CONFIG_BNX2X_SRIOV
14489 .sriov_configure = bnx2x_sriov_configure,
14491 .shutdown = bnx2x_shutdown,
14494 static int __init bnx2x_init(void)
14498 pr_info("%s", version);
14500 bnx2x_wq = create_singlethread_workqueue("bnx2x");
14501 if (bnx2x_wq == NULL) {
14502 pr_err("Cannot create workqueue\n");
14505 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14506 if (!bnx2x_iov_wq) {
14507 pr_err("Cannot create iov workqueue\n");
14508 destroy_workqueue(bnx2x_wq);
14512 ret = pci_register_driver(&bnx2x_pci_driver);
14514 pr_err("Cannot register driver\n");
14515 destroy_workqueue(bnx2x_wq);
14516 destroy_workqueue(bnx2x_iov_wq);
14521 static void __exit bnx2x_cleanup(void)
14523 struct list_head *pos, *q;
14525 pci_unregister_driver(&bnx2x_pci_driver);
14527 destroy_workqueue(bnx2x_wq);
14528 destroy_workqueue(bnx2x_iov_wq);
14530 /* Free globally allocated resources */
14531 list_for_each_safe(pos, q, &bnx2x_prev_list) {
14532 struct bnx2x_prev_path_list *tmp =
14533 list_entry(pos, struct bnx2x_prev_path_list, list);
14539 void bnx2x_notify_link_changed(struct bnx2x *bp)
14541 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14544 module_init(bnx2x_init);
14545 module_exit(bnx2x_cleanup);
14548 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
14550 * @bp: driver handle
12553 * This function waits until the ramrod completes.
12554 * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
14556 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14558 unsigned long ramrod_flags = 0;
14560 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
14561 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14562 &bp->iscsi_l2_mac_obj, true,
14563 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
14566 /* count denotes the number of new completions we have seen */
14567 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14569 struct eth_spe *spe;
14570 int cxt_index, cxt_offset;
14572 #ifdef BNX2X_STOP_ON_ERROR
14573 if (unlikely(bp->panic))
14577 spin_lock_bh(&bp->spq_lock);
14578 BUG_ON(bp->cnic_spq_pending < count);
14579 bp->cnic_spq_pending -= count;
14581 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14582 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14583 & SPE_HDR_CONN_TYPE) >>
14584 SPE_HDR_CONN_TYPE_SHIFT;
14585 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14586 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14588 /* Set validation for iSCSI L2 client before sending SETUP
14591 if (type == ETH_CONNECTION_TYPE) {
14592 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14593 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14595 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14596 (cxt_index * ILT_PAGE_CIDS);
14597 bnx2x_set_ctx_validation(bp,
14598 &bp->context[cxt_index].
14599 vcxt[cxt_offset].eth,
14600 BNX2X_ISCSI_ETH_CID(bp));
14605 * No more than 8 L2 SPEs and no more than 8 L5 SPEs may be in
14606 * flight at any one time. We also check that the number of outstanding
14607 * COMMON ramrods does not exceed what the EQ and SPQ can accommodate.
14610 if (type == ETH_CONNECTION_TYPE) {
14611 if (!atomic_read(&bp->cq_spq_left))
14614 atomic_dec(&bp->cq_spq_left);
14615 } else if (type == NONE_CONNECTION_TYPE) {
14616 if (!atomic_read(&bp->eq_spq_left))
14619 atomic_dec(&bp->eq_spq_left);
14620 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14621 (type == FCOE_CONNECTION_TYPE)) {
14622 if (bp->cnic_spq_pending >=
14623 bp->cnic_eth_dev.max_kwqe_pending)
14626 bp->cnic_spq_pending++;
14628 BNX2X_ERR("Unknown SPE type: %d\n", type);
14633 spe = bnx2x_sp_get_next(bp);
14634 *spe = *bp->cnic_kwq_cons;
14636 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14637 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14639 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14640 bp->cnic_kwq_cons = bp->cnic_kwq;
14642 bp->cnic_kwq_cons++;
14644 bnx2x_sp_prod_update(bp);
14645 spin_unlock_bh(&bp->spq_lock);
14648 static int bnx2x_cnic_sp_queue(struct net_device *dev,
14649 struct kwqe_16 *kwqes[], u32 count)
14651 struct bnx2x *bp = netdev_priv(dev);
14654 #ifdef BNX2X_STOP_ON_ERROR
14655 if (unlikely(bp->panic)) {
14656 BNX2X_ERR("Can't post to SP queue while panic\n");
14661 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14662 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14663 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14667 spin_lock_bh(&bp->spq_lock);
14669 for (i = 0; i < count; i++) {
14670 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14672 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14675 *bp->cnic_kwq_prod = *spe;
14677 bp->cnic_kwq_pending++;
14679 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14680 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14681 spe->data.update_data_addr.hi,
14682 spe->data.update_data_addr.lo,
14683 bp->cnic_kwq_pending);
14685 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14686 bp->cnic_kwq_prod = bp->cnic_kwq;
14688 bp->cnic_kwq_prod++;
14691 spin_unlock_bh(&bp->spq_lock);
14693 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14694 bnx2x_cnic_sp_post(bp, 0);
14699 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14701 struct cnic_ops *c_ops;
14704 mutex_lock(&bp->cnic_mutex);
14705 c_ops = rcu_dereference_protected(bp->cnic_ops,
14706 lockdep_is_held(&bp->cnic_mutex));
14708 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14709 mutex_unlock(&bp->cnic_mutex);
14714 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14716 struct cnic_ops *c_ops;
14720 c_ops = rcu_dereference(bp->cnic_ops);
14722 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14729 * for commands that have no data
14731 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14733 struct cnic_ctl_info ctl = {0};
14737 return bnx2x_cnic_ctl_send(bp, &ctl);
14740 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14742 struct cnic_ctl_info ctl = {0};
14744 /* first we tell CNIC and only then we count this as a completion */
14745 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14746 ctl.data.comp.cid = cid;
14747 ctl.data.comp.error = err;
14749 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14750 bnx2x_cnic_sp_post(bp, 0);
14753 /* Called with netif_addr_lock_bh() taken.
14754 * Sets an rx_mode config for an iSCSI ETH client.
14756 * Completion should be checked outside.
14758 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14760 unsigned long accept_flags = 0, ramrod_flags = 0;
14761 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14762 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14765 /* Start accepting on iSCSI L2 ring. Accept all multicasts
14766 * because it's the only way for UIO Queue to accept
14767 * multicasts (in non-promiscuous mode only one Queue per
14768 * function will receive multicast packets, the leading queue in our case).
14771 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14772 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14773 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14774 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14776 /* Clear STOP_PENDING bit if START is requested */
14777 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14779 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14781 /* Clear START_PENDING bit if STOP is requested */
14782 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14784 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14785 set_bit(sched_state, &bp->sp_state);
14787 __set_bit(RAMROD_RX, &ramrod_flags);
14788 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14793 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14795 struct bnx2x *bp = netdev_priv(dev);
14798 switch (ctl->cmd) {
14799 case DRV_CTL_CTXTBL_WR_CMD: {
14800 u32 index = ctl->data.io.offset;
14801 dma_addr_t addr = ctl->data.io.dma_addr;
14803 bnx2x_ilt_wr(bp, index, addr);
14807 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14808 int count = ctl->data.credit.credit_count;
14810 bnx2x_cnic_sp_post(bp, count);
14814 /* rtnl_lock is held. */
14815 case DRV_CTL_START_L2_CMD: {
14816 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14817 unsigned long sp_bits = 0;
14819 /* Configure the iSCSI classification object */
14820 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14821 cp->iscsi_l2_client_id,
14822 cp->iscsi_l2_cid, BP_FUNC(bp),
14823 bnx2x_sp(bp, mac_rdata),
14824 bnx2x_sp_mapping(bp, mac_rdata),
14825 BNX2X_FILTER_MAC_PENDING,
14826 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14829 /* Set iSCSI MAC address */
14830 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14837 /* Start accepting on iSCSI L2 ring */
14839 netif_addr_lock_bh(dev);
14840 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14841 netif_addr_unlock_bh(dev);
14843 /* bits to wait on */
14844 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14845 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14847 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14848 BNX2X_ERR("rx_mode completion timed out!\n");
14853 /* rtnl_lock is held. */
14854 case DRV_CTL_STOP_L2_CMD: {
14855 unsigned long sp_bits = 0;
14857 /* Stop accepting on iSCSI L2 ring */
14858 netif_addr_lock_bh(dev);
14859 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14860 netif_addr_unlock_bh(dev);
14862 /* bits to wait on */
14863 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14864 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14866 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14867 BNX2X_ERR("rx_mode completion timed out!\n");
14872 /* Unset iSCSI L2 MAC */
14873 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14874 BNX2X_ISCSI_ETH_MAC, true);
14877 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14878 int count = ctl->data.credit.credit_count;
14880 smp_mb__before_atomic();
14881 atomic_add(count, &bp->cq_spq_left);
14882 smp_mb__after_atomic();
14885 case DRV_CTL_ULP_REGISTER_CMD: {
14886 int ulp_type = ctl->data.register_data.ulp_type;
14888 if (CHIP_IS_E3(bp)) {
14889 int idx = BP_FW_MB_IDX(bp);
14890 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14891 int path = BP_PATH(bp);
14892 int port = BP_PORT(bp);
14894 u32 scratch_offset;
14897 /* first write capability to shmem2 */
14898 if (ulp_type == CNIC_ULP_ISCSI)
14899 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14900 else if (ulp_type == CNIC_ULP_FCOE)
14901 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14902 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14904 if ((ulp_type != CNIC_ULP_FCOE) ||
14905 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14906 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14909 /* If we reached this point, we should write the FCoE capabilities */
14910 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14911 if (!scratch_offset)
14913 scratch_offset += offsetof(struct glob_ncsi_oem_data,
14914 fcoe_features[path][port]);
14915 host_addr = (u32 *) &(ctl->data.register_data.
14917 for (i = 0; i < sizeof(struct fcoe_capabilities);
14919 REG_WR(bp, scratch_offset + i,
14920 *(host_addr + i/4));
14922 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14926 case DRV_CTL_ULP_UNREGISTER_CMD: {
14927 int ulp_type = ctl->data.ulp_type;
14929 if (CHIP_IS_E3(bp)) {
14930 int idx = BP_FW_MB_IDX(bp);
14933 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14934 if (ulp_type == CNIC_ULP_ISCSI)
14935 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14936 else if (ulp_type == CNIC_ULP_FCOE)
14937 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14938 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14940 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14945 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14949 /* For storage-only interfaces, change driver state */
14950 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14951 switch (ctl->drv_state) {
14955 bnx2x_set_os_driver_state(bp,
14956 OS_DRIVER_STATE_ACTIVE);
14959 bnx2x_set_os_driver_state(bp,
14960 OS_DRIVER_STATE_DISABLED);
14963 bnx2x_set_os_driver_state(bp,
14964 OS_DRIVER_STATE_NOT_LOADED);
14967 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14974 static int bnx2x_get_fc_npiv(struct net_device *dev,
14975 struct cnic_fc_npiv_tbl *cnic_tbl)
14977 struct bnx2x *bp = netdev_priv(dev);
14978 struct bdn_fc_npiv_tbl *tbl = NULL;
14979 u32 offset, entries;
14983 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14986 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14988 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14990 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14994 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14996 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14999 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
15001 /* Read the table contents from nvram */
15002 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
15003 BNX2X_ERR("Failed to read FC-NPIV table\n");
15007 /* Since bnx2x_nvram_read() returns data in be32, we need to convert
15008 * the number of entries back to cpu endianness.
15010 entries = tbl->fc_npiv_cfg.num_of_npiv;
15011 entries = (__force u32)be32_to_cpu((__force __be32)entries);
15012 tbl->fc_npiv_cfg.num_of_npiv = entries;
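/* Worked example (illustrative): if NVRAM holds the big-endian bytes
 * 00 00 00 02, a little-endian CPU reads the raw u32 as 0x02000000 and
 * the swap above restores num_of_npiv = 2.
 */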
15014 if (!tbl->fc_npiv_cfg.num_of_npiv) {
15016 "No FC-NPIV table [valid, simply not present]\n");
15018 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
15019 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
15020 tbl->fc_npiv_cfg.num_of_npiv);
15023 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
15024 tbl->fc_npiv_cfg.num_of_npiv);
15027 /* Copy the data into cnic-provided struct */
15028 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
15029 for (i = 0; i < cnic_tbl->count; i++) {
15030 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
15031 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
15040 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
15042 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15044 if (bp->flags & USING_MSIX_FLAG) {
15045 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
15046 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
15047 cp->irq_arr[0].vector = bp->msix_table[1].vector;
15049 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
15050 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
15052 if (!CHIP_IS_E1x(bp))
15053 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
15055 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
15057 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
15058 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
15059 cp->irq_arr[1].status_blk = bp->def_status_blk;
15060 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
15061 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
15066 void bnx2x_setup_cnic_info(struct bnx2x *bp)
15068 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15070 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15071 bnx2x_cid_ilt_lines(bp);
15072 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15073 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15074 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15076 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
15077 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
15080 if (NO_ISCSI_OOO(bp))
15081 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
15084 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
15087 struct bnx2x *bp = netdev_priv(dev);
15088 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15091 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
15094 BNX2X_ERR("NULL ops received\n");
15098 if (!CNIC_SUPPORT(bp)) {
15099 BNX2X_ERR("Can't register CNIC when not supported\n");
15100 return -EOPNOTSUPP;
15103 if (!CNIC_LOADED(bp)) {
15104 rc = bnx2x_load_cnic(bp);
15106 BNX2X_ERR("CNIC-related load failed\n");
15111 bp->cnic_enabled = true;
15113 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
15117 bp->cnic_kwq_cons = bp->cnic_kwq;
15118 bp->cnic_kwq_prod = bp->cnic_kwq;
15119 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
15121 bp->cnic_spq_pending = 0;
15122 bp->cnic_kwq_pending = 0;
15124 bp->cnic_data = data;
15127 cp->drv_state |= CNIC_DRV_STATE_REGD;
15128 cp->iro_arr = bp->iro_arr;
15130 bnx2x_setup_cnic_irq_info(bp);
15132 rcu_assign_pointer(bp->cnic_ops, ops);
15134 /* Schedule driver to read CNIC driver versions */
15135 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
15140 static int bnx2x_unregister_cnic(struct net_device *dev)
15142 struct bnx2x *bp = netdev_priv(dev);
15143 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15145 mutex_lock(&bp->cnic_mutex);
15147 RCU_INIT_POINTER(bp->cnic_ops, NULL);
15148 mutex_unlock(&bp->cnic_mutex);
15150 bp->cnic_enabled = false;
15151 kfree(bp->cnic_kwq);
15152 bp->cnic_kwq = NULL;
15157 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
15159 struct bnx2x *bp = netdev_priv(dev);
15160 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15162 /* If both iSCSI and FCoE are disabled - return NULL in
15163 * order to indicate to CNIC that it should not try to work
15164 * with this device.
15166 if (NO_ISCSI(bp) && NO_FCOE(bp))
15169 cp->drv_owner = THIS_MODULE;
15170 cp->chip_id = CHIP_ID(bp);
15171 cp->pdev = bp->pdev;
15172 cp->io_base = bp->regview;
15173 cp->io_base2 = bp->doorbells;
15174 cp->max_kwqe_pending = 8;
15175 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
15176 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15177 bnx2x_cid_ilt_lines(bp);
15178 cp->ctx_tbl_len = CNIC_ILT_LINES;
15179 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15180 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
15181 cp->drv_ctl = bnx2x_drv_ctl;
15182 cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
15183 cp->drv_register_cnic = bnx2x_register_cnic;
15184 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
15185 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15186 cp->iscsi_l2_client_id =
15187 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
15188 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15190 if (NO_ISCSI_OOO(bp))
15191 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
15194 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
15197 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
15200 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
15202 cp->ctx_tbl_offset,
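
/* bnx2x_cnic_probe() is the entry point through which the cnic module
 * discovers this device: cnic looks the symbol up at probe time and then
 * drives everything else via the drv_* callbacks filled in above
 * (drv_register_cnic, drv_unregister_cnic, drv_ctl, ...).
 */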

static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	u32 offset = BAR_USTRORM_INTMEM;

	if (IS_VF(bp))
		return bnx2x_vf_ustorm_prods_offset(bp, fp);
	else if (!CHIP_IS_E1x(bp))
		offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

	return offset;
}
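
/* For a VF the producers offset is derived from what the PF granted over
 * the VF-PF channel rather than computed locally, hence the separate
 * bnx2x_vf_ustorm_prods_offset() helper.
 */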

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}
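
/* The usual pattern elsewhere in this driver is to pretend, access the
 * other function's GRC registers, and then restore:
 *
 *	bnx2x_pretend_func(bp, pretend_val);
 *	... GRC accesses now apply to the pretended function ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));	(restore)
 *
 * The read-back of the pretend register above presumably ensures the
 * posted write has taken effect before any subsequent access relies on it.
 */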

static void bnx2x_ptp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
	int port = BP_PORT(bp);
	u32 val_seq;
	u64 timestamp, ns;
	struct skb_shared_hwtstamps shhwtstamps;

	/* Read Tx timestamp registers */
	val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
			 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
	if (val_seq & 0x10000) {
		/* There is a valid timestamp value */
		timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
				   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
		timestamp <<= 32;
		timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
				    NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
		/* Reset timestamp register to allow new timestamp */
		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
		       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
		ns = timecounter_cyc2time(&bp->timecounter, timestamp);

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
		dev_kfree_skb_any(bp->ptp_tx_skb);
		bp->ptp_tx_skb = NULL;

		DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
	} else {
		DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
		/* Reschedule to keep checking for a valid timestamp value */
		schedule_work(&bp->ptp_task);
	}
}
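
/* Tx timestamping flow: the transmit path holds the outgoing PTP skb in
 * bp->ptp_tx_skb and schedules bnx2x_ptp_task().  Bit 16 (0x10000) of the
 * TLLH SEQID register is the "timestamp captured" flag; writing it back
 * releases the buffer for the next packet.  Until the flag shows up, the
 * work item keeps rescheduling itself.
 */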

void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
{
	int port = BP_PORT(bp);
	u64 timestamp, ns;

	timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
			   NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
	timestamp <<= 32;
	timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
			    NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);

	/* Reset timestamp register to allow new timestamp */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);

	ns = timecounter_cyc2time(&bp->timecounter, timestamp);

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);

	DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
	   timestamp, ns);
}

/* Read the PHC */
static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
{
	struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
	int port = BP_PORT(bp);
	u32 wb_data[2];
	u64 phc_cycles;

	/* The 64-bit free-running counter is read via DMAE as two 32-bit
	 * words; wb_data[1] holds the high word.
	 */
	REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
		    NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
	phc_cycles = wb_data[1];
	phc_cycles = (phc_cycles << 32) + wb_data[0];

	DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

static void bnx2x_init_cyclecounter(struct bnx2x *bp)
{
	memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
	bp->cyclecounter.read = bnx2x_cyclecounter_read;
	bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
	bp->cyclecounter.shift = 0;
	bp->cyclecounter.mult = 1;
}
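
/* With mult = 1 and shift = 0, the timecounter conversion
 *
 *	ns = (cycles * mult) >> shift
 *
 * is the identity: the NIG free-running counter is treated as counting
 * nanoseconds directly, so no scaling is applied on reads.
 */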

static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_set_timesync_params *set_timesync_params =
		&func_params.params.set_timesync;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;

	/* Function parameters */
	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;

	return bnx2x_func_state_change(bp, &func_params);
}
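
/* TS_DRIFT_ADJUST_RESET clears the drift adjustment while TS_OFFSET_KEEP
 * leaves the offset register alone, so reloading the driver does not wipe
 * out an offset a running PTP application may already have applied.
 */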

static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
{
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	/* send queue update ramrod to enable PTP packets */
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
		  &q_params.params.update.update_flags);
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
		  &q_params.params.update.update_flags);

	/* send the ramrod on all the queues of the PF */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Set the appropriate Queue object */
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to enable PTP packets\n");
			return rc;
		}
	}

	return 0;
}

int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int rc;

	if (!bp->hwtstamp_ioctl_called)
		return 0;

	switch (bp->tx_type) {
	case HWTSTAMP_TX_ON:
		bp->flags |= TX_TIMESTAMPING_EN;
		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
		       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
		       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		BNX2X_ERR("One-step timestamping is not supported\n");
		return -ERANGE;
	}

	switch (bp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		bp->rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection for L2 events */
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
		break;
	}

	/* Indicate to FW that this PF expects recorded PTP packets */
	rc = bnx2x_enable_ptp_packets(bp);
	if (rc)
		return rc;

	/* Enable sending PTP packets to host */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);

	return 0;
}

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int rc;

	DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
	   config.tx_type, config.rx_filter);

	if (config.flags) {
		BNX2X_ERR("config.flags is reserved for future use\n");
		return -EINVAL;
	}

	bp->hwtstamp_ioctl_called = 1;
	bp->tx_type = config.tx_type;
	bp->rx_filter = config.rx_filter;

	rc = bnx2x_configure_ptp_filters(bp);
	if (rc)
		return rc;

	config.rx_filter = bp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
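
/* Per the SIOCSHWTSTAMP contract, a request the hardware cannot match
 * exactly is upgraded to the closest supported superset (e.g.
 * HWTSTAMP_FILTER_PTP_V1_L4_SYNC becomes HWTSTAMP_FILTER_PTP_V1_L4_EVENT
 * in bnx2x_configure_ptp_filters()), and the filter actually programmed
 * is copied back to user space above.
 */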

/* Configures HW for PTP */
static int bnx2x_configure_ptp(struct bnx2x *bp)
{
	int rc, port = BP_PORT(bp);
	u32 wb_data[2];

	/* Reset PTP event detection rules - will be configured in the IOCTL */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable PTP packets to host - will be configured in the IOCTL */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);

	/* Enable the PTP feature */
	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
	       NIG_REG_P0_PTP_EN, 0x3F);

	/* Enable the free-running counter */
	wb_data[0] = 0;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);

	/* Reset drift register (offset register is not reset) */
	rc = bnx2x_send_reset_timesync_ramrod(bp);
	if (rc) {
		BNX2X_ERR("Failed to reset PHC drift register\n");
		return -EFAULT;
	}

	/* Reset possibly old timestamps */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
	       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);

	return 0;
}

/* Called during load, to initialize PTP-related state */
void bnx2x_init_ptp(struct bnx2x *bp)
{
	int rc;

	/* Configure PTP in HW */
	rc = bnx2x_configure_ptp(bp);
	if (rc) {
		BNX2X_ERR("Stopping PTP initialization\n");
		return;
	}

	/* Init the Tx-timestamping work item */
	INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);

	/* Init cyclecounter and timecounter. This is done only on the first
	 * load. If done on every load, a PTP application will fail when
	 * doing unload / load (e.g. MTU change) while it is running.
	 */
	if (!bp->timecounter_init_done) {
		bnx2x_init_cyclecounter(bp);
		timecounter_init(&bp->timecounter, &bp->cyclecounter,
				 ktime_to_ns(ktime_get_real()));
		bp->timecounter_init_done = 1;
	}

	DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
}