/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
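
/*
 * The ioc_fail_sync register holds two per-PCI-function bitmaps that the
 * macros above pick apart: bits [15:0] are the "sync acked" bits and bits
 * [31:16] the "sync required" bits, one bit per PCI function. For example,
 * with functions 0 and 2 joined and only function 2 having acked, the
 * register reads 0x00050004; once function 0 acks as well it reads
 * 0x00050005, and bfa_ioc_ct_sync_complete() below treats the failure
 * sync as done and clears the acked half back to 0x00050000.
 */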
/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init        = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init        = bfa_ioc_ct_reg_init,
	.ioc_map_port        = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join       = bfa_ioc_ct_sync_join,
	.ioc_sync_leave      = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack        = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init        = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init        = bfa_ioc_ct2_reg_init,
	.ioc_map_port        = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set    = NULL,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join       = bfa_ioc_ct_sync_join,
	.ioc_sync_leave      = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack        = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};
/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}
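
/*
 * Illustrative only (not part of this file): the attach path picks one of
 * the two hwifs based on the ASIC generation of the PCI device, roughly:
 *
 *	if (bfa_asic_id_ct2(pcidev->device_id))
 *		bfa_nw_ioc_set_ct2_hwif(ioc);
 *	else
 *		bfa_nw_ioc_set_ct_hwif(ioc);
 *
 * The helper name and field used here are assumptions; the real call
 * sites live in the bna attach code, not in this file.
 */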
/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/* If bios boot (flash based) -- do not increment usage count */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/* If usage count is 0, always return TRUE. */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* Use count cannot be non-zero and chip in uninitialized state. */
	BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));

	/* Check if another driver with a different firmware is active */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/* Same firmware version. Increment the reference count. */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/* If bios boot (flash based) -- do not decrement usage count */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/* decrement usage count */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	BUG_ON(!(usecnt > 0));

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Wait for halt to take effect */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}
/* Host to LPU mailbox message addresses */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
	u32	hfn;
	u32	lpu;
	u32	lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};
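
/*
 * Note on indexing: ct_fnreg/ct_p0reg/ct_p1reg above carry one entry per
 * PCI function (indexed by bfa_ioc_pcifn() in bfa_ioc_ct_reg_init()),
 * while ct2_reg carries one entry per port (indexed by bfa_ioc_portid()
 * in bfa_ioc_ct2_reg_init()).
 */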
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int	pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int	port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
/* Initialize IOC to port mapping. */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/* For catapult, base port id on personality register and IOC type */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}
static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}
/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/* If already in desired mode, do not change anything */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

	writel(r32, rb + FNC_PERS_REG);
}
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return true;
	}
	return false;
}
/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
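
/*
 * bfa_nw_ioc_ct2_poweron() below partitions the MSI-X vector space: when
 * the NUMVT field has not been programmed yet, each PCI function is given
 * a window of HOSTFN_MSIX_DEFAULT (64) vectors starting at function
 * number * 64, with the mailbox/error vector index pointed at the start
 * of that window; if NUMVT is already set, only the mailbox/error vector
 * index is re-pointed at the existing offset.
 */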
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
			HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
			rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}
/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
/* Synchronized IOC failure processing routines */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
					bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	if (sync_ackd == 0)
		return true;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
				ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}
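
/*
 * Catapult (CT) PLL bring-up: program the slow (SCLK) and logic (LCLK)
 * PLLs with their logic soft reset asserted, enable them, release the
 * soft reset, then take local memory out of reset and run the eDRAM
 * built-in self test before reporting success.
 */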
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL,
				(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
				(rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk |
		__APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/* put s_clk PLL and PLL FSM in reset */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/* while doing PLL init don't clock gate ethernet subsystem */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel((r32 | __ETH_CLK_ENABLE_PORT0),
	       (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel((r32 | __ETH_CLK_ENABLE_PORT1),
	       (rb + CT2_PCIE_MISC_REG));

	/* set sclk value */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/* poll for s_clk lock or delay 1ms */
	udelay(1000);

	/*
	 * Don't do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/* put l_clk PLL and PLL FSM in reset */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* set LPU speed (set for FC16 which will work for other modes) */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/* set LPU half speed (set for FC16 which will work for other modes) */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* set lclk for mode (set for FC16) */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* poll for l_clk lock or delay 1ms */
	udelay(1000);
}
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/* release soft reset on s_clk */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/* release soft reset on l_clk */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(0)));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(1)));
}
#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_VER_VALID	0x143
#define BFA_IOC_PLL_POLL	1000000
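
/*
 * CT2_NFC_MAX_DELAY bounds the NFC halt/resume polling loops below,
 * BFA_IOC_PLL_POLL bounds the tight polls on the PLL start/stop handshake
 * bits in bfa_ioc_ct2_pll_init(), and CT2_NFC_VER_VALID is the minimum
 * NFC version for which that handshake (rather than a manual PLL init
 * with the NFC halted) is used.
 */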
static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	u32 r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return true;

	return false;
}
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	BUG_ON(1);
}
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	u32 nfc_ver, i;

	wgn = readl(rb + CT2_WGN_STATUS);

	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
	    (nfc_ver >= CT2_NFC_VER_VALID)) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			bfa_ioc_ct2_nfc_resume(rb);
		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
		       rb + CT2_CSI_FW_CTL_SET_REG);

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
				break;
		}
		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
				break;
		}
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
		udelay(1000);

		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
	} else {
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}

		bfa_ioc_ct2_mac_reset(rb);
		bfa_ioc_ct2_sclk_init(rb);
		bfa_ioc_ct2_lclk_init(rb);

		/* release soft reset on s_clk & l_clk */
		r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
				rb + CT2_APP_PLL_SCLK_CTL_REG);
		r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
				rb + CT2_APP_PLL_LCLK_CTL_REG);
	}

	/* Announce flash device presence, if flash was corrupted. */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}