2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
5 * See LICENSE.qlcnic for copyright and licensing details.
9 #include "qlcnic_hdr.h"
10 #include "qlcnic_83xx_hw.h"
11 #include "qlcnic_hw.h"
/*
 * Minidump constants.
 * NOTE(review): this chunk appears to be a partial extraction; the leading
 * numbers on each line look like fused original line numbers, and interior
 * lines of most definitions are missing.
 */
/* Flash address where the minidump template is stored. */
15 #define QLC_83XX_MINIDUMP_FLASH 0x520000
/* Indices into the template header's saved_state[] array (see dump code). */
16 #define QLC_83XX_OCM_INDEX 3
17 #define QLC_83XX_PCI_INDEX 0
18 #define QLC_83XX_DMA_ENGINE_INDEX 8
/* Data registers read back by the memory-services test agent. */
20 static const u32 qlcnic_ms_read_data[] = {
21 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
/* Opcode flag bits interpreted by qlcnic_dump_ctrl(). */
24 #define QLCNIC_DUMP_WCRB BIT_0
25 #define QLCNIC_DUMP_RWCRB BIT_1
26 #define QLCNIC_DUMP_ANDCRB BIT_2
27 #define QLCNIC_DUMP_ORCRB BIT_3
28 #define QLCNIC_DUMP_POLLCRB BIT_4
29 #define QLCNIC_DUMP_RD_SAVE BIT_5
30 #define QLCNIC_DUMP_WRT_SAVED BIT_6
31 #define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
/* Entry-header flag: entry was skipped during capture. */
32 #define QLCNIC_DUMP_SKIP BIT_7
/* Upper bound for the per-level capture-mask bit scan in qlcnic_dump_fw(). */
34 #define QLCNIC_DUMP_MASK_MAX 0xff
/*
 * Descriptor written to MS memory to drive the PEX DMA engine
 * (only some fields are visible in this extraction).
 */
36 struct qlcnic_pex_dma_descriptor {
42 u32 dma_bus_addr_high;
/* Header common to every minidump template entry. */
46 struct qlcnic_common_entry_hdr {
100 u32 read_addr_stride;
/*
 * One template entry: common header plus per-opcode data, accessed below as
 * entry->region.<member> — presumably a union; definition not fully visible.
 */
172 struct qlcnic_dump_entry {
173 struct qlcnic_common_entry_hdr hdr;
176 struct __cache cache;
182 struct __pollrdmwr pollrdmwr;
184 struct __pollrd pollrd;
/*
 * Opcodes found in template entry headers; each is mapped to a capture
 * handler via the qlcnic_fw_dump_ops / qlcnic_83xx_fw_dump_ops tables.
 */
188 enum qlcnic_minidump_opcode {
190 QLCNIC_DUMP_READ_CRB = 1,
191 QLCNIC_DUMP_READ_MUX = 2,
192 QLCNIC_DUMP_QUEUE = 3,
193 QLCNIC_DUMP_BRD_CONFIG = 4,
194 QLCNIC_DUMP_READ_OCM = 6,
195 QLCNIC_DUMP_PEG_REG = 7,
196 QLCNIC_DUMP_L1_DTAG = 8,
197 QLCNIC_DUMP_L1_ITAG = 9,
198 QLCNIC_DUMP_L1_DATA = 11,
199 QLCNIC_DUMP_L1_INST = 12,
200 QLCNIC_DUMP_L2_DTAG = 21,
201 QLCNIC_DUMP_L2_ITAG = 22,
202 QLCNIC_DUMP_L2_DATA = 23,
203 QLCNIC_DUMP_L2_INST = 24,
204 QLCNIC_DUMP_POLL_RD = 35,
205 QLCNIC_READ_MUX2 = 36,
206 QLCNIC_READ_POLLRDMWR = 37,
207 QLCNIC_DUMP_READ_ROM = 71,
208 QLCNIC_DUMP_READ_MEM = 72,
209 QLCNIC_DUMP_READ_CTRL = 98,
/* Template header pseudo-entry and end-of-template marker. */
210 QLCNIC_DUMP_TLHDR = 99,
211 QLCNIC_DUMP_RDEND = 255
/*
 * Opcode -> handler mapping. Each handler captures one template entry into
 * the dump buffer and returns the number of bytes it wrote.
 */
214 struct qlcnic_dump_operations {
215 enum qlcnic_minidump_opcode opcode;
216 u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
/*
 * Capture a run of CRB registers: read each register indirectly and store
 * (address, value) pairs into the dump buffer.
 * Returns bytes written: two 32-bit words per op.
 */
220 static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
221 struct qlcnic_dump_entry *entry, __le32 *buffer)
225 struct __crb *crb = &entry->region.crb;
229 for (i = 0; i < crb->no_ops; i++) {
230 data = qlcnic_ind_rd(adapter, addr);
231 *buffer++ = cpu_to_le32(addr);
232 *buffer++ = cpu_to_le32(data);
235 return crb->no_ops * 2 * sizeof(u32);
/*
 * Execute a CRB "control" entry: for each op, walk the 8 opcode flag bits
 * and perform the corresponding write / read-modify-write / poll / save /
 * restore action. Saved values live in the template header's saved_state[]
 * array so later entries can reuse them.
 * NOTE(review): several lines of this switch are missing in this extraction.
 */
238 static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
239 struct qlcnic_dump_entry *entry, __le32 *buffer)
241 int i, k, timeout = 0;
244 struct __ctrl *ctr = &entry->region.ctrl;
245 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
248 no_ops = ctr->no_ops;
250 for (i = 0; i < no_ops; i++) {
/* Each set bit in ctr->opcode selects one sub-operation. */
252 for (k = 0; k < 8; k++) {
253 if (!(ctr->opcode & (1 << k)))
256 case QLCNIC_DUMP_WCRB:
257 qlcnic_ind_wr(adapter, addr, ctr->val1);
/* Read then write back the same value. */
259 case QLCNIC_DUMP_RWCRB:
260 data = qlcnic_ind_rd(adapter, addr);
261 qlcnic_ind_wr(adapter, addr, data);
263 case QLCNIC_DUMP_ANDCRB:
264 data = qlcnic_ind_rd(adapter, addr);
265 qlcnic_ind_wr(adapter, addr,
268 case QLCNIC_DUMP_ORCRB:
269 data = qlcnic_ind_rd(adapter, addr);
270 qlcnic_ind_wr(adapter, addr,
/* Poll until (data & val2) == val1, bounded by ctr->timeout. */
273 case QLCNIC_DUMP_POLLCRB:
274 while (timeout <= ctr->timeout) {
275 data = qlcnic_ind_rd(adapter, addr);
276 if ((data & ctr->val2) == ctr->val1)
278 usleep_range(1000, 2000);
281 if (timeout > ctr->timeout) {
282 dev_info(&adapter->pdev->dev,
283 "Timed out, aborting poll CRB\n");
/* Read a register (address taken from saved state) and save the value. */
287 case QLCNIC_DUMP_RD_SAVE:
289 addr = t_hdr->saved_state[ctr->index_a];
290 data = qlcnic_ind_rd(adapter, addr);
291 t_hdr->saved_state[ctr->index_v] = data;
/* Write a previously saved value back to a register. */
293 case QLCNIC_DUMP_WRT_SAVED:
295 data = t_hdr->saved_state[ctr->index_v];
299 addr = t_hdr->saved_state[ctr->index_a];
300 qlcnic_ind_wr(adapter, addr, data);
/* Shift/modify a saved value in place (full expression not visible). */
302 case QLCNIC_DUMP_MOD_SAVE_ST:
303 data = t_hdr->saved_state[ctr->index_v];
304 data <<= ctr->shl_val;
305 data >>= ctr->shr_val;
310 t_hdr->saved_state[ctr->index_v] = data;
313 dev_info(&adapter->pdev->dev,
/*
 * Capture a muxed register: write a select value, read the muxed data
 * register, store (select, data) pairs, then advance the select value by
 * the configured stride. Returns bytes written (2 words per op).
 */
323 static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
324 struct qlcnic_dump_entry *entry, __le32 *buffer)
328 struct __mux *mux = &entry->region.mux;
331 for (loop = 0; loop < mux->no_ops; loop++) {
332 qlcnic_ind_wr(adapter, mux->addr, val);
333 data = qlcnic_ind_rd(adapter, mux->read_addr);
334 *buffer++ = cpu_to_le32(val);
335 *buffer++ = cpu_to_le32(data);
336 val += mux->val_stride;
338 return 2 * mux->no_ops * sizeof(u32);
/*
 * Capture hardware queue state: select each queue id in turn, then read
 * read_addr_cnt words starting at read_addr (stepping by read_addr_stride).
 * Returns bytes written: no_ops * cnt words.
 */
341 static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
342 struct qlcnic_dump_entry *entry, __le32 *buffer)
345 u32 cnt, addr, data, que_id = 0;
346 struct __queue *que = &entry->region.que;
348 addr = que->read_addr;
349 cnt = que->read_addr_cnt;
351 for (loop = 0; loop < que->no_ops; loop++) {
352 qlcnic_ind_wr(adapter, que->sel_addr, que_id);
/* Reset the read address for each selected queue. */
353 addr = que->read_addr;
354 for (i = 0; i < cnt; i++) {
355 data = qlcnic_ind_rd(adapter, addr);
356 *buffer++ = cpu_to_le32(data);
357 addr += que->read_addr_stride;
359 que_id += que->stride;
361 return que->no_ops * cnt * sizeof(u32);
/*
 * Capture on-chip memory (OCM) by reading directly through the mapped
 * PCI BAR0 window, one word per op. Returns bytes written.
 */
364 static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
365 struct qlcnic_dump_entry *entry, __le32 *buffer)
370 struct __ocm *ocm = &entry->region.ocm;
372 addr = adapter->ahw->pci_base0 + ocm->read_addr;
373 for (i = 0; i < ocm->no_ops; i++) {
375 *buffer++ = cpu_to_le32(data);
376 addr += ocm->read_addr_stride;
378 return ocm->no_ops * sizeof(u32);
/*
 * Capture flash/ROM contents (82xx path): acquire the flash hardware lock,
 * then for each word program the flash window register and read through
 * FLASH_ROM_DATA; release the lock at the end.
 * NOTE(review): lock-retry loop is only partially visible here.
 */
381 static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
382 struct qlcnic_dump_entry *entry, __le32 *buffer)
385 u32 fl_addr, size, val, lck_val, addr;
386 struct __mem *rom = &entry->region.mem;
/* Size is given in bytes; convert to 32-bit word count. */
389 size = rom->size / 4;
391 lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
392 if (!lck_val && count < MAX_CTL_CHECK) {
393 usleep_range(10000, 11000);
/* Record which PCI function owns the flash lock. */
397 QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
398 adapter->ahw->pci_func);
399 for (i = 0; i < size; i++) {
/* Upper 16 bits select the flash window; lower 16 index into it. */
400 addr = fl_addr & 0xFFFF0000;
401 qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
402 addr = LSW(fl_addr) + FLASH_ROM_DATA;
403 val = qlcnic_ind_rd(adapter, addr);
405 *buffer++ = cpu_to_le32(val);
/* Reading the UNLOCK register releases the flash lock. */
407 QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
/*
 * Capture L1 cache (tag/data/instruction variants share this handler):
 * for each op, write the tag value and control word, then read
 * read_addr_num words starting at read_addr.
 * Returns bytes written: no_ops * read_addr_num words.
 */
411 static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
412 struct qlcnic_dump_entry *entry, __le32 *buffer)
415 u32 cnt, val, data, addr;
416 struct __cache *l1 = &entry->region.cache;
418 val = l1->init_tag_val;
420 for (i = 0; i < l1->no_ops; i++) {
421 qlcnic_ind_wr(adapter, l1->addr, val);
422 qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
423 addr = l1->read_addr;
424 cnt = l1->read_addr_num;
426 data = qlcnic_ind_rd(adapter, addr);
427 *buffer++ = cpu_to_le32(data);
428 addr += l1->read_addr_stride;
433 return l1->no_ops * l1->read_addr_num * sizeof(u32);
/*
 * Capture L2 cache: like the L1 handler, but after writing the control
 * word it polls the control register until the poll-mask bits clear, with
 * a timeout. Poll mask and timeout are packed into the upper half of
 * ctrl_val (LSB = mask, MSB = timeout).
 * Returns bytes written, or aborts the dump on poll timeout.
 */
436 static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
437 struct qlcnic_dump_entry *entry, __le32 *buffer)
440 u32 cnt, val, data, addr;
441 u8 poll_mask, poll_to, time_out = 0;
442 struct __cache *l2 = &entry->region.cache;
444 val = l2->init_tag_val;
445 poll_mask = LSB(MSW(l2->ctrl_val));
446 poll_to = MSB(MSW(l2->ctrl_val));
448 for (i = 0; i < l2->no_ops; i++) {
449 qlcnic_ind_wr(adapter, l2->addr, val);
/* Only kick the control register if a control value is configured. */
450 if (LSW(l2->ctrl_val))
451 qlcnic_ind_wr(adapter, l2->ctrl_addr,
456 data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
457 if (!(data & poll_mask))
459 usleep_range(1000, 2000);
461 } while (time_out <= poll_to);
463 if (time_out > poll_to) {
464 dev_err(&adapter->pdev->dev,
465 "Timeout exceeded in %s, aborting dump\n",
470 addr = l2->read_addr;
471 cnt = l2->read_addr_num;
473 data = qlcnic_ind_rd(adapter, addr);
474 *buffer++ = cpu_to_le32(data);
475 addr += l2->read_addr_stride;
480 return l2->no_ops * l2->read_addr_num * sizeof(u32);
/*
 * Read card memory through the memory-services test agent, 16 bytes at a
 * time: program the MS address, start the agent, busy-wait for it to go
 * idle, then read the four data registers. Requires the region to be
 * 16-byte aligned and a multiple of 16 bytes; serialized by mem_lock.
 */
483 static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
484 struct __mem *mem, __le32 *buffer,
487 u32 addr, data, test;
490 reg_read = mem->size;
492 /* check for data size of multiple of 16 and 16 byte alignment */
493 if ((addr & 0xf) || (reg_read%16)) {
494 dev_info(&adapter->pdev->dev,
495 "Unaligned memory addr:0x%x size:0x%x\n",
501 mutex_lock(&adapter->ahw->mem_lock);
503 while (reg_read != 0) {
504 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
505 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
506 qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
/* Wait (bounded) for the test agent to finish the transaction. */
508 for (i = 0; i < MAX_CTL_CHECK; i++) {
509 test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
510 if (!(test & TA_CTL_BUSY))
513 if (i == MAX_CTL_CHECK) {
514 if (printk_ratelimit()) {
515 dev_err(&adapter->pdev->dev,
516 "failed to read through agent\n");
/* One MS transaction yields four 32-bit words. */
521 for (i = 0; i < 4; i++) {
522 data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
523 *buffer++ = cpu_to_le32(data);
530 mutex_unlock(&adapter->ahw->mem_lock);
534 /* DMA register base address */
/* Each DMA engine occupies a 0x10000 register window above 0x77320000. */
535 #define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
537 /* DMA register offsets w.r.t base address */
538 #define QLC_DMA_CMD_BUFF_ADDR_LOW 0
539 #define QLC_DMA_CMD_BUFF_ADDR_HI 4
540 #define QLC_DMA_CMD_STATUS_CTRL 8
/* Size of one PEX DMA chunk (and of the coherent bounce buffer). */
542 #define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
/*
 * Kick off one PEX DMA operation: program the descriptor address into the
 * engine selected by the template's saved DMA-engine index, start it via
 * the status/control register, then poll for completion (up to ~400 tries).
 * Returns 0 on success, negative errno-style value otherwise (error paths
 * only partially visible here).
 */
544 static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
547 struct qlcnic_dump_template_hdr *tmpl_hdr;
548 struct device *dev = &adapter->pdev->dev;
549 u32 dma_no, dma_base_addr, temp_addr;
552 tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
553 dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
554 dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
/* Low/high halves of the descriptor's card address. */
556 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
557 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
558 mem->desc_card_addr);
562 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
563 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
/* Writing the status/control register starts the transfer. */
567 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
568 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
573 /* Wait for DMA to complete */
574 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
575 for (i = 0; i < 400; i++) {
576 dma_sts = qlcnic_ind_rd(adapter, temp_addr);
579 usleep_range(250, 500);
585 dev_info(dev, "PEX DMA operation timed out");
/*
 * Read card memory using the PEX DMA engine: verify the engine is
 * available (BIT_31 of its status/control register), build a DMA
 * descriptor targeting the pre-allocated coherent bounce buffer, then
 * loop transferring up to QLC_PEX_DMA_READ_SIZE bytes per operation,
 * copying each chunk from the bounce buffer into the dump buffer.
 * *ret carries the error status; returns the number of bytes captured
 * (return statement not visible in this extraction).
 */
592 static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
594 __le32 *buffer, int *ret)
596 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
597 u32 temp, dma_base_addr, size = 0, read_size = 0;
598 struct qlcnic_pex_dma_descriptor *dma_descr;
599 struct qlcnic_dump_template_hdr *tmpl_hdr;
600 struct device *dev = &adapter->pdev->dev;
601 dma_addr_t dma_phys_addr;
604 tmpl_hdr = fw_dump->tmpl_hdr;
606 /* Check if DMA engine is available */
607 temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
608 dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
609 temp = qlcnic_ind_rd(adapter,
610 dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
612 if (!(temp & BIT_31)) {
613 dev_info(dev, "%s: DMA engine is not available\n", __func__);
618 /* Create DMA descriptor */
619 dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
626 /* dma_desc_cmd 0:15 = 0
627 * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3
628 * dma_desc_cmd 20:23 = pci function number
629 * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15
631 dma_phys_addr = fw_dump->phys_addr;
632 dma_buffer = fw_dump->dma_buffer;
/* Assemble the command word per the bit layout documented above. */
634 temp = mem->dma_desc_cmd & 0xff0f;
635 temp |= (adapter->ahw->pci_func & 0xf) << 4;
636 dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
637 dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
638 dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
639 dma_descr->src_addr_high = 0;
641 /* Collect memory dump using multiple DMA operations if required */
642 while (read_size < mem->size) {
643 if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
644 size = QLC_PEX_DMA_READ_SIZE;
646 size = mem->size - read_size;
648 dma_descr->src_addr_low = mem->addr + read_size;
649 dma_descr->read_data_size = size;
651 /* Write DMA descriptor to MS memory*/
/* Descriptor is written in 128-bit (16-byte) units. */
652 temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
653 *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
654 (u32 *)dma_descr, temp);
656 dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
657 mem->desc_card_addr);
661 *ret = qlcnic_start_pex_dma(adapter, mem);
663 dev_info(dev, "Failed to start PEX DMA operation\n");
/* Copy this chunk from the coherent bounce buffer into the dump. */
667 memcpy(buffer, dma_buffer, size);
/*
 * Memory-entry dispatcher: prefer the PEX DMA path when enabled, and
 * otherwise (or — presumably — on DMA failure; fallback control flow is not
 * fully visible here) read through the test agent.
 */
678 static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
679 struct qlcnic_dump_entry *entry, __le32 *buffer)
681 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
682 struct device *dev = &adapter->pdev->dev;
683 struct __mem *mem = &entry->region.mem;
687 if (fw_dump->use_pex_dma) {
688 data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
692 "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
698 data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
701 "Failed to read memory dump using test agent method: mask[0x%x]\n",
/*
 * No-op handler: marks the entry skipped and captures nothing
 * (used for NOP, template-header and end-of-template entries).
 */
709 static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
710 struct qlcnic_dump_entry *entry, __le32 *buffer)
712 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
/*
 * Validate a captured entry: the number of bytes the handler wrote must
 * equal the capture size declared in the entry header; log a mismatch.
 */
716 static int qlcnic_valid_dump_entry(struct device *dev,
717 struct qlcnic_dump_entry *entry, u32 size)
720 if (size != entry->hdr.cap_size) {
722 "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
723 entry->hdr.type, entry->hdr.mask, size,
724 entry->hdr.cap_size);
/*
 * Poll / read / modify-write / poll sequence (83xx): write val1 to addr1,
 * poll addr1 for the mask bits, read-mask-write addr2, write val2 to
 * addr1, poll again, then store (addr2, data). Returns 2 words written,
 * or aborts the dump on the first poll timeout.
 */
730 static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
731 struct qlcnic_dump_entry *entry,
734 struct __pollrdmwr *poll = &entry->region.pollrdmwr;
735 u32 data, wait_count, poll_wait, temp;
737 poll_wait = poll->poll_wait;
739 qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
/* First poll: wait for any poll_mask bit to assert on addr1. */
742 while (wait_count < poll_wait) {
743 data = qlcnic_ind_rd(adapter, poll->addr1);
744 if ((data & poll->poll_mask) != 0)
749 if (wait_count == poll_wait) {
750 dev_err(&adapter->pdev->dev,
751 "Timeout exceeded in %s, aborting dump\n",
/* Read-modify-write phase on addr2. */
756 data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
757 qlcnic_ind_wr(adapter, poll->addr2, data);
758 qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
/* Second poll on addr1 before recording the result. */
761 while (wait_count < poll_wait) {
762 temp = qlcnic_ind_rd(adapter, poll->addr1);
763 if ((temp & poll->poll_mask) != 0)
768 *buffer++ = cpu_to_le32(poll->addr2);
769 *buffer++ = cpu_to_le32(data);
771 return 2 * sizeof(u32);
/*
 * Poll-then-read handler (83xx): for each op, write the select value,
 * poll the select register for the mask bits, read the data register,
 * store (select, data), then advance the select by its stride.
 * Returns 2 words per op, or aborts the dump on poll timeout.
 */
775 static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
776 struct qlcnic_dump_entry *entry, __le32 *buffer)
778 struct __pollrd *pollrd = &entry->region.pollrd;
779 u32 data, wait_count, poll_wait, sel_val;
782 poll_wait = pollrd->poll_wait;
783 sel_val = pollrd->sel_val;
785 for (i = 0; i < pollrd->no_ops; i++) {
786 qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
788 while (wait_count < poll_wait) {
789 data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
790 if ((data & pollrd->poll_mask) != 0)
795 if (wait_count == poll_wait) {
796 dev_err(&adapter->pdev->dev,
797 "Timeout exceeded in %s, aborting dump\n",
802 data = qlcnic_ind_rd(adapter, pollrd->read_addr);
803 *buffer++ = cpu_to_le32(sel_val);
804 *buffer++ = cpu_to_le32(data);
805 sel_val += pollrd->sel_val_stride;
807 return pollrd->no_ops * (2 * sizeof(u32));
/*
 * Two-stage mux capture (83xx): per op, perform two select/read cycles —
 * one with sel_val1 and one with sel_val2 — each masked through
 * sel_val_mask and written to the second select register before reading.
 * Stores (masked-select, data) for both cycles: 4 words per op.
 */
810 static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
811 struct qlcnic_dump_entry *entry, __le32 *buffer)
813 struct __mux2 *mux2 = &entry->region.mux2;
815 u32 t_sel_val, sel_val1, sel_val2;
818 sel_val1 = mux2->sel_val1;
819 sel_val2 = mux2->sel_val2;
821 for (i = 0; i < mux2->no_ops; i++) {
/* First cycle: select with sel_val1. */
822 qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
823 t_sel_val = sel_val1 & mux2->sel_val_mask;
824 qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
825 data = qlcnic_ind_rd(adapter, mux2->read_addr);
826 *buffer++ = cpu_to_le32(t_sel_val);
827 *buffer++ = cpu_to_le32(data);
/* Second cycle: select with sel_val2. */
828 qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
829 t_sel_val = sel_val2 & mux2->sel_val_mask;
830 qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
831 data = qlcnic_ind_rd(adapter, mux2->read_addr);
832 *buffer++ = cpu_to_le32(t_sel_val);
833 *buffer++ = cpu_to_le32(data);
834 sel_val1 += mux2->sel_val_stride;
835 sel_val2 += mux2->sel_val_stride;
838 return mux2->no_ops * (4 * sizeof(u32));
/*
 * 83xx flash/ROM capture: reads rom->size bytes (as 32-bit words) via the
 * lockless flash read helper; the caller-side locking, error path and
 * return value are not visible in this extraction.
 */
841 static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
842 struct qlcnic_dump_entry *entry, __le32 *buffer)
845 struct __mem *rom = &entry->region.mem;
848 size = rom->size / 4;
850 if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
/* Opcode -> handler table for 82xx adapters (see qlcnic_dump_fw()). */
857 static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
858 {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
859 {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
860 {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
861 {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
862 {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
863 {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
864 {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
865 {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
866 {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
867 {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
868 {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
869 {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
870 {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
871 {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
872 {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
873 {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
874 {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
875 {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
876 {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
877 {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
/*
 * Opcode -> handler table for 83xx adapters: adds POLL_RD / MUX2 /
 * POLLRDMWR opcodes and uses the 83xx lockless flash-read ROM handler.
 */
880 static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
881 {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
882 {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
883 {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
884 {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
885 {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
886 {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
887 {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
888 {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
889 {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
890 {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
891 {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
892 {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
893 {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
894 {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
895 {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
896 {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
897 {QLCNIC_READ_MUX2, qlcnic_read_mux2},
898 {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
899 {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
900 {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
901 {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
902 {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
903 {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
/*
 * Checksum the minidump template: sum temp_size bytes as 32-bit words,
 * folding carries from the upper half back in. A valid template sums to
 * zero (presumably — validity check itself not visible here).
 */
906 static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
909 int count = temp_size / sizeof(uint32_t);
911 sum += *temp_buffer++;
/* Fold the 64-bit accumulator's carry bits back into the low 32 bits. */
913 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
/*
 * Read the full minidump template from flash (83xx only; 82xx has no
 * flash-resident template and bails early). Takes the flash lock around
 * the lockless read.
 */
917 static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
918 u8 *buffer, u32 size)
922 if (qlcnic_82xx_check(adapter))
925 if (qlcnic_83xx_lock_flash(adapter))
928 ret = qlcnic_83xx_lockless_flash_read32(adapter,
929 QLC_83XX_MINIDUMP_FLASH,
930 buffer, size / sizeof(u32));
932 qlcnic_83xx_unlock_flash(adapter);
/*
 * Read only the template header from flash to learn the template's size
 * and version, and place them in the mailbox response slots (arg[2]/[3])
 * so the caller can treat flash and firmware paths uniformly.
 */
938 qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
939 struct qlcnic_cmd_args *cmd)
941 struct qlcnic_dump_template_hdr tmp_hdr;
/* Header size expressed in 32-bit words for the flash-read helper. */
942 u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
945 if (qlcnic_82xx_check(adapter))
948 if (qlcnic_83xx_lock_flash(adapter))
951 ret = qlcnic_83xx_lockless_flash_read32(adapter,
952 QLC_83XX_MINIDUMP_FLASH,
953 (u8 *)&tmp_hdr, size);
955 qlcnic_83xx_unlock_flash(adapter);
/* Mirror the firmware-mailbox response layout: arg[2]=size, arg[3]=version. */
957 cmd->rsp.arg[2] = tmp_hdr.size;
958 cmd->rsp.arg[3] = tmp_hdr.version;
/*
 * Get template size and version: ask firmware via the TEMP_SIZE mailbox
 * command first; on failure fall back to reading the header from flash.
 * Outputs *temp_size and *version from the response args.
 */
963 static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
964 u32 *version, u32 *temp_size,
968 struct qlcnic_cmd_args cmd;
970 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
973 err = qlcnic_issue_cmd(adapter, &cmd);
974 if (err != QLCNIC_RCODE_SUCCESS) {
/* Firmware path failed — try the flash-resident header instead. */
975 if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
976 qlcnic_free_mbx_args(&cmd);
982 *temp_size = cmd.rsp.arg[2];
983 *version = cmd.rsp.arg[3];
984 qlcnic_free_mbx_args(&cmd);
/*
 * Fetch the minidump template from firmware: allocate a coherent DMA
 * buffer, pass its bus address and size via the GET_TEMP_HDR mailbox
 * command, then copy the result out word-by-word (converting from
 * little-endian). Frees the DMA buffer and mailbox args on all paths.
 */
992 static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
993 u32 *buffer, u32 temp_size)
998 struct qlcnic_cmd_args cmd;
999 dma_addr_t tmp_addr_t = 0;
1001 tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
1002 &tmp_addr_t, GFP_KERNEL);
1006 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
/* Tell firmware where (bus address) and how much to write. */
1011 cmd.req.arg[1] = LSD(tmp_addr_t);
1012 cmd.req.arg[2] = MSD(tmp_addr_t);
1013 cmd.req.arg[3] = temp_size;
1014 err = qlcnic_issue_cmd(adapter, &cmd);
1017 if (err == QLCNIC_RCODE_SUCCESS) {
/* Firmware writes little-endian words; convert to host order. */
1018 for (i = 0; i < temp_size / sizeof(u32); i++)
1019 *buffer++ = __le32_to_cpu(*tmp_buf++);
1022 qlcnic_free_mbx_args(&cmd);
1025 dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
/*
 * Top-level template acquisition: determine the template size, vzalloc a
 * holding buffer, fetch the template from firmware (falling back to flash
 * if needed — fallback trigger only partially visible), validate its
 * checksum, then initialize the capture mask and decide whether PEX DMA
 * can be used (template version >= 0x20001). On any failure the template
 * buffer is freed and the pointer cleared.
 */
1030 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
1034 u32 version, csum, *tmp_buf;
1035 struct qlcnic_hardware_context *ahw;
1036 struct qlcnic_dump_template_hdr *tmpl_hdr;
1037 u8 use_flash_temp = 0;
1041 err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
1044 dev_err(&adapter->pdev->dev,
1045 "Can't get template size %d\n", err);
1049 ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
1050 if (!ahw->fw_dump.tmpl_hdr)
1053 tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
1057 err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
/* Flash fallback when the firmware mailbox path fails. */
1061 err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
1065 dev_err(&adapter->pdev->dev,
1066 "Failed to get minidump template header %d\n",
1068 vfree(ahw->fw_dump.tmpl_hdr);
1069 ahw->fw_dump.tmpl_hdr = NULL;
/* Reject templates whose checksum does not validate. */
1074 csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
1077 dev_err(&adapter->pdev->dev,
1078 "Template header checksum validation failed\n");
1079 vfree(ahw->fw_dump.tmpl_hdr);
1080 ahw->fw_dump.tmpl_hdr = NULL;
1084 tmpl_hdr = ahw->fw_dump.tmpl_hdr;
1085 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
/* PEX DMA capture is only supported by template version >= 0x20001. */
1087 if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
1088 ahw->fw_dump.use_pex_dma = true;
1090 ahw->fw_dump.use_pex_dma = false;
1092 ahw->fw_dump.enable = 1;
/*
 * Capture a complete firmware dump:
 *  1. Refuse if dumping is disabled or a previous dump is still pending.
 *  2. Size the data area from the capture mask and per-level cap_sizes[].
 *  3. vzalloc the dump buffer; optionally allocate the PEX DMA bounce
 *     buffer (dropping back to the test-agent path if that fails).
 *  4. Pick the 82xx or 83xx opcode table, then walk every template entry:
 *     skip masked-out entries, find the entry's handler, invoke it, and
 *     validate the captured size against the entry header.
 *  5. Verify total captured bytes match the computed size, then emit a
 *     KOBJ_CHANGE uevent ("FW_DUMP=<netdev>") to announce availability.
 * NOTE(review): error/cleanup paths are only partially visible in this
 * extraction.
 */
1097 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1102 char *msg[] = {mesg, NULL};
1103 int i, k, ops_cnt, ops_index, dump_size = 0;
1104 u32 entry_offset, dump, no_entries, buf_offset = 0;
1105 struct qlcnic_dump_entry *entry;
1106 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1107 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
1108 static const struct qlcnic_dump_operations *fw_dump_ops;
1109 struct device *dev = &adapter->pdev->dev;
1110 struct qlcnic_hardware_context *ahw;
1115 if (!fw_dump->enable) {
1116 dev_info(&adapter->pdev->dev, "Dump not enabled\n");
1121 dev_info(&adapter->pdev->dev,
1122 "Previous dump not cleared, not capturing dump\n");
1126 netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
1127 /* Calculate the size for dump data area only */
/* Scan capture-mask bits (starting at BIT_1) against drv_cap_mask. */
1128 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
1129 if (i & tmpl_hdr->drv_cap_mask)
1130 dump_size += tmpl_hdr->cap_sizes[k];
1134 fw_dump->data = vzalloc(dump_size);
1138 buffer = fw_dump->data;
1139 fw_dump->size = dump_size;
1140 no_entries = tmpl_hdr->num_entries;
1141 entry_offset = tmpl_hdr->offset;
/* Stamp driver/firmware versions into the template header. */
1142 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
1143 tmpl_hdr->sys_info[1] = adapter->fw_version;
1145 if (fw_dump->use_pex_dma) {
1146 temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
1147 &fw_dump->phys_addr,
/* No coherent buffer — fall back to the test-agent read path. */
1150 fw_dump->use_pex_dma = false;
1152 fw_dump->dma_buffer = temp_buffer;
1155 if (qlcnic_82xx_check(adapter)) {
1156 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
1157 fw_dump_ops = qlcnic_fw_dump_ops;
1159 ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
1160 fw_dump_ops = qlcnic_83xx_fw_dump_ops;
/* Seed saved_state with per-function OCM window and PCI function. */
1161 ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
1162 tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
1163 tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
1166 for (i = 0; i < no_entries; i++) {
1167 entry = (void *)tmpl_hdr + entry_offset;
/* Entry not selected by the capture mask: mark skipped and move on. */
1168 if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
1169 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1170 entry_offset += entry->hdr.offset;
1174 /* Find the handler for this entry */
1176 while (ops_index < ops_cnt) {
1177 if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
1182 if (ops_index == ops_cnt) {
1183 dev_info(&adapter->pdev->dev,
1184 "Invalid entry type %d, exiting dump\n",
1189 /* Collect dump for this entry */
1190 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
1191 if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
1192 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
/* Advance by the header's declared sizes, not the handler's return. */
1193 buf_offset += entry->hdr.cap_size;
1194 entry_offset += entry->hdr.offset;
1195 buffer = fw_dump->data + buf_offset;
1197 if (dump_size != buf_offset) {
1198 dev_info(&adapter->pdev->dev,
1199 "Captured(%d) and expected size(%d) do not match\n",
1200 buf_offset, dump_size);
1204 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
1205 adapter->netdev->name);
1206 dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
1207 adapter->netdev->name, fw_dump->size);
1208 /* Send a udev event to notify availability of FW dump */
1209 kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
1213 if (fw_dump->use_pex_dma)
1214 dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
1215 fw_dump->dma_buffer, fw_dump->phys_addr);
1216 vfree(fw_dump->data);
1220 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
1222 u32 prev_version, current_version;
1223 struct qlcnic_hardware_context *ahw = adapter->ahw;
1224 struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
1225 struct pci_dev *pdev = adapter->pdev;
1227 prev_version = adapter->fw_version;
1228 current_version = qlcnic_83xx_get_fw_version(adapter);
1230 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
1231 if (fw_dump->tmpl_hdr)
1232 vfree(fw_dump->tmpl_hdr);
1233 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
1234 dev_info(&pdev->dev, "Supports FW dump capability\n");