/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
15 * qla2x00_mailbox_command
16 * Issue mailbox command and waits for completion.
19 * ha = adapter block pointer.
20 * mcp = driver internal mbx struct pointer.
23 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
26 * 0 : QLA_SUCCESS = cmd performed success
27 * 1 : QLA_FUNCTION_FAILED (error encountered)
28 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
34 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
37 unsigned long flags = 0;
38 device_reg_t __iomem *reg;
43 uint16_t __iomem *optr;
46 unsigned long wait_time;
47 struct qla_hw_data *ha = vha->hw;
48 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
50 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
52 if (ha->pdev->error_state > pci_channel_io_frozen) {
53 ql_log(ql_log_warn, vha, 0x1001,
54 "error_state is greater than pci_channel_io_frozen, "
56 return QLA_FUNCTION_TIMEOUT;
59 if (vha->device_flags & DFLG_DEV_FAILED) {
60 ql_log(ql_log_warn, vha, 0x1002,
61 "Device in failed state, exiting.\n");
62 return QLA_FUNCTION_TIMEOUT;
66 io_lock_on = base_vha->flags.init_done;
69 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
72 if (ha->flags.pci_channel_io_perm_failure) {
73 ql_log(ql_log_warn, vha, 0x1003,
74 "Perm failure on EEH timeout MBX, exiting.\n");
75 return QLA_FUNCTION_TIMEOUT;
78 if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
79 /* Setting Link-Down error */
80 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
81 ql_log(ql_log_warn, vha, 0x1004,
82 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
83 return QLA_FUNCTION_TIMEOUT;
87 * Wait for active mailbox commands to finish by waiting at most tov
88 * seconds. This is to serialize actual issuing of mailbox cmds during
91 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
92 /* Timeout occurred. Return error. */
93 ql_log(ql_log_warn, vha, 0x1005,
94 "Cmd access timeout, cmd=0x%x, Exiting.\n",
96 return QLA_FUNCTION_TIMEOUT;
99 ha->flags.mbox_busy = 1;
100 /* Save mailbox command for debug */
103 ql_dbg(ql_dbg_mbx, vha, 0x1006,
104 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
106 spin_lock_irqsave(&ha->hardware_lock, flags);
108 /* Load mailbox registers. */
110 optr = (uint16_t __iomem *)®->isp82.mailbox_in[0];
111 else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
112 optr = (uint16_t __iomem *)®->isp24.mailbox0;
114 optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0);
117 command = mcp->mb[0];
118 mboxes = mcp->out_mb;
120 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
121 if (IS_QLA2200(ha) && cnt == 8)
123 (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8);
125 WRT_REG_WORD(optr, *iptr);
132 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
133 "Loaded MBX registers (displayed in bytes) =.\n");
134 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
135 (uint8_t *)mcp->mb, 16);
136 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
138 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
139 ((uint8_t *)mcp->mb + 0x10), 16);
140 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
142 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
143 ((uint8_t *)mcp->mb + 0x20), 8);
144 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
145 "I/O Address = %p.\n", optr);
146 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
148 /* Issue set host interrupt command to send cmd out. */
149 ha->flags.mbox_int = 0;
150 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
152 /* Unlock mbx registers and wait for interrupt */
153 ql_dbg(ql_dbg_mbx, vha, 0x100f,
154 "Going to unlock irq & waiting for interrupts. "
155 "jiffies=%lx.\n", jiffies);
157 /* Wait for mbx cmd completion until timeout */
159 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
160 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
162 if (IS_QLA82XX(ha)) {
163 if (RD_REG_DWORD(®->isp82.hint) &
164 HINT_MBX_INT_PENDING) {
165 spin_unlock_irqrestore(&ha->hardware_lock,
167 ha->flags.mbox_busy = 0;
168 ql_dbg(ql_dbg_mbx, vha, 0x1010,
169 "Pending mailbox timeout, exiting.\n");
170 rval = QLA_FUNCTION_TIMEOUT;
173 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
174 } else if (IS_FWI2_CAPABLE(ha))
175 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
177 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
178 spin_unlock_irqrestore(&ha->hardware_lock, flags);
180 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
182 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
185 ql_dbg(ql_dbg_mbx, vha, 0x1011,
186 "Cmd=%x Polling Mode.\n", command);
188 if (IS_QLA82XX(ha)) {
189 if (RD_REG_DWORD(®->isp82.hint) &
190 HINT_MBX_INT_PENDING) {
191 spin_unlock_irqrestore(&ha->hardware_lock,
193 ha->flags.mbox_busy = 0;
194 ql_dbg(ql_dbg_mbx, vha, 0x1012,
195 "Pending mailbox timeout, exiting.\n");
196 rval = QLA_FUNCTION_TIMEOUT;
199 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
200 } else if (IS_FWI2_CAPABLE(ha))
201 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
203 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
204 spin_unlock_irqrestore(&ha->hardware_lock, flags);
206 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
207 while (!ha->flags.mbox_int) {
208 if (time_after(jiffies, wait_time))
211 /* Check for pending interrupts. */
212 qla2x00_poll(ha->rsp_q_map[0]);
214 if (!ha->flags.mbox_int &&
216 command == MBC_LOAD_RISC_RAM_EXTENDED))
219 ql_dbg(ql_dbg_mbx, vha, 0x1013,
221 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
224 /* Check whether we timed out */
225 if (ha->flags.mbox_int) {
228 ql_dbg(ql_dbg_mbx, vha, 0x1014,
229 "Cmd=%x completed.\n", command);
231 /* Got interrupt. Clear the flag. */
232 ha->flags.mbox_int = 0;
233 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
235 if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) {
236 ha->flags.mbox_busy = 0;
237 /* Setting Link-Down error */
238 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
240 rval = QLA_FUNCTION_FAILED;
241 ql_log(ql_log_warn, vha, 0x1015,
242 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
246 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
247 rval = QLA_FUNCTION_FAILED;
249 /* Load return mailbox registers. */
251 iptr = (uint16_t *)&ha->mailbox_out[0];
253 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
266 if (IS_FWI2_CAPABLE(ha)) {
267 mb0 = RD_REG_WORD(®->isp24.mailbox0);
268 ictrl = RD_REG_DWORD(®->isp24.ictrl);
270 mb0 = RD_MAILBOX_REG(ha, ®->isp, 0);
271 ictrl = RD_REG_WORD(®->isp.ictrl);
273 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
274 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
275 "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
279 * Attempt to capture a firmware dump for further analysis
280 * of the current firmware state
282 ha->isp_ops->fw_dump(vha, 0);
284 rval = QLA_FUNCTION_TIMEOUT;
287 ha->flags.mbox_busy = 0;
292 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
293 ql_dbg(ql_dbg_mbx, vha, 0x101a,
294 "Checking for additional resp interrupt.\n");
296 /* polling mode for non isp_abort commands. */
297 qla2x00_poll(ha->rsp_q_map[0]);
300 if (rval == QLA_FUNCTION_TIMEOUT &&
301 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
302 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
303 ha->flags.eeh_busy) {
304 /* not in dpc. schedule it for dpc to take over. */
305 ql_dbg(ql_dbg_mbx, vha, 0x101b,
306 "Timeout, schedule isp_abort_needed.\n");
308 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
309 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
310 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
311 if (IS_QLA82XX(ha)) {
312 ql_dbg(ql_dbg_mbx, vha, 0x112a,
313 "disabling pause transmit on port "
316 QLA82XX_CRB_NIU + 0x98,
317 CRB_NIU_XG_PAUSE_CTL_P0|
318 CRB_NIU_XG_PAUSE_CTL_P1);
320 ql_log(ql_log_info, base_vha, 0x101c,
321 "Mailbox cmd timeout occurred, cmd=0x%x, "
322 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
323 "abort.\n", command, mcp->mb[0],
325 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
326 qla2xxx_wake_dpc(vha);
328 } else if (!abort_active) {
329 /* call abort directly since we are in the DPC thread */
330 ql_dbg(ql_dbg_mbx, vha, 0x101d,
331 "Timeout, calling abort_isp.\n");
333 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
334 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
335 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
336 if (IS_QLA82XX(ha)) {
337 ql_dbg(ql_dbg_mbx, vha, 0x112b,
338 "disabling pause transmit on port "
341 QLA82XX_CRB_NIU + 0x98,
342 CRB_NIU_XG_PAUSE_CTL_P0|
343 CRB_NIU_XG_PAUSE_CTL_P1);
345 ql_log(ql_log_info, base_vha, 0x101e,
346 "Mailbox cmd timeout occurred, cmd=0x%x, "
347 "mb[0]=0x%x. Scheduling ISP abort ",
348 command, mcp->mb[0]);
349 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
350 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
351 /* Allow next mbx cmd to come in. */
352 complete(&ha->mbx_cmd_comp);
353 if (ha->isp_ops->abort_isp(vha)) {
354 /* Failed. retry later. */
355 set_bit(ISP_ABORT_NEEDED,
358 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
359 ql_dbg(ql_dbg_mbx, vha, 0x101f,
360 "Finished abort_isp.\n");
367 /* Allow next mbx cmd to come in. */
368 complete(&ha->mbx_cmd_comp);
372 ql_log(ql_log_warn, base_vha, 0x1020,
373 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
374 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
376 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
383 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
384 uint32_t risc_code_size)
387 struct qla_hw_data *ha = vha->hw;
389 mbx_cmd_t *mcp = &mc;
391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
392 "Entered %s.\n", __func__);
394 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
395 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
396 mcp->mb[8] = MSW(risc_addr);
397 mcp->out_mb = MBX_8|MBX_0;
399 mcp->mb[0] = MBC_LOAD_RISC_RAM;
402 mcp->mb[1] = LSW(risc_addr);
403 mcp->mb[2] = MSW(req_dma);
404 mcp->mb[3] = LSW(req_dma);
405 mcp->mb[6] = MSW(MSD(req_dma));
406 mcp->mb[7] = LSW(MSD(req_dma));
407 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
408 if (IS_FWI2_CAPABLE(ha)) {
409 mcp->mb[4] = MSW(risc_code_size);
410 mcp->mb[5] = LSW(risc_code_size);
411 mcp->out_mb |= MBX_5|MBX_4;
413 mcp->mb[4] = LSW(risc_code_size);
414 mcp->out_mb |= MBX_4;
418 mcp->tov = MBX_TOV_SECONDS;
420 rval = qla2x00_mailbox_command(vha, mcp);
422 if (rval != QLA_SUCCESS) {
423 ql_dbg(ql_dbg_mbx, vha, 0x1023,
424 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
427 "Done %s.\n", __func__);
433 #define EXTENDED_BB_CREDITS BIT_0
436 * Start adapter firmware.
439 * ha = adapter block pointer.
440 * TARGET_QUEUE_LOCK must be released.
441 * ADAPTER_STATE_LOCK must be released.
444 * qla2x00 local function return status code.
450 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
453 struct qla_hw_data *ha = vha->hw;
455 mbx_cmd_t *mcp = &mc;
457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
458 "Entered %s.\n", __func__);
460 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
463 if (IS_FWI2_CAPABLE(ha)) {
464 mcp->mb[1] = MSW(risc_addr);
465 mcp->mb[2] = LSW(risc_addr);
467 if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
468 struct nvram_81xx *nv = ha->nvram;
469 mcp->mb[4] = (nv->enhanced_features &
470 EXTENDED_BB_CREDITS);
473 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
476 mcp->mb[1] = LSW(risc_addr);
477 mcp->out_mb |= MBX_1;
478 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
480 mcp->out_mb |= MBX_2;
484 mcp->tov = MBX_TOV_SECONDS;
486 rval = qla2x00_mailbox_command(vha, mcp);
488 if (rval != QLA_SUCCESS) {
489 ql_dbg(ql_dbg_mbx, vha, 0x1026,
490 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
492 if (IS_FWI2_CAPABLE(ha)) {
493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
494 "Done exchanges=%x.\n", mcp->mb[1]);
496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
497 "Done %s.\n", __func__);
505 * qla2x00_get_fw_version
506 * Get firmware version.
509 * ha: adapter state pointer.
510 * major: pointer for major number.
511 * minor: pointer for minor number.
512 * subminor: pointer for subminor number.
515 * qla2x00 local function return status code.
521 qla2x00_get_fw_version(scsi_qla_host_t *vha)
525 mbx_cmd_t *mcp = &mc;
526 struct qla_hw_data *ha = vha->hw;
528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
529 "Entered %s.\n", __func__);
531 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
533 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
534 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
535 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
536 if (IS_FWI2_CAPABLE(ha))
537 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
539 mcp->tov = MBX_TOV_SECONDS;
540 rval = qla2x00_mailbox_command(vha, mcp);
541 if (rval != QLA_SUCCESS)
544 /* Return mailbox data. */
545 ha->fw_major_version = mcp->mb[1];
546 ha->fw_minor_version = mcp->mb[2];
547 ha->fw_subminor_version = mcp->mb[3];
548 ha->fw_attributes = mcp->mb[6];
549 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
550 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
552 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
553 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
554 ha->mpi_version[0] = mcp->mb[10] & 0xff;
555 ha->mpi_version[1] = mcp->mb[11] >> 8;
556 ha->mpi_version[2] = mcp->mb[11] & 0xff;
557 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
558 ha->phy_version[0] = mcp->mb[8] & 0xff;
559 ha->phy_version[1] = mcp->mb[9] >> 8;
560 ha->phy_version[2] = mcp->mb[9] & 0xff;
562 if (IS_FWI2_CAPABLE(ha)) {
563 ha->fw_attributes_h = mcp->mb[15];
564 ha->fw_attributes_ext[0] = mcp->mb[16];
565 ha->fw_attributes_ext[1] = mcp->mb[17];
566 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
567 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
568 __func__, mcp->mb[15], mcp->mb[6]);
569 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
570 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
571 __func__, mcp->mb[17], mcp->mb[16]);
575 if (rval != QLA_SUCCESS) {
577 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
580 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
581 "Done %s.\n", __func__);
587 * qla2x00_get_fw_options
588 * Set firmware options.
591 * ha = adapter block pointer.
592 * fwopt = pointer for firmware options.
595 * qla2x00 local function return status code.
601 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
605 mbx_cmd_t *mcp = &mc;
607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
608 "Entered %s.\n", __func__);
610 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
612 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
613 mcp->tov = MBX_TOV_SECONDS;
615 rval = qla2x00_mailbox_command(vha, mcp);
617 if (rval != QLA_SUCCESS) {
619 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
621 fwopts[0] = mcp->mb[0];
622 fwopts[1] = mcp->mb[1];
623 fwopts[2] = mcp->mb[2];
624 fwopts[3] = mcp->mb[3];
626 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
627 "Done %s.\n", __func__);
635 * qla2x00_set_fw_options
636 * Set firmware options.
639 * ha = adapter block pointer.
640 * fwopt = pointer for firmware options.
643 * qla2x00 local function return status code.
649 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
653 mbx_cmd_t *mcp = &mc;
655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
656 "Entered %s.\n", __func__);
658 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
659 mcp->mb[1] = fwopts[1];
660 mcp->mb[2] = fwopts[2];
661 mcp->mb[3] = fwopts[3];
662 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
664 if (IS_FWI2_CAPABLE(vha->hw)) {
667 mcp->mb[10] = fwopts[10];
668 mcp->mb[11] = fwopts[11];
669 mcp->mb[12] = 0; /* Undocumented, but used */
670 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
672 mcp->tov = MBX_TOV_SECONDS;
674 rval = qla2x00_mailbox_command(vha, mcp);
676 fwopts[0] = mcp->mb[0];
678 if (rval != QLA_SUCCESS) {
680 ql_dbg(ql_dbg_mbx, vha, 0x1030,
681 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
685 "Done %s.\n", __func__);
692 * qla2x00_mbx_reg_test
693 * Mailbox register wrap test.
696 * ha = adapter block pointer.
697 * TARGET_QUEUE_LOCK must be released.
698 * ADAPTER_STATE_LOCK must be released.
701 * qla2x00 local function return status code.
707 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
711 mbx_cmd_t *mcp = &mc;
713 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
714 "Entered %s.\n", __func__);
716 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
724 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
725 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
726 mcp->tov = MBX_TOV_SECONDS;
728 rval = qla2x00_mailbox_command(vha, mcp);
730 if (rval == QLA_SUCCESS) {
731 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
732 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
733 rval = QLA_FUNCTION_FAILED;
734 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
735 mcp->mb[7] != 0x2525)
736 rval = QLA_FUNCTION_FAILED;
739 if (rval != QLA_SUCCESS) {
741 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
744 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
745 "Done %s.\n", __func__);
752 * qla2x00_verify_checksum
753 * Verify firmware checksum.
756 * ha = adapter block pointer.
757 * TARGET_QUEUE_LOCK must be released.
758 * ADAPTER_STATE_LOCK must be released.
761 * qla2x00 local function return status code.
767 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
771 mbx_cmd_t *mcp = &mc;
773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
774 "Entered %s.\n", __func__);
776 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
779 if (IS_FWI2_CAPABLE(vha->hw)) {
780 mcp->mb[1] = MSW(risc_addr);
781 mcp->mb[2] = LSW(risc_addr);
782 mcp->out_mb |= MBX_2|MBX_1;
783 mcp->in_mb |= MBX_2|MBX_1;
785 mcp->mb[1] = LSW(risc_addr);
786 mcp->out_mb |= MBX_1;
790 mcp->tov = MBX_TOV_SECONDS;
792 rval = qla2x00_mailbox_command(vha, mcp);
794 if (rval != QLA_SUCCESS) {
795 ql_dbg(ql_dbg_mbx, vha, 0x1036,
796 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
797 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
800 "Done %s.\n", __func__);
808 * Issue IOCB using mailbox command
811 * ha = adapter state pointer.
812 * buffer = buffer pointer.
813 * phys_addr = physical address of buffer.
814 * size = size of buffer.
815 * TARGET_QUEUE_LOCK must be released.
816 * ADAPTER_STATE_LOCK must be released.
819 * qla2x00 local function return status code.
825 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
826 dma_addr_t phys_addr, size_t size, uint32_t tov)
830 mbx_cmd_t *mcp = &mc;
832 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
833 "Entered %s.\n", __func__);
835 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
837 mcp->mb[2] = MSW(phys_addr);
838 mcp->mb[3] = LSW(phys_addr);
839 mcp->mb[6] = MSW(MSD(phys_addr));
840 mcp->mb[7] = LSW(MSD(phys_addr));
841 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
842 mcp->in_mb = MBX_2|MBX_0;
845 rval = qla2x00_mailbox_command(vha, mcp);
847 if (rval != QLA_SUCCESS) {
849 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
851 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
853 /* Mask reserved bits. */
854 sts_entry->entry_status &=
855 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
857 "Done %s.\n", __func__);
864 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
867 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
872 * qla2x00_abort_command
873 * Abort command aborts a specified IOCB.
876 * ha = adapter block pointer.
877 * sp = SB structure pointer.
880 * qla2x00 local function return status code.
886 qla2x00_abort_command(srb_t *sp)
888 unsigned long flags = 0;
892 mbx_cmd_t *mcp = &mc;
893 fc_port_t *fcport = sp->fcport;
894 scsi_qla_host_t *vha = fcport->vha;
895 struct qla_hw_data *ha = vha->hw;
896 struct req_que *req = vha->req;
897 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
899 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
900 "Entered %s.\n", __func__);
902 spin_lock_irqsave(&ha->hardware_lock, flags);
903 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
904 if (req->outstanding_cmds[handle] == sp)
907 spin_unlock_irqrestore(&ha->hardware_lock, flags);
909 if (handle == MAX_OUTSTANDING_COMMANDS) {
910 /* command not found */
911 return QLA_FUNCTION_FAILED;
914 mcp->mb[0] = MBC_ABORT_COMMAND;
915 if (HAS_EXTENDED_IDS(ha))
916 mcp->mb[1] = fcport->loop_id;
918 mcp->mb[1] = fcport->loop_id << 8;
919 mcp->mb[2] = (uint16_t)handle;
920 mcp->mb[3] = (uint16_t)(handle >> 16);
921 mcp->mb[6] = (uint16_t)cmd->device->lun;
922 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
924 mcp->tov = MBX_TOV_SECONDS;
926 rval = qla2x00_mailbox_command(vha, mcp);
928 if (rval != QLA_SUCCESS) {
929 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
932 "Done %s.\n", __func__);
939 qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
943 mbx_cmd_t *mcp = &mc;
944 scsi_qla_host_t *vha;
951 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
952 "Entered %s.\n", __func__);
954 req = vha->hw->req_q_map[0];
956 mcp->mb[0] = MBC_ABORT_TARGET;
957 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
958 if (HAS_EXTENDED_IDS(vha->hw)) {
959 mcp->mb[1] = fcport->loop_id;
961 mcp->out_mb |= MBX_10;
963 mcp->mb[1] = fcport->loop_id << 8;
965 mcp->mb[2] = vha->hw->loop_reset_delay;
966 mcp->mb[9] = vha->vp_idx;
969 mcp->tov = MBX_TOV_SECONDS;
971 rval = qla2x00_mailbox_command(vha, mcp);
972 if (rval != QLA_SUCCESS) {
973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
974 "Failed=%x.\n", rval);
977 /* Issue marker IOCB. */
978 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
980 if (rval2 != QLA_SUCCESS) {
981 ql_dbg(ql_dbg_mbx, vha, 0x1040,
982 "Failed to issue marker IOCB (%x).\n", rval2);
984 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
985 "Done %s.\n", __func__);
992 qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
996 mbx_cmd_t *mcp = &mc;
997 scsi_qla_host_t *vha;
1003 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1004 "Entered %s.\n", __func__);
1006 req = vha->hw->req_q_map[0];
1008 mcp->mb[0] = MBC_LUN_RESET;
1009 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1010 if (HAS_EXTENDED_IDS(vha->hw))
1011 mcp->mb[1] = fcport->loop_id;
1013 mcp->mb[1] = fcport->loop_id << 8;
1016 mcp->mb[9] = vha->vp_idx;
1019 mcp->tov = MBX_TOV_SECONDS;
1021 rval = qla2x00_mailbox_command(vha, mcp);
1022 if (rval != QLA_SUCCESS) {
1023 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1026 /* Issue marker IOCB. */
1027 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1029 if (rval2 != QLA_SUCCESS) {
1030 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1031 "Failed to issue marker IOCB (%x).\n", rval2);
1033 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1034 "Done %s.\n", __func__);
1041 * qla2x00_get_adapter_id
1042 * Get adapter ID and topology.
1045 * ha = adapter block pointer.
1046 * id = pointer for loop ID.
1047 * al_pa = pointer for AL_PA.
1048 * area = pointer for area.
1049 * domain = pointer for domain.
1050 * top = pointer for topology.
1051 * TARGET_QUEUE_LOCK must be released.
1052 * ADAPTER_STATE_LOCK must be released.
1055 * qla2x00 local function return status code.
1061 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1062 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1066 mbx_cmd_t *mcp = &mc;
1068 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1069 "Entered %s.\n", __func__);
1071 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1072 mcp->mb[9] = vha->vp_idx;
1073 mcp->out_mb = MBX_9|MBX_0;
1074 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1075 if (IS_CNA_CAPABLE(vha->hw))
1076 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1077 mcp->tov = MBX_TOV_SECONDS;
1079 rval = qla2x00_mailbox_command(vha, mcp);
1080 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1081 rval = QLA_COMMAND_ERROR;
1082 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1083 rval = QLA_INVALID_COMMAND;
1087 *al_pa = LSB(mcp->mb[2]);
1088 *area = MSB(mcp->mb[2]);
1089 *domain = LSB(mcp->mb[3]);
1091 *sw_cap = mcp->mb[7];
1093 if (rval != QLA_SUCCESS) {
1095 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1098 "Done %s.\n", __func__);
1100 if (IS_CNA_CAPABLE(vha->hw)) {
1101 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1102 vha->fcoe_fcf_idx = mcp->mb[10];
1103 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1104 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1105 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1106 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1107 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1108 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1116 * qla2x00_get_retry_cnt
1117 * Get current firmware login retry count and delay.
1120 * ha = adapter block pointer.
1121 * retry_cnt = pointer to login retry count.
1122 * tov = pointer to login timeout value.
1125 * qla2x00 local function return status code.
1131 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1137 mbx_cmd_t *mcp = &mc;
1139 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1140 "Entered %s.\n", __func__);
1142 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1143 mcp->out_mb = MBX_0;
1144 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1145 mcp->tov = MBX_TOV_SECONDS;
1147 rval = qla2x00_mailbox_command(vha, mcp);
1149 if (rval != QLA_SUCCESS) {
1151 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1152 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1154 /* Convert returned data and check our values. */
1155 *r_a_tov = mcp->mb[3] / 2;
1156 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1157 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1158 /* Update to the larger values */
1159 *retry_cnt = (uint8_t)mcp->mb[1];
1163 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1164 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1171 * qla2x00_init_firmware
1172 * Initialize adapter firmware.
1175 * ha = adapter block pointer.
1176 * dptr = Initialization control block pointer.
1177 * size = size of initialization control block.
1178 * TARGET_QUEUE_LOCK must be released.
1179 * ADAPTER_STATE_LOCK must be released.
1182 * qla2x00 local function return status code.
1188 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1192 mbx_cmd_t *mcp = &mc;
1193 struct qla_hw_data *ha = vha->hw;
1195 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1196 "Entered %s.\n", __func__);
1198 if (IS_QLA82XX(ha) && ql2xdbwr)
1199 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
1200 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1202 if (ha->flags.npiv_supported)
1203 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1205 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1208 mcp->mb[2] = MSW(ha->init_cb_dma);
1209 mcp->mb[3] = LSW(ha->init_cb_dma);
1210 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1211 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1212 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1213 if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) {
1215 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1216 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1217 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1218 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1219 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1220 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1222 /* 1 and 2 should normally be captured. */
1223 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1225 /* mb3 is additional info about the installed SFP. */
1226 mcp->in_mb |= MBX_3;
1227 mcp->buf_size = size;
1228 mcp->flags = MBX_DMA_OUT;
1229 mcp->tov = MBX_TOV_SECONDS;
1230 rval = qla2x00_mailbox_command(vha, mcp);
1232 if (rval != QLA_SUCCESS) {
1234 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1235 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1236 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1239 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1240 "Done %s.\n", __func__);
1247 * qla2x00_get_node_name_list
1248 * Issue get node name list mailbox command, kmalloc()
1249 * and return the resulting list. Caller must kfree() it!
1252 * ha = adapter state pointer.
1253 * out_data = resulting list
1254 * out_len = length of the resulting list
1257 * qla2x00 local function return status code.
1263 qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
1265 struct qla_hw_data *ha = vha->hw;
1266 struct qla_port_24xx_data *list = NULL;
1269 dma_addr_t pmap_dma;
1275 dma_size = left * sizeof(*list);
1276 pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
1277 &pmap_dma, GFP_KERNEL);
1279 ql_log(ql_log_warn, vha, 0x113f,
1280 "%s(%ld): DMA Alloc failed of %ld\n",
1281 __func__, vha->host_no, dma_size);
1282 rval = QLA_MEMORY_ALLOC_FAILED;
1286 mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
1287 mc.mb[1] = BIT_1 | BIT_3;
1288 mc.mb[2] = MSW(pmap_dma);
1289 mc.mb[3] = LSW(pmap_dma);
1290 mc.mb[6] = MSW(MSD(pmap_dma));
1291 mc.mb[7] = LSW(MSD(pmap_dma));
1292 mc.mb[8] = dma_size;
1293 mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
1294 mc.in_mb = MBX_0|MBX_1;
1296 mc.flags = MBX_DMA_IN;
1298 rval = qla2x00_mailbox_command(vha, &mc);
1299 if (rval != QLA_SUCCESS) {
1300 if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
1301 (mc.mb[1] == 0xA)) {
1302 left += le16_to_cpu(mc.mb[2]) /
1303 sizeof(struct qla_port_24xx_data);
1311 list = kzalloc(dma_size, GFP_KERNEL);
1313 ql_log(ql_log_warn, vha, 0x1140,
1314 "%s(%ld): failed to allocate node names list "
1315 "structure.\n", __func__, vha->host_no);
1316 rval = QLA_MEMORY_ALLOC_FAILED;
1320 memcpy(list, pmap, dma_size);
1322 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1326 *out_len = dma_size;
1332 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1337 * qla2x00_get_port_database
1338 * Issue normal/enhanced get port database mailbox command
1339 * and copy device name as necessary.
1342 * ha = adapter state pointer.
1343 * dev = structure pointer.
1344 * opt = enhanced cmd option byte.
1347 * qla2x00 local function return status code.
1353 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1357 mbx_cmd_t *mcp = &mc;
1358 port_database_t *pd;
1359 struct port_database_24xx *pd24;
1361 struct qla_hw_data *ha = vha->hw;
1363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1364 "Entered %s.\n", __func__);
1367 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1369 ql_log(ql_log_warn, vha, 0x1050,
1370 "Failed to allocate port database structure.\n");
1371 return QLA_MEMORY_ALLOC_FAILED;
1373 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1375 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1376 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1377 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1378 mcp->mb[2] = MSW(pd_dma);
1379 mcp->mb[3] = LSW(pd_dma);
1380 mcp->mb[6] = MSW(MSD(pd_dma));
1381 mcp->mb[7] = LSW(MSD(pd_dma));
1382 mcp->mb[9] = vha->vp_idx;
1383 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1385 if (IS_FWI2_CAPABLE(ha)) {
1386 mcp->mb[1] = fcport->loop_id;
1388 mcp->out_mb |= MBX_10|MBX_1;
1389 mcp->in_mb |= MBX_1;
1390 } else if (HAS_EXTENDED_IDS(ha)) {
1391 mcp->mb[1] = fcport->loop_id;
1393 mcp->out_mb |= MBX_10|MBX_1;
1395 mcp->mb[1] = fcport->loop_id << 8 | opt;
1396 mcp->out_mb |= MBX_1;
1398 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1399 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1400 mcp->flags = MBX_DMA_IN;
1401 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1402 rval = qla2x00_mailbox_command(vha, mcp);
1403 if (rval != QLA_SUCCESS)
1406 if (IS_FWI2_CAPABLE(ha)) {
1408 pd24 = (struct port_database_24xx *) pd;
1410 /* Check for logged in state. */
1411 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1412 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1413 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1414 "Unable to verify login-state (%x/%x) for "
1415 "loop_id %x.\n", pd24->current_login_state,
1416 pd24->last_login_state, fcport->loop_id);
1417 rval = QLA_FUNCTION_FAILED;
1421 if (fcport->loop_id == FC_NO_LOOP_ID ||
1422 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1423 memcmp(fcport->port_name, pd24->port_name, 8))) {
1424 /* We lost the device mid way. */
1425 rval = QLA_NOT_LOGGED_IN;
1429 /* Names are little-endian. */
1430 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1431 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1433 /* Get port_id of device. */
1434 fcport->d_id.b.domain = pd24->port_id[0];
1435 fcport->d_id.b.area = pd24->port_id[1];
1436 fcport->d_id.b.al_pa = pd24->port_id[2];
1437 fcport->d_id.b.rsvd_1 = 0;
1439 /* If not target must be initiator or unknown type. */
1440 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1441 fcport->port_type = FCT_INITIATOR;
1443 fcport->port_type = FCT_TARGET;
1445 /* Passback COS information. */
1446 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1447 FC_COS_CLASS2 : FC_COS_CLASS3;
1449 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1450 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1454 /* Check for logged in state. */
1455 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1456 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1457 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1458 "Unable to verify login-state (%x/%x) - "
1459 "portid=%02x%02x%02x.\n", pd->master_state,
1460 pd->slave_state, fcport->d_id.b.domain,
1461 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1462 rval = QLA_FUNCTION_FAILED;
1466 if (fcport->loop_id == FC_NO_LOOP_ID ||
1467 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1468 memcmp(fcport->port_name, pd->port_name, 8))) {
1469 /* We lost the device mid way. */
1470 rval = QLA_NOT_LOGGED_IN;
1474 /* Names are little-endian. */
1475 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1476 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1478 /* Get port_id of device. */
1479 fcport->d_id.b.domain = pd->port_id[0];
1480 fcport->d_id.b.area = pd->port_id[3];
1481 fcport->d_id.b.al_pa = pd->port_id[2];
1482 fcport->d_id.b.rsvd_1 = 0;
1484 /* If not target must be initiator or unknown type. */
1485 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1486 fcport->port_type = FCT_INITIATOR;
1488 fcport->port_type = FCT_TARGET;
1490 /* Passback COS information. */
1491 fcport->supported_classes = (pd->options & BIT_4) ?
1492 FC_COS_CLASS2: FC_COS_CLASS3;
1496 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1498 if (rval != QLA_SUCCESS) {
1499 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1500 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1501 mcp->mb[0], mcp->mb[1]);
1503 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1504 "Done %s.\n", __func__);
1511 * qla2x00_get_firmware_state
1512 * Get adapter firmware state.
1515 * ha = adapter block pointer.
1516 * dptr = pointer for firmware state.
1517 * TARGET_QUEUE_LOCK must be released.
1518 * ADAPTER_STATE_LOCK must be released.
1521 * qla2x00 local function return status code.
1527 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1531 mbx_cmd_t *mcp = &mc;
1533 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1534 "Entered %s.\n", __func__);
1536 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1537 mcp->out_mb = MBX_0;
1538 if (IS_FWI2_CAPABLE(vha->hw))
1539 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1541 mcp->in_mb = MBX_1|MBX_0;
1542 mcp->tov = MBX_TOV_SECONDS;
1544 rval = qla2x00_mailbox_command(vha, mcp);
1546 /* Return firmware states. */
1547 states[0] = mcp->mb[1];
1548 if (IS_FWI2_CAPABLE(vha->hw)) {
1549 states[1] = mcp->mb[2];
1550 states[2] = mcp->mb[3];
1551 states[3] = mcp->mb[4];
1552 states[4] = mcp->mb[5];
1555 if (rval != QLA_SUCCESS) {
1557 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1560 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
1561 "Done %s.\n", __func__);
1568 * qla2x00_get_port_name
1569 * Issue get port name mailbox command.
1570 * Returned name is in big endian format.
1573 * ha = adapter block pointer.
1574 * loop_id = loop ID of device.
1575 * name = pointer for name.
1576 * TARGET_QUEUE_LOCK must be released.
1577 * ADAPTER_STATE_LOCK must be released.
1580 * qla2x00 local function return status code.
1586 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1591 mbx_cmd_t *mcp = &mc;
1593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
1594 "Entered %s.\n", __func__);
1596 mcp->mb[0] = MBC_GET_PORT_NAME;
1597 mcp->mb[9] = vha->vp_idx;
1598 mcp->out_mb = MBX_9|MBX_1|MBX_0;
1599 if (HAS_EXTENDED_IDS(vha->hw)) {
1600 mcp->mb[1] = loop_id;
1602 mcp->out_mb |= MBX_10;
1604 mcp->mb[1] = loop_id << 8 | opt;
1607 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1608 mcp->tov = MBX_TOV_SECONDS;
1610 rval = qla2x00_mailbox_command(vha, mcp);
1612 if (rval != QLA_SUCCESS) {
1614 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1617 /* This function returns name in big endian. */
1618 name[0] = MSB(mcp->mb[2]);
1619 name[1] = LSB(mcp->mb[2]);
1620 name[2] = MSB(mcp->mb[3]);
1621 name[3] = LSB(mcp->mb[3]);
1622 name[4] = MSB(mcp->mb[6]);
1623 name[5] = LSB(mcp->mb[6]);
1624 name[6] = MSB(mcp->mb[7]);
1625 name[7] = LSB(mcp->mb[7]);
1628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
1629 "Done %s.\n", __func__);
1637 * Issue LIP reset mailbox command.
1640 * ha = adapter block pointer.
1641 * TARGET_QUEUE_LOCK must be released.
1642 * ADAPTER_STATE_LOCK must be released.
1645 * qla2x00 local function return status code.
1651 qla2x00_lip_reset(scsi_qla_host_t *vha)
1655 mbx_cmd_t *mcp = &mc;
1657 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
1658 "Entered %s.\n", __func__);
1660 if (IS_CNA_CAPABLE(vha->hw)) {
1661 /* Logout across all FCFs. */
1662 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1665 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1666 } else if (IS_FWI2_CAPABLE(vha->hw)) {
1667 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1670 mcp->mb[3] = vha->hw->loop_reset_delay;
1671 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1673 mcp->mb[0] = MBC_LIP_RESET;
1674 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1675 if (HAS_EXTENDED_IDS(vha->hw)) {
1676 mcp->mb[1] = 0x00ff;
1678 mcp->out_mb |= MBX_10;
1680 mcp->mb[1] = 0xff00;
1682 mcp->mb[2] = vha->hw->loop_reset_delay;
1686 mcp->tov = MBX_TOV_SECONDS;
1688 rval = qla2x00_mailbox_command(vha, mcp);
1690 if (rval != QLA_SUCCESS) {
1692 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1695 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
1696 "Done %s.\n", __func__);
1707 * ha = adapter block pointer.
1708 * sns = pointer for command.
1709 * cmd_size = command size.
1710 * buf_size = response/command size.
1711 * TARGET_QUEUE_LOCK must be released.
1712 * ADAPTER_STATE_LOCK must be released.
1715 * qla2x00 local function return status code.
1721 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1722 uint16_t cmd_size, size_t buf_size)
1726 mbx_cmd_t *mcp = &mc;
1728 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
1729 "Entered %s.\n", __func__);
1731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
1732 "Retry cnt=%d ratov=%d total tov=%d.\n",
1733 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1735 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1736 mcp->mb[1] = cmd_size;
1737 mcp->mb[2] = MSW(sns_phys_address);
1738 mcp->mb[3] = LSW(sns_phys_address);
1739 mcp->mb[6] = MSW(MSD(sns_phys_address));
1740 mcp->mb[7] = LSW(MSD(sns_phys_address));
1741 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1742 mcp->in_mb = MBX_0|MBX_1;
1743 mcp->buf_size = buf_size;
1744 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
1745 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
1746 rval = qla2x00_mailbox_command(vha, mcp);
1748 if (rval != QLA_SUCCESS) {
1750 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1751 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1752 rval, mcp->mb[0], mcp->mb[1]);
1755 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
1756 "Done %s.\n", __func__);
1763 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1764 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1768 struct logio_entry_24xx *lg;
1771 struct qla_hw_data *ha = vha->hw;
1772 struct req_que *req;
1773 struct rsp_que *rsp;
1775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
1776 "Entered %s.\n", __func__);
1778 if (ha->flags.cpu_affinity_enabled)
1779 req = ha->req_q_map[0];
1784 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1786 ql_log(ql_log_warn, vha, 0x1062,
1787 "Failed to allocate login IOCB.\n");
1788 return QLA_MEMORY_ALLOC_FAILED;
1790 memset(lg, 0, sizeof(struct logio_entry_24xx));
1792 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1793 lg->entry_count = 1;
1794 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1795 lg->nport_handle = cpu_to_le16(loop_id);
1796 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1798 lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
1800 lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
1801 lg->port_id[0] = al_pa;
1802 lg->port_id[1] = area;
1803 lg->port_id[2] = domain;
1804 lg->vp_index = vha->vp_idx;
1805 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
1806 (ha->r_a_tov / 10 * 2) + 2);
1807 if (rval != QLA_SUCCESS) {
1808 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1809 "Failed to issue login IOCB (%x).\n", rval);
1810 } else if (lg->entry_status != 0) {
1811 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1812 "Failed to complete IOCB -- error status (%x).\n",
1814 rval = QLA_FUNCTION_FAILED;
1815 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1816 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1817 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1819 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1820 "Failed to complete IOCB -- completion status (%x) "
1821 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1825 case LSC_SCODE_PORTID_USED:
1826 mb[0] = MBS_PORT_ID_USED;
1827 mb[1] = LSW(iop[1]);
1829 case LSC_SCODE_NPORT_USED:
1830 mb[0] = MBS_LOOP_ID_USED;
1832 case LSC_SCODE_NOLINK:
1833 case LSC_SCODE_NOIOCB:
1834 case LSC_SCODE_NOXCB:
1835 case LSC_SCODE_CMD_FAILED:
1836 case LSC_SCODE_NOFABRIC:
1837 case LSC_SCODE_FW_NOT_READY:
1838 case LSC_SCODE_NOT_LOGGED_IN:
1839 case LSC_SCODE_NOPCB:
1840 case LSC_SCODE_ELS_REJECT:
1841 case LSC_SCODE_CMD_PARAM_ERR:
1842 case LSC_SCODE_NONPORT:
1843 case LSC_SCODE_LOGGED_IN:
1844 case LSC_SCODE_NOFLOGI_ACC:
1846 mb[0] = MBS_COMMAND_ERROR;
1850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
1851 "Done %s.\n", __func__);
1853 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1855 mb[0] = MBS_COMMAND_COMPLETE;
1857 if (iop[0] & BIT_4) {
1863 /* Passback COS information. */
1865 if (lg->io_parameter[7] || lg->io_parameter[8])
1866 mb[10] |= BIT_0; /* Class 2. */
1867 if (lg->io_parameter[9] || lg->io_parameter[10])
1868 mb[10] |= BIT_1; /* Class 3. */
1869 if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
1870 mb[10] |= BIT_7; /* Confirmed Completion
1875 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
1881 * qla2x00_login_fabric
1882 * Issue login fabric port mailbox command.
1885 * ha = adapter block pointer.
1886 * loop_id = device loop ID.
1887 * domain = device domain.
1888 * area = device area.
1889 * al_pa = device AL_PA.
1890 * status = pointer for return status.
1891 * opt = command options.
1892 * TARGET_QUEUE_LOCK must be released.
1893 * ADAPTER_STATE_LOCK must be released.
1896 * qla2x00 local function return status code.
1902 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1903 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1907 mbx_cmd_t *mcp = &mc;
1908 struct qla_hw_data *ha = vha->hw;
1910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
1911 "Entered %s.\n", __func__);
1913 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1914 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1915 if (HAS_EXTENDED_IDS(ha)) {
1916 mcp->mb[1] = loop_id;
1918 mcp->out_mb |= MBX_10;
1920 mcp->mb[1] = (loop_id << 8) | opt;
1922 mcp->mb[2] = domain;
1923 mcp->mb[3] = area << 8 | al_pa;
1925 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
1926 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1928 rval = qla2x00_mailbox_command(vha, mcp);
1930 /* Return mailbox statuses. */
1937 /* COS retrieved from Get-Port-Database mailbox command. */
1941 if (rval != QLA_SUCCESS) {
1942 /* RLU tmp code: need to change main mailbox_command function to
1943 * return ok even when the mailbox completion value is not
1944 * SUCCESS. The caller needs to be responsible to interpret
1945 * the return values of this mailbox command if we're not
1946 * to change too much of the existing code.
1948 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
1949 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
1950 mcp->mb[0] == 0x4006)
1954 ql_dbg(ql_dbg_mbx, vha, 0x1068,
1955 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
1956 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
1960 "Done %s.\n", __func__);
1967 * qla2x00_login_local_device
1968 * Issue login loop port mailbox command.
1971 * ha = adapter block pointer.
1972 * loop_id = device loop ID.
1973 * opt = command options.
1976 * Return status code.
1983 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1984 uint16_t *mb_ret, uint8_t opt)
1988 mbx_cmd_t *mcp = &mc;
1989 struct qla_hw_data *ha = vha->hw;
1991 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
1992 "Entered %s.\n", __func__);
1994 if (IS_FWI2_CAPABLE(ha))
1995 return qla24xx_login_fabric(vha, fcport->loop_id,
1996 fcport->d_id.b.domain, fcport->d_id.b.area,
1997 fcport->d_id.b.al_pa, mb_ret, opt);
1999 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2000 if (HAS_EXTENDED_IDS(ha))
2001 mcp->mb[1] = fcport->loop_id;
2003 mcp->mb[1] = fcport->loop_id << 8;
2005 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2006 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2007 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2009 rval = qla2x00_mailbox_command(vha, mcp);
2011 /* Return mailbox statuses. */
2012 if (mb_ret != NULL) {
2013 mb_ret[0] = mcp->mb[0];
2014 mb_ret[1] = mcp->mb[1];
2015 mb_ret[6] = mcp->mb[6];
2016 mb_ret[7] = mcp->mb[7];
2019 if (rval != QLA_SUCCESS) {
2020 /* AV tmp code: need to change main mailbox_command function to
2021 * return ok even when the mailbox completion value is not
2022 * SUCCESS. The caller needs to be responsible to interpret
2023 * the return values of this mailbox command if we're not
2024 * to change too much of the existing code.
2026 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2029 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2030 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2031 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2035 "Done %s.\n", __func__);
2042 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2043 uint8_t area, uint8_t al_pa)
2046 struct logio_entry_24xx *lg;
2048 struct qla_hw_data *ha = vha->hw;
2049 struct req_que *req;
2050 struct rsp_que *rsp;
2052 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2053 "Entered %s.\n", __func__);
2055 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2057 ql_log(ql_log_warn, vha, 0x106e,
2058 "Failed to allocate logout IOCB.\n");
2059 return QLA_MEMORY_ALLOC_FAILED;
2061 memset(lg, 0, sizeof(struct logio_entry_24xx));
2063 if (ql2xmaxqueues > 1)
2064 req = ha->req_q_map[0];
2068 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2069 lg->entry_count = 1;
2070 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2071 lg->nport_handle = cpu_to_le16(loop_id);
2073 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2075 lg->port_id[0] = al_pa;
2076 lg->port_id[1] = area;
2077 lg->port_id[2] = domain;
2078 lg->vp_index = vha->vp_idx;
2079 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2080 (ha->r_a_tov / 10 * 2) + 2);
2081 if (rval != QLA_SUCCESS) {
2082 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2083 "Failed to issue logout IOCB (%x).\n", rval);
2084 } else if (lg->entry_status != 0) {
2085 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2086 "Failed to complete IOCB -- error status (%x).\n",
2088 rval = QLA_FUNCTION_FAILED;
2089 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2090 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2091 "Failed to complete IOCB -- completion status (%x) "
2092 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2093 le32_to_cpu(lg->io_parameter[0]),
2094 le32_to_cpu(lg->io_parameter[1]));
2097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2098 "Done %s.\n", __func__);
2101 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2107 * qla2x00_fabric_logout
2108 * Issue logout fabric port mailbox command.
2111 * ha = adapter block pointer.
2112 * loop_id = device loop ID.
2113 * TARGET_QUEUE_LOCK must be released.
2114 * ADAPTER_STATE_LOCK must be released.
2117 * qla2x00 local function return status code.
2123 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2124 uint8_t area, uint8_t al_pa)
2128 mbx_cmd_t *mcp = &mc;
2130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2131 "Entered %s.\n", __func__);
2133 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2134 mcp->out_mb = MBX_1|MBX_0;
2135 if (HAS_EXTENDED_IDS(vha->hw)) {
2136 mcp->mb[1] = loop_id;
2138 mcp->out_mb |= MBX_10;
2140 mcp->mb[1] = loop_id << 8;
2143 mcp->in_mb = MBX_1|MBX_0;
2144 mcp->tov = MBX_TOV_SECONDS;
2146 rval = qla2x00_mailbox_command(vha, mcp);
2148 if (rval != QLA_SUCCESS) {
2150 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2151 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2154 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2155 "Done %s.\n", __func__);
2162 * qla2x00_full_login_lip
2163 * Issue full login LIP mailbox command.
2166 * ha = adapter block pointer.
2167 * TARGET_QUEUE_LOCK must be released.
2168 * ADAPTER_STATE_LOCK must be released.
2171 * qla2x00 local function return status code.
2177 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2181 mbx_cmd_t *mcp = &mc;
2183 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2184 "Entered %s.\n", __func__);
2186 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2187 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2190 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2192 mcp->tov = MBX_TOV_SECONDS;
2194 rval = qla2x00_mailbox_command(vha, mcp);
2196 if (rval != QLA_SUCCESS) {
2198 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2201 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2202 "Done %s.\n", __func__);
2209 * qla2x00_get_id_list
2212 * ha = adapter block pointer.
2215 * qla2x00 local function return status code.
2221 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2226 mbx_cmd_t *mcp = &mc;
2228 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2229 "Entered %s.\n", __func__);
2231 if (id_list == NULL)
2232 return QLA_FUNCTION_FAILED;
2234 mcp->mb[0] = MBC_GET_ID_LIST;
2235 mcp->out_mb = MBX_0;
2236 if (IS_FWI2_CAPABLE(vha->hw)) {
2237 mcp->mb[2] = MSW(id_list_dma);
2238 mcp->mb[3] = LSW(id_list_dma);
2239 mcp->mb[6] = MSW(MSD(id_list_dma));
2240 mcp->mb[7] = LSW(MSD(id_list_dma));
2242 mcp->mb[9] = vha->vp_idx;
2243 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2245 mcp->mb[1] = MSW(id_list_dma);
2246 mcp->mb[2] = LSW(id_list_dma);
2247 mcp->mb[3] = MSW(MSD(id_list_dma));
2248 mcp->mb[6] = LSW(MSD(id_list_dma));
2249 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2251 mcp->in_mb = MBX_1|MBX_0;
2252 mcp->tov = MBX_TOV_SECONDS;
2254 rval = qla2x00_mailbox_command(vha, mcp);
2256 if (rval != QLA_SUCCESS) {
2258 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2260 *entries = mcp->mb[1];
2261 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2262 "Done %s.\n", __func__);
2269 * qla2x00_get_resource_cnts
2270 * Get current firmware resource counts.
2273 * ha = adapter block pointer.
2276 * qla2x00 local function return status code.
2282 qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2283 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
2284 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
2288 mbx_cmd_t *mcp = &mc;
2290 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2291 "Entered %s.\n", __func__);
2293 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2294 mcp->out_mb = MBX_0;
2295 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2296 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
2297 mcp->in_mb |= MBX_12;
2298 mcp->tov = MBX_TOV_SECONDS;
2300 rval = qla2x00_mailbox_command(vha, mcp);
2302 if (rval != QLA_SUCCESS) {
2304 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2305 "Failed mb[0]=%x.\n", mcp->mb[0]);
2307 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2308 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2309 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2310 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2311 mcp->mb[11], mcp->mb[12]);
2314 *cur_xchg_cnt = mcp->mb[3];
2316 *orig_xchg_cnt = mcp->mb[6];
2318 *cur_iocb_cnt = mcp->mb[7];
2320 *orig_iocb_cnt = mcp->mb[10];
2321 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2322 *max_npiv_vports = mcp->mb[11];
2323 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
2324 *max_fcfs = mcp->mb[12];
2331 * qla2x00_get_fcal_position_map
2332 * Get FCAL (LILP) position map using mailbox command
2335 * ha = adapter state pointer.
2336 * pos_map = buffer pointer (can be NULL).
2339 * qla2x00 local function return status code.
2345 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2349 mbx_cmd_t *mcp = &mc;
2351 dma_addr_t pmap_dma;
2352 struct qla_hw_data *ha = vha->hw;
2354 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2355 "Entered %s.\n", __func__);
2357 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2359 ql_log(ql_log_warn, vha, 0x1080,
2360 "Memory alloc failed.\n");
2361 return QLA_MEMORY_ALLOC_FAILED;
2363 memset(pmap, 0, FCAL_MAP_SIZE);
2365 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2366 mcp->mb[2] = MSW(pmap_dma);
2367 mcp->mb[3] = LSW(pmap_dma);
2368 mcp->mb[6] = MSW(MSD(pmap_dma));
2369 mcp->mb[7] = LSW(MSD(pmap_dma));
2370 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2371 mcp->in_mb = MBX_1|MBX_0;
2372 mcp->buf_size = FCAL_MAP_SIZE;
2373 mcp->flags = MBX_DMA_IN;
2374 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2375 rval = qla2x00_mailbox_command(vha, mcp);
2377 if (rval == QLA_SUCCESS) {
2378 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2379 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2380 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2381 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2385 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2387 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2389 if (rval != QLA_SUCCESS) {
2390 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2392 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2393 "Done %s.\n", __func__);
2400 * qla2x00_get_link_status
2403 * ha = adapter block pointer.
2404 * loop_id = device loop ID.
2405 * ret_buf = pointer to link status return buffer.
2409 * BIT_0 = mem alloc error.
2410 * BIT_1 = mailbox error.
2413 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2414 struct link_statistics *stats, dma_addr_t stats_dma)
2418 mbx_cmd_t *mcp = &mc;
2419 uint32_t *siter, *diter, dwords;
2420 struct qla_hw_data *ha = vha->hw;
2422 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2423 "Entered %s.\n", __func__);
2425 mcp->mb[0] = MBC_GET_LINK_STATUS;
2426 mcp->mb[2] = MSW(stats_dma);
2427 mcp->mb[3] = LSW(stats_dma);
2428 mcp->mb[6] = MSW(MSD(stats_dma));
2429 mcp->mb[7] = LSW(MSD(stats_dma));
2430 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2432 if (IS_FWI2_CAPABLE(ha)) {
2433 mcp->mb[1] = loop_id;
2436 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2437 mcp->in_mb |= MBX_1;
2438 } else if (HAS_EXTENDED_IDS(ha)) {
2439 mcp->mb[1] = loop_id;
2441 mcp->out_mb |= MBX_10|MBX_1;
2443 mcp->mb[1] = loop_id << 8;
2444 mcp->out_mb |= MBX_1;
2446 mcp->tov = MBX_TOV_SECONDS;
2447 mcp->flags = IOCTL_CMD;
2448 rval = qla2x00_mailbox_command(vha, mcp);
2450 if (rval == QLA_SUCCESS) {
2451 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2452 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2453 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2454 rval = QLA_FUNCTION_FAILED;
2456 /* Copy over data -- firmware data is LE. */
2457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2458 "Done %s.\n", __func__);
2459 dwords = offsetof(struct link_statistics, unused1) / 4;
2460 siter = diter = &stats->link_fail_cnt;
2462 *diter++ = le32_to_cpu(*siter++);
2466 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2473 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2474 dma_addr_t stats_dma)
2478 mbx_cmd_t *mcp = &mc;
2479 uint32_t *siter, *diter, dwords;
2481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2482 "Entered %s.\n", __func__);
2484 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2485 mcp->mb[2] = MSW(stats_dma);
2486 mcp->mb[3] = LSW(stats_dma);
2487 mcp->mb[6] = MSW(MSD(stats_dma));
2488 mcp->mb[7] = LSW(MSD(stats_dma));
2489 mcp->mb[8] = sizeof(struct link_statistics) / 4;
2490 mcp->mb[9] = vha->vp_idx;
2492 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2493 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2494 mcp->tov = MBX_TOV_SECONDS;
2495 mcp->flags = IOCTL_CMD;
2496 rval = qla2x00_mailbox_command(vha, mcp);
2498 if (rval == QLA_SUCCESS) {
2499 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2500 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2501 "Failed mb[0]=%x.\n", mcp->mb[0]);
2502 rval = QLA_FUNCTION_FAILED;
2504 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2505 "Done %s.\n", __func__);
2506 /* Copy over data -- firmware data is LE. */
2507 dwords = sizeof(struct link_statistics) / 4;
2508 siter = diter = &stats->link_fail_cnt;
2510 *diter++ = le32_to_cpu(*siter++);
2514 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2521 qla24xx_abort_command(srb_t *sp)
2524 unsigned long flags = 0;
2526 struct abort_entry_24xx *abt;
2529 fc_port_t *fcport = sp->fcport;
2530 struct scsi_qla_host *vha = fcport->vha;
2531 struct qla_hw_data *ha = vha->hw;
2532 struct req_que *req = vha->req;
2534 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2535 "Entered %s.\n", __func__);
2537 spin_lock_irqsave(&ha->hardware_lock, flags);
2538 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2539 if (req->outstanding_cmds[handle] == sp)
2542 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2543 if (handle == MAX_OUTSTANDING_COMMANDS) {
2544 /* Command not found. */
2545 return QLA_FUNCTION_FAILED;
2548 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2550 ql_log(ql_log_warn, vha, 0x108d,
2551 "Failed to allocate abort IOCB.\n");
2552 return QLA_MEMORY_ALLOC_FAILED;
2554 memset(abt, 0, sizeof(struct abort_entry_24xx));
2556 abt->entry_type = ABORT_IOCB_TYPE;
2557 abt->entry_count = 1;
2558 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2559 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2560 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
2561 abt->port_id[0] = fcport->d_id.b.al_pa;
2562 abt->port_id[1] = fcport->d_id.b.area;
2563 abt->port_id[2] = fcport->d_id.b.domain;
2564 abt->vp_index = fcport->vha->vp_idx;
2566 abt->req_que_no = cpu_to_le16(req->id);
2568 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2569 if (rval != QLA_SUCCESS) {
2570 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2571 "Failed to issue IOCB (%x).\n", rval);
2572 } else if (abt->entry_status != 0) {
2573 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2574 "Failed to complete IOCB -- error status (%x).\n",
2576 rval = QLA_FUNCTION_FAILED;
2577 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2578 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2579 "Failed to complete IOCB -- completion status (%x).\n",
2580 le16_to_cpu(abt->nport_handle));
2581 rval = QLA_FUNCTION_FAILED;
2583 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
2584 "Done %s.\n", __func__);
2587 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
2592 struct tsk_mgmt_cmd {
2594 struct tsk_mgmt_entry tsk;
2595 struct sts_entry_24xx sts;
2600 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2601 unsigned int l, int tag)
2604 struct tsk_mgmt_cmd *tsk;
2605 struct sts_entry_24xx *sts;
2607 scsi_qla_host_t *vha;
2608 struct qla_hw_data *ha;
2609 struct req_que *req;
2610 struct rsp_que *rsp;
2616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
2617 "Entered %s.\n", __func__);
2619 if (ha->flags.cpu_affinity_enabled)
2620 rsp = ha->rsp_q_map[tag + 1];
2623 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2625 ql_log(ql_log_warn, vha, 0x1093,
2626 "Failed to allocate task management IOCB.\n");
2627 return QLA_MEMORY_ALLOC_FAILED;
2629 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
2631 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2632 tsk->p.tsk.entry_count = 1;
2633 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2634 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2635 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2636 tsk->p.tsk.control_flags = cpu_to_le32(type);
2637 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2638 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2639 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2640 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
2641 if (type == TCF_LUN_RESET) {
2642 int_to_scsilun(l, &tsk->p.tsk.lun);
2643 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
2644 sizeof(tsk->p.tsk.lun));
2648 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2649 if (rval != QLA_SUCCESS) {
2650 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2651 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2652 } else if (sts->entry_status != 0) {
2653 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2654 "Failed to complete IOCB -- error status (%x).\n",
2656 rval = QLA_FUNCTION_FAILED;
2657 } else if (sts->comp_status !=
2658 __constant_cpu_to_le16(CS_COMPLETE)) {
2659 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2660 "Failed to complete IOCB -- completion status (%x).\n",
2661 le16_to_cpu(sts->comp_status));
2662 rval = QLA_FUNCTION_FAILED;
2663 } else if (le16_to_cpu(sts->scsi_status) &
2664 SS_RESPONSE_INFO_LEN_VALID) {
2665 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
2667 "Ignoring inconsistent data length -- not enough "
2668 "response info (%d).\n",
2669 le32_to_cpu(sts->rsp_data_len));
2670 } else if (sts->data[3]) {
2671 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2672 "Failed to complete IOCB -- response (%x).\n",
2674 rval = QLA_FUNCTION_FAILED;
2678 /* Issue marker IOCB. */
2679 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2680 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2681 if (rval2 != QLA_SUCCESS) {
2682 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2683 "Failed to issue marker IOCB (%x).\n", rval2);
2685 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
2686 "Done %s.\n", __func__);
2689 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
2695 qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2697 struct qla_hw_data *ha = fcport->vha->hw;
2699 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2700 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
2702 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2706 qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2708 struct qla_hw_data *ha = fcport->vha->hw;
2710 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2711 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
2713 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2717 qla2x00_system_error(scsi_qla_host_t *vha)
2721 mbx_cmd_t *mcp = &mc;
2722 struct qla_hw_data *ha = vha->hw;
2724 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2725 return QLA_FUNCTION_FAILED;
2727 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
2728 "Entered %s.\n", __func__);
2730 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2731 mcp->out_mb = MBX_0;
2735 rval = qla2x00_mailbox_command(vha, mcp);
2737 if (rval != QLA_SUCCESS) {
2738 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2740 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
2741 "Done %s.\n", __func__);
/*
 * qla2x00_set_serdes_params() - program SerDes transceiver parameters via
 * the MBC_SERDES_PARAMS mailbox command. Each sw_em_* value is OR'd with
 * BIT_15 before being written to mb[2..4] (presumably a "valid" flag on the
 * per-rate emphasis value — TODO confirm against the firmware spec).
 * NOTE(review): interior lines elided in this listing (rval/mc declarations,
 * mb[1] setup, in_mb, return).
 */
2748 * qla2x00_set_serdes_params() -
2754 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2755 uint16_t sw_em_2g, uint16_t sw_em_4g)
2759 mbx_cmd_t *mcp = &mc;
2761 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
2762 "Entered %s.\n", __func__);
2764 mcp->mb[0] = MBC_SERDES_PARAMS;
2766 mcp->mb[2] = sw_em_1g | BIT_15;
2767 mcp->mb[3] = sw_em_2g | BIT_15;
2768 mcp->mb[4] = sw_em_4g | BIT_15;
2769 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2771 mcp->tov = MBX_TOV_SECONDS;
2773 rval = qla2x00_mailbox_command(vha, mcp);
2775 if (rval != QLA_SUCCESS) {
2777 ql_dbg(ql_dbg_mbx, vha, 0x109f,
2778 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2781 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
2782 "Done %s.\n", __func__);
/*
 * qla2x00_stop_firmware() - issue MBC_STOP_FIRMWARE to halt the RISC
 * firmware. FWI2-capable adapters only. Maps a firmware
 * MBS_INVALID_COMMAND status onto QLA_INVALID_COMMAND for callers.
 * NOTE(review): interior lines elided in this listing.
 */
2789 qla2x00_stop_firmware(scsi_qla_host_t *vha)
2793 mbx_cmd_t *mcp = &mc;
2795 if (!IS_FWI2_CAPABLE(vha->hw))
2796 return QLA_FUNCTION_FAILED;
2798 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
2799 "Entered %s.\n", __func__);
2801 mcp->mb[0] = MBC_STOP_FIRMWARE;
2803 mcp->out_mb = MBX_1|MBX_0;
2807 rval = qla2x00_mailbox_command(vha, mcp);
2809 if (rval != QLA_SUCCESS) {
2810 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
2811 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2812 rval = QLA_INVALID_COMMAND;
2814 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
2815 "Done %s.\n", __func__);
/*
 * qla2x00_enable_eft_trace() - enable Extended Firmware Trace via
 * MBC_TRACE_CONTROL/TC_EFT_ENABLE. The 64-bit DMA address of the trace
 * buffer is split across mb[2..5] (LSW/MSW of each 32-bit half); mb[6]
 * carries the buffer count and AEN posting is disabled (TC_AEN_DISABLE).
 * Bails out if not FWI2-capable or if the PCI channel is offline.
 * NOTE(review): interior lines elided in this listing.
 */
2822 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2827 mbx_cmd_t *mcp = &mc;
2829 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
2830 "Entered %s.\n", __func__);
2832 if (!IS_FWI2_CAPABLE(vha->hw))
2833 return QLA_FUNCTION_FAILED;
2835 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2836 return QLA_FUNCTION_FAILED;
2838 mcp->mb[0] = MBC_TRACE_CONTROL;
2839 mcp->mb[1] = TC_EFT_ENABLE;
2840 mcp->mb[2] = LSW(eft_dma);
2841 mcp->mb[3] = MSW(eft_dma);
2842 mcp->mb[4] = LSW(MSD(eft_dma));
2843 mcp->mb[5] = MSW(MSD(eft_dma));
2844 mcp->mb[6] = buffers;
2845 mcp->mb[7] = TC_AEN_DISABLE;
2846 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2847 mcp->in_mb = MBX_1|MBX_0;
2848 mcp->tov = MBX_TOV_SECONDS;
2850 rval = qla2x00_mailbox_command(vha, mcp);
2851 if (rval != QLA_SUCCESS) {
2852 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
2853 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2854 rval, mcp->mb[0], mcp->mb[1]);
2856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
2857 "Done %s.\n", __func__);
/*
 * qla2x00_disable_eft_trace() - turn Extended Firmware Trace off via
 * MBC_TRACE_CONTROL/TC_EFT_DISABLE. FWI2-capable adapters only; no-op
 * failure when the PCI channel is offline.
 * NOTE(review): interior lines elided in this listing.
 */
2864 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2868 mbx_cmd_t *mcp = &mc;
2870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
2871 "Entered %s.\n", __func__);
2873 if (!IS_FWI2_CAPABLE(vha->hw))
2874 return QLA_FUNCTION_FAILED;
2876 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2877 return QLA_FUNCTION_FAILED;
2879 mcp->mb[0] = MBC_TRACE_CONTROL;
2880 mcp->mb[1] = TC_EFT_DISABLE;
2881 mcp->out_mb = MBX_1|MBX_0;
2882 mcp->in_mb = MBX_1|MBX_0;
2883 mcp->tov = MBX_TOV_SECONDS;
2885 rval = qla2x00_mailbox_command(vha, mcp);
2886 if (rval != QLA_SUCCESS) {
2887 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
2888 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2889 rval, mcp->mb[0], mcp->mb[1]);
2891 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
2892 "Done %s.\n", __func__);
/*
 * qla2x00_enable_fce_trace() - enable Fibre Channel Event tracing via
 * MBC_TRACE_CONTROL/TC_FCE_ENABLE. Supported only on 25xx/81xx/83xx.
 * Buffer DMA address is split across mb[2..5]; mb[9]/mb[10] carry the
 * default RX/TX size thresholds. On success the first 8 returned mailbox
 * registers are copied back to @mb for the caller.
 * NOTE(review): interior lines elided in this listing (e.g. mb[8], the
 * MBX_1|MBX_0 continuation of out_mb, and the *dwords out-parameter store).
 */
2899 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2900 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
2904 mbx_cmd_t *mcp = &mc;
2906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
2907 "Entered %s.\n", __func__);
2909 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
2910 !IS_QLA83XX(vha->hw))
2911 return QLA_FUNCTION_FAILED;
2913 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2914 return QLA_FUNCTION_FAILED;
2916 mcp->mb[0] = MBC_TRACE_CONTROL;
2917 mcp->mb[1] = TC_FCE_ENABLE;
2918 mcp->mb[2] = LSW(fce_dma);
2919 mcp->mb[3] = MSW(fce_dma);
2920 mcp->mb[4] = LSW(MSD(fce_dma));
2921 mcp->mb[5] = MSW(MSD(fce_dma));
2922 mcp->mb[6] = buffers;
2923 mcp->mb[7] = TC_AEN_DISABLE;
2925 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
2926 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
2927 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2929 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2930 mcp->tov = MBX_TOV_SECONDS;
2932 rval = qla2x00_mailbox_command(vha, mcp);
2933 if (rval != QLA_SUCCESS) {
2934 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
2935 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2936 rval, mcp->mb[0], mcp->mb[1]);
2938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
2939 "Done %s.\n", __func__);
2942 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * qla2x00_disable_fce_trace() - stop FCE tracing via MBC_TRACE_CONTROL/
 * TC_FCE_DISABLE (mb[2] = TC_FCE_DISABLE_TRACE). On success, reassembles
 * the 64-bit write pointer from mb[5..2] into *wr and the 64-bit read
 * pointer from mb[9..6] into *rd (16-bit registers shifted into place).
 * NOTE(review): interior lines elided in this listing.
 */
2951 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2955 mbx_cmd_t *mcp = &mc;
2957 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
2958 "Entered %s.\n", __func__);
2960 if (!IS_FWI2_CAPABLE(vha->hw))
2961 return QLA_FUNCTION_FAILED;
2963 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2964 return QLA_FUNCTION_FAILED;
2966 mcp->mb[0] = MBC_TRACE_CONTROL;
2967 mcp->mb[1] = TC_FCE_DISABLE;
2968 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
2969 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2970 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2972 mcp->tov = MBX_TOV_SECONDS;
2974 rval = qla2x00_mailbox_command(vha, mcp);
2975 if (rval != QLA_SUCCESS) {
2976 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
2977 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2978 rval, mcp->mb[0], mcp->mb[1]);
2980 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
2981 "Done %s.\n", __func__);
2984 *wr = (uint64_t) mcp->mb[5] << 48 |
2985 (uint64_t) mcp->mb[4] << 32 |
2986 (uint64_t) mcp->mb[3] << 16 |
2987 (uint64_t) mcp->mb[2];
2989 *rd = (uint64_t) mcp->mb[9] << 48 |
2990 (uint64_t) mcp->mb[8] << 32 |
2991 (uint64_t) mcp->mb[7] << 16 |
2992 (uint64_t) mcp->mb[6];
/*
 * qla2x00_get_idma_speed() - query the iIDMA port speed for @loop_id via
 * MBC_PORT_PARAMS (mb[2]/mb[3] zeroed = read operation; mb[9] selects the
 * vport). On success the speed from mb[3] is stored in *port_speed.
 * NOTE(review): interior lines elided in this listing — the code that
 * copies mcp->mb[] back into the @mb status array is not visible here.
 */
2999 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3000 uint16_t *port_speed, uint16_t *mb)
3004 mbx_cmd_t *mcp = &mc;
3006 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3007 "Entered %s.\n", __func__);
3009 if (!IS_IIDMA_CAPABLE(vha->hw))
3010 return QLA_FUNCTION_FAILED;
3012 mcp->mb[0] = MBC_PORT_PARAMS;
3013 mcp->mb[1] = loop_id;
3014 mcp->mb[2] = mcp->mb[3] = 0;
3015 mcp->mb[9] = vha->vp_idx;
3016 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3017 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3018 mcp->tov = MBX_TOV_SECONDS;
3020 rval = qla2x00_mailbox_command(vha, mcp);
3022 /* Return mailbox statuses. */
3029 if (rval != QLA_SUCCESS) {
3030 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3032 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3033 "Done %s.\n", __func__);
3035 *port_speed = mcp->mb[3];
/*
 * qla2x00_set_idma_speed() - program the iIDMA port speed for @loop_id
 * via MBC_PORT_PARAMS. CNA-capable parts accept a 6-bit speed field in
 * mb[3]; others are masked to 3 bits. mb[9] selects the vport.
 * NOTE(review): interior lines elided in this listing — mb[2] setup and
 * the copy-back into the @mb status array are not visible here.
 */
3042 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3043 uint16_t port_speed, uint16_t *mb)
3047 mbx_cmd_t *mcp = &mc;
3049 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3050 "Entered %s.\n", __func__);
3052 if (!IS_IIDMA_CAPABLE(vha->hw))
3053 return QLA_FUNCTION_FAILED;
3055 mcp->mb[0] = MBC_PORT_PARAMS;
3056 mcp->mb[1] = loop_id;
3058 if (IS_CNA_CAPABLE(vha->hw))
3059 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3061 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
3062 mcp->mb[9] = vha->vp_idx;
3063 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3064 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3065 mcp->tov = MBX_TOV_SECONDS;
3067 rval = qla2x00_mailbox_command(vha, mcp);
3069 /* Return mailbox statuses. */
3076 if (rval != QLA_SUCCESS) {
3077 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3078 "Failed=%x.\n", rval);
3080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3081 "Done %s.\n", __func__);
/*
 * qla24xx_report_id_acquisition() - handle a Report-ID Acquisition IOCB
 * (vp_rpt_id_entry_24xx) from the response queue. Format 0 entries log
 * VP counts and the primary port id; format 1 entries locate the matching
 * vport (vp_list walk under vport_slock), record its acquired d_id, and
 * defer configuration to the DPC thread (we are still on the response
 * queue here, so only flag bits are set and the DPC is woken).
 * NOTE(review): interior lines elided in this listing — vp_idx extraction,
 * several early-return branches, and loop-break handling are not visible.
 */
3088 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3089 struct vp_rpt_id_entry_24xx *rptid_entry)
3092 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
3093 struct qla_hw_data *ha = vha->hw;
3094 scsi_qla_host_t *vp;
3095 unsigned long flags;
3097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3098 "Entered %s.\n", __func__);
3100 if (rptid_entry->entry_status != 0)
3103 if (rptid_entry->format == 0) {
3104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
3105 "Format 0 : Number of VPs setup %d, number of "
3106 "VPs acquired %d.\n",
3107 MSB(le16_to_cpu(rptid_entry->vp_count)),
3108 LSB(le16_to_cpu(rptid_entry->vp_count)));
3109 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
3110 "Primary port id %02x%02x%02x.\n",
3111 rptid_entry->port_id[2], rptid_entry->port_id[1],
3112 rptid_entry->port_id[0]);
3113 } else if (rptid_entry->format == 1) {
3115 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
3116 "Format 1: VP[%d] enabled - status %d - with "
3117 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
3118 rptid_entry->port_id[2], rptid_entry->port_id[1],
3119 rptid_entry->port_id[0]);
3122 if (vp_idx == 0 && (MSB(stat) != 1))
3125 if (MSB(stat) != 0 && MSB(stat) != 2) {
3126 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3127 "Could not acquire ID for VP[%d].\n", vp_idx);
3131 spin_lock_irqsave(&ha->vport_slock, flags);
3132 list_for_each_entry(vp, &ha->vp_list, list)
3133 if (vp_idx == vp->vp_idx)
3135 spin_unlock_irqrestore(&ha->vport_slock, flags);
3140 vp->d_id.b.domain = rptid_entry->port_id[2];
3141 vp->d_id.b.area = rptid_entry->port_id[1];
3142 vp->d_id.b.al_pa = rptid_entry->port_id[0];
3145 * Cannot configure here as we are still sitting on the
3146 * response queue. Handle it in dpc context.
3148 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3151 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3152 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3153 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3154 qla2xxx_wake_dpc(vha);
/*
 * NOTE(review): interior lines elided in this listing (rval declaration,
 * NULL check of the dma_pool_alloc result, closing braces, return).
 */
3159 * qla24xx_modify_vp_config
3160 * Change VP configuration for vha
3163 * vha = adapter block pointer.
3166 * qla2xxx local function return status code.
3172 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3175 struct vp_config_entry_24xx *vpmod;
3176 dma_addr_t vpmod_dma;
3177 struct qla_hw_data *ha = vha->hw;
3178 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3180 /* This can be called by the parent */
3182 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3183 "Entered %s.\n", __func__);
3185 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3187 ql_log(ql_log_warn, vha, 0x10bc,
3188 "Failed to allocate modify VP IOCB.\n");
3189 return QLA_MEMORY_ALLOC_FAILED;
3192 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
3193 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3194 vpmod->entry_count = 1;
3195 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
3196 vpmod->vp_count = 1;
3197 vpmod->vp_index1 = vha->vp_idx;
3198 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3200 qlt_modify_vp_config(vha, vpmod);
3202 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3203 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3204 vpmod->entry_count = 1;
3206 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
3207 if (rval != QLA_SUCCESS) {
3208 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
3209 "Failed to issue VP config IOCB (%x).\n", rval);
/*
 * NOTE(review): the sibling qla24xx_control_vp() tests entry_status in the
 * branch that logs "error status"; here the same log is gated on
 * comp_status != 0, which makes the CS_COMPLETE (0) comparison below
 * unreachable. Looks like entry_status was intended — confirm upstream.
 */
3210 } else if (vpmod->comp_status != 0) {
3211 ql_dbg(ql_dbg_mbx, vha, 0x10be,
3212 "Failed to complete IOCB -- error status (%x).\n",
3213 vpmod->comp_status);
3214 rval = QLA_FUNCTION_FAILED;
3215 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3216 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3217 "Failed to complete IOCB -- completion status (%x).\n",
3218 le16_to_cpu(vpmod->comp_status));
3219 rval = QLA_FUNCTION_FAILED;
3222 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3223 "Done %s.\n", __func__);
3224 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3226 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
/*
 * NOTE(review): interior lines elided in this listing (rval/map/pos/vce_dma
 * declarations, NULL check of the dma_pool_alloc result, braces, return).
 *
 * Fix in this revision: the "completion status" log message read
 * "Failed to complet IOCB" — corrected the typo to "complete" so the
 * message matches the sibling error-status log above it. No other token
 * changed.
 */
3232 * qla24xx_control_vp
3233 * Enable a virtual port for given host
3236 * ha = adapter block pointer.
3237 * vhba = virtual adapter (unused)
3238 * index = index number for enabled VP
3241 * qla2xxx local function return status code.
3247 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3251 struct vp_ctrl_entry_24xx *vce;
3253 struct qla_hw_data *ha = vha->hw;
3254 int vp_index = vha->vp_idx;
3255 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3257 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3258 "Entered %s enabling index %d.\n", __func__, vp_index);
3260 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3261 return QLA_PARAMETER_ERROR;
3263 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3265 ql_log(ql_log_warn, vha, 0x10c2,
3266 "Failed to allocate VP control IOCB.\n");
3267 return QLA_MEMORY_ALLOC_FAILED;
3269 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
3271 vce->entry_type = VP_CTRL_IOCB_TYPE;
3272 vce->entry_count = 1;
3273 vce->command = cpu_to_le16(cmd);
3274 vce->vp_count = __constant_cpu_to_le16(1);
3276 /* index map in firmware starts with 1; decrement index
3277 * this is ok as we never use index 0
3279 map = (vp_index - 1) / 8;
3280 pos = (vp_index - 1) & 7;
3281 mutex_lock(&ha->vport_lock);
3282 vce->vp_idx_map[map] |= 1 << pos;
3283 mutex_unlock(&ha->vport_lock);
3285 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3286 if (rval != QLA_SUCCESS) {
3287 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3288 "Failed to issue VP control IOCB (%x).\n", rval);
3289 } else if (vce->entry_status != 0) {
3290 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3291 "Failed to complete IOCB -- error status (%x).\n",
3293 rval = QLA_FUNCTION_FAILED;
3294 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3295 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3296 "Failed to complete IOCB -- completion status (%x).\n",
3297 le16_to_cpu(vce->comp_status));
3298 rval = QLA_FUNCTION_FAILED;
3300 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
3301 "Done %s.\n", __func__);
3304 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
/*
 * NOTE(review): interior lines elided in this listing (rval/mc
 * declarations, the body of the mb[0] != MBS_COMMAND_COMPLETE branch,
 * closing braces, return).
 */
3310 * qla2x00_send_change_request
3311 * Receive or disable RSCN request from fabric controller
3314 * ha = adapter block pointer
3315 * format = registration format:
3317 * 1 - Fabric detected registration
3318 * 2 - N_port detected registration
3319 * 3 - Full registration
3320 * FF - clear registration
3321 * vp_idx = Virtual port index
3324 * qla2x00 local function return status code.
3331 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3336 mbx_cmd_t *mcp = &mc;
3338 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
3339 "Entered %s.\n", __func__);
3341 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3342 mcp->mb[1] = format;
3343 mcp->mb[9] = vp_idx;
3344 mcp->out_mb = MBX_9|MBX_1|MBX_0;
3345 mcp->in_mb = MBX_0|MBX_1;
3346 mcp->tov = MBX_TOV_SECONDS;
3348 rval = qla2x00_mailbox_command(vha, mcp);
3350 if (rval == QLA_SUCCESS) {
3351 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * qla2x00_dump_ram() - dump RISC RAM starting at @addr into the host
 * buffer at @req_dma. Uses the extended command (with mb[8] = high word
 * of the address) when the address exceeds 16 bits or the adapter is
 * FWI2-capable; otherwise the legacy MBC_DUMP_RISC_RAM. Transfer size is
 * 32-bit (mb[4]/mb[5]) on FWI2 parts, 16-bit (mb[4]) on legacy parts.
 * NOTE(review): interior lines elided in this listing (in_mb setup,
 * return, braces).
 */
3361 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3366 mbx_cmd_t *mcp = &mc;
3368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
3369 "Entered %s.\n", __func__);
3371 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3372 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
3373 mcp->mb[8] = MSW(addr);
3374 mcp->out_mb = MBX_8|MBX_0;
3376 mcp->mb[0] = MBC_DUMP_RISC_RAM;
3377 mcp->out_mb = MBX_0;
3379 mcp->mb[1] = LSW(addr);
3380 mcp->mb[2] = MSW(req_dma);
3381 mcp->mb[3] = LSW(req_dma);
3382 mcp->mb[6] = MSW(MSD(req_dma));
3383 mcp->mb[7] = LSW(MSD(req_dma));
3384 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
3385 if (IS_FWI2_CAPABLE(vha->hw)) {
3386 mcp->mb[4] = MSW(size);
3387 mcp->mb[5] = LSW(size);
3388 mcp->out_mb |= MBX_5|MBX_4;
3390 mcp->mb[4] = LSW(size);
3391 mcp->out_mb |= MBX_4;
3395 mcp->tov = MBX_TOV_SECONDS;
3397 rval = qla2x00_mailbox_command(vha, mcp);
3399 if (rval != QLA_SUCCESS) {
3400 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3401 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3403 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
3404 "Done %s.\n", __func__);
/*
 * Overlay for the CS84xx verify-chip exchange: the request and response
 * IOCBs occupy the same DMA buffer (the firmware writes the response over
 * the request). NOTE(review): the union/closing lines are elided in this
 * listing.
 */
3409 /* 84XX Support **************************************************************/
3411 struct cs84xx_mgmt_cmd {
3413 struct verify_chip_entry_84xx req;
3414 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip() - run the CS84xx Verify-Chip IOCB handshake,
 * optionally updating the companion chip's firmware. On a firmware-update
 * failure the loop retries once with VCO_DONT_UPDATE_FW set (verify only).
 * status[0] receives the completion status, status[1] the failure code
 * when status[0] == CS_VCS_CHIP_FAILURE. On success the new operational
 * firmware version is recorded in ha->cs84xx under access_lock.
 * NOTE(review): interior lines elided in this listing (retry loop
 * header, NULL check of mn, local declarations, braces, return).
 */
3419 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3422 struct cs84xx_mgmt_cmd *mn;
3425 unsigned long flags;
3426 struct qla_hw_data *ha = vha->hw;
3428 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
3429 "Entered %s.\n", __func__);
3431 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3433 return QLA_MEMORY_ALLOC_FAILED;
3437 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
3438 /* Diagnostic firmware? */
3439 /* options |= MENLO_DIAG_FW; */
3440 /* We update the firmware with only one data sequence. */
3441 options |= VCO_END_OF_DATA;
3445 memset(mn, 0, sizeof(*mn));
3446 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
3447 mn->p.req.entry_count = 1;
3448 mn->p.req.options = cpu_to_le16(options);
3450 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3451 "Dump of Verify Request.\n");
3452 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3453 (uint8_t *)mn, sizeof(*mn));
/* 120 s timeout: firmware update on the companion chip is slow. */
3455 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3456 if (rval != QLA_SUCCESS) {
3457 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3458 "Failed to issue verify IOCB (%x).\n", rval);
3462 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3463 "Dump of Verify Response.\n");
3464 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3465 (uint8_t *)mn, sizeof(*mn));
3467 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3468 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3469 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3470 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
3471 "cs=%x fc=%x.\n", status[0], status[1]);
3473 if (status[0] != CS_COMPLETE) {
3474 rval = QLA_FUNCTION_FAILED;
3475 if (!(options & VCO_DONT_UPDATE_FW)) {
3476 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3477 "Firmware update failed. Retrying "
3478 "without update firmware.\n");
3479 options |= VCO_DONT_UPDATE_FW;
3480 options &= ~VCO_FORCE_UPDATE;
3484 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
3485 "Firmware updated to %x.\n",
3486 le32_to_cpu(mn->p.rsp.fw_ver));
3488 /* NOTE: we only update OP firmware. */
3489 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
3490 ha->cs84xx->op_fw_version =
3491 le32_to_cpu(mn->p.rsp.fw_ver);
3492 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
3498 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3500 if (rval != QLA_SUCCESS) {
3501 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
3502 "Failed=%x.\n", rval);
3504 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
3505 "Done %s.\n", __func__);
/*
 * qla25xx_init_req_que() - initialize a multiqueue request queue via
 * MBC_INITIALIZE_MULTIQ. Programs the queue DMA ring (mb[2..7]), length,
 * paired rsp queue id, QoS, vport index and RID, then zeroes the
 * memory-mapped in/out pointer registers (under hardware_lock) before
 * issuing the mailbox command. mb[1] BIT_0 in req->options means the
 * registers are not reset here. 81xx/83xx return extra status in mb[1];
 * 83xx additionally uses MBX_15 out and MBX_9..7 in (SR-IOV debug note).
 * NOTE(review): interior lines elided in this listing (in_mb base value,
 * mb[14], que in/out index setup, braces, return).
 */
3512 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3515 unsigned long flags;
3517 mbx_cmd_t *mcp = &mc;
3518 struct device_reg_25xxmq __iomem *reg;
3519 struct qla_hw_data *ha = vha->hw;
3521 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
3522 "Entered %s.\n", __func__);
3524 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3525 mcp->mb[1] = req->options;
3526 mcp->mb[2] = MSW(LSD(req->dma));
3527 mcp->mb[3] = LSW(LSD(req->dma));
3528 mcp->mb[6] = MSW(MSD(req->dma));
3529 mcp->mb[7] = LSW(MSD(req->dma));
3530 mcp->mb[5] = req->length;
3532 mcp->mb[10] = req->rsp->id;
3533 mcp->mb[12] = req->qos;
3534 mcp->mb[11] = req->vp_idx;
3535 mcp->mb[13] = req->rid;
3539 reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
3540 QLA_QUE_PAGE * req->id);
3542 mcp->mb[4] = req->id;
3543 /* que in ptr index */
3545 /* que out ptr index */
3547 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3548 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3550 mcp->flags = MBX_DMA_OUT;
3551 mcp->tov = MBX_TOV_SECONDS * 2;
3553 if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3554 mcp->in_mb |= MBX_1;
3555 if (IS_QLA83XX(ha)) {
3556 mcp->out_mb |= MBX_15;
3557 /* debug q create issue in SR-IOV */
3558 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
3561 spin_lock_irqsave(&ha->hardware_lock, flags);
3562 if (!(req->options & BIT_0)) {
3563 WRT_REG_DWORD(&reg->req_q_in, 0);
3564 if (!IS_QLA83XX(ha))
3565 WRT_REG_DWORD(&reg->req_q_out, 0);
3567 req->req_q_in = &reg->req_q_in;
3568 req->req_q_out = &reg->req_q_out;
3569 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3571 rval = qla2x00_mailbox_command(vha, mcp);
3572 if (rval != QLA_SUCCESS) {
3573 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3574 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
3577 "Done %s.\n", __func__);
/*
 * qla25xx_init_rsp_que() - initialize a multiqueue response queue via
 * MBC_INITIALIZE_MULTIQ (response-queue variant). Mirrors
 * qla25xx_init_req_que(): DMA ring in mb[2..7], length in mb[5], MSI-X
 * vector entry in mb[14], RID in mb[13]; in/out pointer registers are
 * zeroed under hardware_lock unless rsp->options BIT_0 is set.
 * NOTE(review): interior lines elided in this listing (in_mb base value,
 * que in/out index setup, braces, return).
 */
3584 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3587 unsigned long flags;
3589 mbx_cmd_t *mcp = &mc;
3590 struct device_reg_25xxmq __iomem *reg;
3591 struct qla_hw_data *ha = vha->hw;
3593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
3594 "Entered %s.\n", __func__);
3596 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3597 mcp->mb[1] = rsp->options;
3598 mcp->mb[2] = MSW(LSD(rsp->dma));
3599 mcp->mb[3] = LSW(LSD(rsp->dma));
3600 mcp->mb[6] = MSW(MSD(rsp->dma));
3601 mcp->mb[7] = LSW(MSD(rsp->dma));
3602 mcp->mb[5] = rsp->length;
3603 mcp->mb[14] = rsp->msix->entry;
3604 mcp->mb[13] = rsp->rid;
3608 reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
3609 QLA_QUE_PAGE * rsp->id);
3611 mcp->mb[4] = rsp->id;
3612 /* que in ptr index */
3614 /* que out ptr index */
3616 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3617 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3619 mcp->flags = MBX_DMA_OUT;
3620 mcp->tov = MBX_TOV_SECONDS * 2;
3622 if (IS_QLA81XX(ha)) {
3623 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
3624 mcp->in_mb |= MBX_1;
3625 } else if (IS_QLA83XX(ha)) {
3626 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
3627 mcp->in_mb |= MBX_1;
3628 /* debug q create issue in SR-IOV */
3629 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
3632 spin_lock_irqsave(&ha->hardware_lock, flags);
3633 if (!(rsp->options & BIT_0)) {
3634 WRT_REG_DWORD(&reg->rsp_q_out, 0);
3635 if (!IS_QLA83XX(ha))
3636 WRT_REG_DWORD(&reg->rsp_q_in, 0);
3639 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3641 rval = qla2x00_mailbox_command(vha, mcp);
3642 if (rval != QLA_SUCCESS) {
3643 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3644 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
3647 "Done %s.\n", __func__);
/*
 * qla81xx_idc_ack() - acknowledge an Inter-Driver Communication message
 * via MBC_IDC_ACK, echoing the QLA_IDC_ACK_REGS registers supplied in
 * @mb back to the firmware in mb[1..7].
 * NOTE(review): interior lines elided in this listing.
 */
3654 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3658 mbx_cmd_t *mcp = &mc;
3660 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
3661 "Entered %s.\n", __func__);
3663 mcp->mb[0] = MBC_IDC_ACK;
3664 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
3665 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3667 mcp->tov = MBX_TOV_SECONDS;
3669 rval = qla2x00_mailbox_command(vha, mcp);
3671 if (rval != QLA_SUCCESS) {
3672 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3673 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3675 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
3676 "Done %s.\n", __func__);
/*
 * qla81xx_fac_get_sector_size() - query the flash sector size through the
 * Flash Access Control mailbox command (FAC_OPT_CMD_GET_SECTOR_SIZE).
 * 81xx/83xx only. On success the size from mb[1] is stored in
 * *sector_size.
 * NOTE(review): interior lines elided in this listing.
 */
3683 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3687 mbx_cmd_t *mcp = &mc;
3689 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
3690 "Entered %s.\n", __func__);
3692 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3693 return QLA_FUNCTION_FAILED;
3695 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3696 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3697 mcp->out_mb = MBX_1|MBX_0;
3698 mcp->in_mb = MBX_1|MBX_0;
3699 mcp->tov = MBX_TOV_SECONDS;
3701 rval = qla2x00_mailbox_command(vha, mcp);
3703 if (rval != QLA_SUCCESS) {
3704 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3705 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3706 rval, mcp->mb[0], mcp->mb[1]);
3708 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
3709 "Done %s.\n", __func__);
3710 *sector_size = mcp->mb[1];
/*
 * qla81xx_fac_do_write_enable() - toggle flash write access through the
 * Flash Access Control mailbox command: @enable non-zero selects
 * FAC_OPT_CMD_WRITE_ENABLE, zero selects FAC_OPT_CMD_WRITE_PROTECT.
 * 81xx/83xx only. NOTE(review): interior lines elided in this listing.
 */
3717 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3721 mbx_cmd_t *mcp = &mc;
3723 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3724 return QLA_FUNCTION_FAILED;
3726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
3727 "Entered %s.\n", __func__);
3729 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3730 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
3731 FAC_OPT_CMD_WRITE_PROTECT;
3732 mcp->out_mb = MBX_1|MBX_0;
3733 mcp->in_mb = MBX_1|MBX_0;
3734 mcp->tov = MBX_TOV_SECONDS;
3736 rval = qla2x00_mailbox_command(vha, mcp);
3738 if (rval != QLA_SUCCESS) {
3739 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
3740 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3741 rval, mcp->mb[0], mcp->mb[1]);
3743 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
3744 "Done %s.\n", __func__);
/*
 * qla81xx_fac_erase_sector() - erase the flash sector range [@start,
 * @finish] through the Flash Access Control mailbox command
 * (FAC_OPT_CMD_ERASE_SECTOR); 32-bit addresses split LSW/MSW into
 * mb[2..5]. 81xx/83xx only.
 * NOTE(review): interior lines elided in this listing.
 */
3751 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3755 mbx_cmd_t *mcp = &mc;
3757 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3758 return QLA_FUNCTION_FAILED;
3760 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
3761 "Entered %s.\n", __func__);
3763 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3764 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
3765 mcp->mb[2] = LSW(start);
3766 mcp->mb[3] = MSW(start);
3767 mcp->mb[4] = LSW(finish);
3768 mcp->mb[5] = MSW(finish);
3769 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3770 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3771 mcp->tov = MBX_TOV_SECONDS;
3773 rval = qla2x00_mailbox_command(vha, mcp);
3775 if (rval != QLA_SUCCESS) {
3776 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
3777 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3778 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3780 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
3781 "Done %s.\n", __func__);
/*
 * qla81xx_restart_mpi_firmware() - issue MBC_RESTART_MPI_FW to restart
 * the Management Processor Interface firmware; mb[1] carries additional
 * status back on failure.
 * NOTE(review): interior lines elided in this listing.
 */
3788 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3792 mbx_cmd_t *mcp = &mc;
3794 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
3795 "Entered %s.\n", __func__);
3797 mcp->mb[0] = MBC_RESTART_MPI_FW;
3798 mcp->out_mb = MBX_0;
3799 mcp->in_mb = MBX_0|MBX_1;
3800 mcp->tov = MBX_TOV_SECONDS;
3802 rval = qla2x00_mailbox_command(vha, mcp);
3804 if (rval != QLA_SUCCESS) {
3805 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
3806 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3807 rval, mcp->mb[0], mcp->mb[1]);
3809 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
3810 "Done %s.\n", __func__);
/*
 * qla2x00_read_sfp() - read @len bytes from the SFP transceiver
 * (device @dev, offset @off, options @opt) into the DMA buffer at
 * @sfp_dma via MBC_READ_SFP. FWI2-capable adapters only.
 * NOTE(review): interior lines elided in this listing — the mb[1]/mb[8..10]
 * dev/off/len/opt encoding and the post-command copy into @sfp are not
 * visible here.
 */
3817 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3818 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3822 mbx_cmd_t *mcp = &mc;
3823 struct qla_hw_data *ha = vha->hw;
3825 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
3826 "Entered %s.\n", __func__);
3828 if (!IS_FWI2_CAPABLE(ha))
3829 return QLA_FUNCTION_FAILED;
3834 mcp->mb[0] = MBC_READ_SFP;
3836 mcp->mb[2] = MSW(sfp_dma);
3837 mcp->mb[3] = LSW(sfp_dma);
3838 mcp->mb[6] = MSW(MSD(sfp_dma));
3839 mcp->mb[7] = LSW(MSD(sfp_dma));
3843 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3844 mcp->in_mb = MBX_1|MBX_0;
3845 mcp->tov = MBX_TOV_SECONDS;
3847 rval = qla2x00_mailbox_command(vha, mcp);
3852 if (rval != QLA_SUCCESS) {
3853 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3854 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
3857 "Done %s.\n", __func__);
/*
 * qla2x00_write_sfp() - write @len bytes from the DMA buffer at @sfp_dma
 * to the SFP transceiver (device @dev, offset @off, options @opt) via
 * MBC_WRITE_SFP. FWI2-capable adapters only. Mirror of qla2x00_read_sfp().
 * NOTE(review): interior lines elided in this listing — the copy of @sfp
 * into the DMA buffer and the mb[1]/mb[8..10] encoding are not visible.
 */
3864 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3865 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3869 mbx_cmd_t *mcp = &mc;
3870 struct qla_hw_data *ha = vha->hw;
3872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
3873 "Entered %s.\n", __func__);
3875 if (!IS_FWI2_CAPABLE(ha))
3876 return QLA_FUNCTION_FAILED;
3884 mcp->mb[0] = MBC_WRITE_SFP;
3886 mcp->mb[2] = MSW(sfp_dma);
3887 mcp->mb[3] = LSW(sfp_dma);
3888 mcp->mb[6] = MSW(MSD(sfp_dma));
3889 mcp->mb[7] = LSW(MSD(sfp_dma));
3893 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3894 mcp->in_mb = MBX_1|MBX_0;
3895 mcp->tov = MBX_TOV_SECONDS;
3897 rval = qla2x00_mailbox_command(vha, mcp);
3899 if (rval != QLA_SUCCESS) {
3900 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3901 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3903 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
3904 "Done %s.\n", __func__);
/*
 * qla2x00_get_xgmac_stats() - fetch 10GbE MAC statistics into the DMA
 * buffer at @stats_dma via MBC_GET_XGMAC_STATS. CNA-capable adapters
 * only. mb[8] carries the buffer size in dwords (bytes >> 2); on success
 * the actual byte count (mb[2] << 2) is stored in *actual_size.
 * NOTE(review): interior lines elided in this listing.
 */
3911 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3912 uint16_t size_in_bytes, uint16_t *actual_size)
3916 mbx_cmd_t *mcp = &mc;
3918 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
3919 "Entered %s.\n", __func__);
3921 if (!IS_CNA_CAPABLE(vha->hw))
3922 return QLA_FUNCTION_FAILED;
3924 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3925 mcp->mb[2] = MSW(stats_dma);
3926 mcp->mb[3] = LSW(stats_dma);
3927 mcp->mb[6] = MSW(MSD(stats_dma));
3928 mcp->mb[7] = LSW(MSD(stats_dma));
3929 mcp->mb[8] = size_in_bytes >> 2;
3930 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3931 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3932 mcp->tov = MBX_TOV_SECONDS;
3934 rval = qla2x00_mailbox_command(vha, mcp);
3936 if (rval != QLA_SUCCESS) {
3937 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
3938 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3939 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3941 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
3942 "Done %s.\n", __func__);
3945 *actual_size = mcp->mb[2] << 2;
/*
 * qla2x00_get_dcbx_params() - retrieve the DCBX TLV parameter block into
 * the DMA buffer at @tlv_dma via MBC_GET_DCBX_PARAMS. CNA-capable
 * adapters only.
 * NOTE(review): interior lines elided in this listing (mb[1]/mb[8]
 * setup, braces, return).
 */
3952 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3957 mbx_cmd_t *mcp = &mc;
3959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
3960 "Entered %s.\n", __func__);
3962 if (!IS_CNA_CAPABLE(vha->hw))
3963 return QLA_FUNCTION_FAILED;
3965 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3967 mcp->mb[2] = MSW(tlv_dma);
3968 mcp->mb[3] = LSW(tlv_dma);
3969 mcp->mb[6] = MSW(MSD(tlv_dma));
3970 mcp->mb[7] = LSW(MSD(tlv_dma));
3972 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3973 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3974 mcp->tov = MBX_TOV_SECONDS;
3976 rval = qla2x00_mailbox_command(vha, mcp);
3978 if (rval != QLA_SUCCESS) {
3979 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
3980 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3981 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3983 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
3984 "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word() - read one 32-bit word of RISC RAM at
 * @risc_addr via MBC_READ_RAM_EXTENDED (address split LSW in mb[1],
 * MSW in mb[8]). On success the word is assembled from mb[3]:mb[2]
 * into *data. FWI2-capable adapters only.
 * NOTE(review): interior lines elided in this listing.
 */
3991 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3995 mbx_cmd_t *mcp = &mc;
3997 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
3998 "Entered %s.\n", __func__);
4000 if (!IS_FWI2_CAPABLE(vha->hw))
4001 return QLA_FUNCTION_FAILED;
4003 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4004 mcp->mb[1] = LSW(risc_addr);
4005 mcp->mb[8] = MSW(risc_addr);
4006 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4007 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4010 rval = qla2x00_mailbox_command(vha, mcp);
4011 if (rval != QLA_SUCCESS) {
4012 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4013 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4016 "Done %s.\n", __func__);
4017 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test() - run a diagnostic loopback
 * (MBC_DIAGNOSTIC_LOOP_BACK) for one iteration. BIT_6 in mb[1] selects
 * 64-bit addressing; send/receive DMA buffers and the transfer size are
 * spread across the high mailbox registers as annotated inline. All 64
 * bytes of returned mailbox state are copied to @mresp for the caller.
 * NOTE(review): interior lines elided in this listing (braces, return).
 */
4024 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4029 mbx_cmd_t *mcp = &mc;
4030 uint32_t iter_cnt = 0x1;
4032 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4033 "Entered %s.\n", __func__);
4035 memset(mcp->mb, 0 , sizeof(mcp->mb));
4036 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4037 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
4039 /* transfer count */
4040 mcp->mb[10] = LSW(mreq->transfer_size);
4041 mcp->mb[11] = MSW(mreq->transfer_size);
4043 /* send data address */
4044 mcp->mb[14] = LSW(mreq->send_dma);
4045 mcp->mb[15] = MSW(mreq->send_dma);
4046 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4047 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4049 /* receive data address */
4050 mcp->mb[16] = LSW(mreq->rcv_dma);
4051 mcp->mb[17] = MSW(mreq->rcv_dma);
4052 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4053 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4055 /* Iteration count */
4056 mcp->mb[18] = LSW(iter_cnt);
4057 mcp->mb[19] = MSW(iter_cnt);
4059 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4060 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4061 if (IS_CNA_CAPABLE(vha->hw))
4062 mcp->out_mb |= MBX_2;
4063 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
4065 mcp->buf_size = mreq->transfer_size;
4066 mcp->tov = MBX_TOV_SECONDS;
4067 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4069 rval = qla2x00_mailbox_command(vha, mcp);
4071 if (rval != QLA_SUCCESS) {
4072 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
4073 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
4074 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
4075 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
4077 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4078 "Done %s.\n", __func__);
4081 /* Copy mailbox information */
4082 memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test() - run a diagnostic ECHO (MBC_DIAGNOSTIC_ECHO).
 * BIT_6 in mb[1] selects 64-bit addressing; on CNA parts BIT_15 is also
 * set and mb[2] carries the FCoE FCF index. Send/receive DMA buffers are
 * laid out across the high mailbox registers as annotated inline, and the
 * full 64-byte mailbox snapshot is copied to @mresp afterwards.
 * NOTE(review): interior lines elided in this listing (in_mb base value,
 * braces, return).
 */
4087 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4092 mbx_cmd_t *mcp = &mc;
4093 struct qla_hw_data *ha = vha->hw;
4095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4096 "Entered %s.\n", __func__);
4098 memset(mcp->mb, 0 , sizeof(mcp->mb));
4099 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4100 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
4101 if (IS_CNA_CAPABLE(ha)) {
4102 mcp->mb[1] |= BIT_15;
4103 mcp->mb[2] = vha->fcoe_fcf_idx;
4105 mcp->mb[16] = LSW(mreq->rcv_dma);
4106 mcp->mb[17] = MSW(mreq->rcv_dma);
4107 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4108 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4110 mcp->mb[10] = LSW(mreq->transfer_size);
4112 mcp->mb[14] = LSW(mreq->send_dma);
4113 mcp->mb[15] = MSW(mreq->send_dma);
4114 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4115 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4117 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
4118 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4119 if (IS_CNA_CAPABLE(ha))
4120 mcp->out_mb |= MBX_2;
4123 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
4124 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4125 mcp->in_mb |= MBX_1;
4126 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4127 mcp->in_mb |= MBX_3;
4129 mcp->tov = MBX_TOV_SECONDS;
4130 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4131 mcp->buf_size = mreq->transfer_size;
4133 rval = qla2x00_mailbox_command(vha, mcp);
4135 if (rval != QLA_SUCCESS) {
4136 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
4137 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4138 rval, mcp->mb[0], mcp->mb[1]);
4140 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
4141 "Done %s.\n", __func__);
4144 /* Copy mailbox information */
4145 memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip
 *	Issue MBC_ISP84XX_RESET to reset the ISP84xx chip.  mb[1] carries the
 *	caller's 'enable_diagnostic' flag (presumably selects diagnostic
 *	firmware mode after the reset - confirm against firmware docs).
 *
 * NOTE(review): 'rval'/'mc' declarations and the final return are not
 * visible in this excerpt.
 */
4150 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
4154 mbx_cmd_t *mcp = &mc;
4156 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
4157 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
4159 mcp->mb[0] = MBC_ISP84XX_RESET;
4160 mcp->mb[1] = enable_diagnostic;
4161 mcp->out_mb = MBX_1|MBX_0;
4162 mcp->in_mb = MBX_1|MBX_0;
4163 mcp->tov = MBX_TOV_SECONDS;
4164 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4165 rval = qla2x00_mailbox_command(vha, mcp);
4167 if (rval != QLA_SUCCESS)
4168 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
4170 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
4171 "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word
 *	Write one 32-bit word 'data' to RISC RAM address 'risc_addr' using
 *	MBC_WRITE_RAM_WORD_EXTENDED.  The 32-bit address is split LSW/MSW
 *	across mb[1]/mb[8]; the data across mb[2]/mb[3].
 *	Only supported on FWI2-capable (ISP24xx+) adapters.
 *
 * NOTE(review): the mcp->in_mb / tov / flags setup lines appear to be
 * dropped from this excerpt - confirm against the full source.
 */
4177 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
4181 mbx_cmd_t *mcp = &mc;
4183 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
4184 "Entered %s.\n", __func__);
4186 if (!IS_FWI2_CAPABLE(vha->hw))
4187 return QLA_FUNCTION_FAILED;
4189 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
4190 mcp->mb[1] = LSW(risc_addr);
4191 mcp->mb[2] = LSW(data);
4192 mcp->mb[3] = MSW(data);
4193 mcp->mb[8] = MSW(risc_addr);
4194 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
4198 rval = qla2x00_mailbox_command(vha, mcp);
4199 if (rval != QLA_SUCCESS) {
4200 ql_dbg(ql_dbg_mbx, vha, 0x1101,
4201 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4203 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
4204 "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register
 *	Write MPI registers by driving the mailbox hardware registers
 *	directly (MBC_WRITE_MPI_REGISTER), bypassing the normal
 *	qla2x00_mailbox_command() path, then busy-poll for the completion
 *	interrupt instead of sleeping.
 *
 * mb = four 16-bit values placed into mailbox1..mailbox4.
 */
4211 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4214 uint32_t stat, timer;
4216 struct qla_hw_data *ha = vha->hw;
4217 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4221 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
4222 "Entered %s.\n", __func__);
4224 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
4226 /* Write the MBC data to the registers */
4227 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
4228 WRT_REG_WORD(&reg->mailbox1, mb[0]);
4229 WRT_REG_WORD(&reg->mailbox2, mb[1]);
4230 WRT_REG_WORD(&reg->mailbox3, mb[2]);
4231 WRT_REG_WORD(&reg->mailbox4, mb[3]);
/* Ring the doorbell: tell the RISC a mailbox command is pending. */
4233 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
4235 /* Poll for MBC interrupt */
/* Bounded busy-wait: up to 6,000,000 status reads before giving up. */
4236 for (timer = 6000000; timer; timer--) {
4237 /* Check for pending interrupts. */
4238 stat = RD_REG_DWORD(&reg->host_status);
4239 if (stat & HSRX_RISC_INT) {
/*
 * NOTE(review): lines are dropped here; the compares against
 * 0x1/0x2/0x10/0x11 (mailbox-completion interrupt codes) suggest
 * 'stat' is reduced to its low byte in a missing statement - confirm.
 */
4242 if (stat == 0x1 || stat == 0x2 ||
4243 stat == 0x10 || stat == 0x11) {
4244 set_bit(MBX_INTERRUPT,
4245 &ha->mbx_cmd_flags);
4246 mb0 = RD_REG_WORD(&reg->mailbox0);
4247 WRT_REG_DWORD(&reg->hccr,
4248 HCCRX_CLR_RISC_INT);
4249 RD_REG_DWORD(&reg->hccr);
/* Completion status comes from mailbox0 only if the interrupt fired. */
4256 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
4257 rval = mb0 & MBS_MASK;
4259 rval = QLA_FUNCTION_FAILED;
4261 if (rval != QLA_SUCCESS) {
4262 ql_dbg(ql_dbg_mbx, vha, 0x1104,
4263 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
4265 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
4266 "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate
 *	Query the current link data rate via MBC_DATA_RATE and cache the
 *	result (mb[1]) in ha->link_data_rate.  A value of 0x7 is treated as
 *	invalid/unknown here and is not cached.  FWI2-capable adapters only.
 *
 * NOTE(review): lines are dropped in this excerpt (mb[1] initialization,
 * and what looks like an if() guarding the 'in_mb |= MBX_3' below) -
 * confirm against the full source.
 */
4273 qla2x00_get_data_rate(scsi_qla_host_t *vha)
4277 mbx_cmd_t *mcp = &mc;
4278 struct qla_hw_data *ha = vha->hw;
4280 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
4281 "Entered %s.\n", __func__);
4283 if (!IS_FWI2_CAPABLE(ha))
4284 return QLA_FUNCTION_FAILED;
4286 mcp->mb[0] = MBC_DATA_RATE;
4288 mcp->out_mb = MBX_1|MBX_0;
4289 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4291 mcp->in_mb |= MBX_3;
4292 mcp->tov = MBX_TOV_SECONDS;
4294 rval = qla2x00_mailbox_command(vha, mcp);
4295 if (rval != QLA_SUCCESS) {
4296 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4297 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4299 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
4300 "Done %s.\n", __func__);
4301 if (mcp->mb[1] != 0x7)
4302 ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config
 *	Read the port configuration via MBC_GET_PORT_CONFIG (ISP81xx/83xx
 *	only) and copy the four returned configuration words (mb[1]..mb[4])
 *	into the caller-supplied 'mb' array, preserving all bits.
 */
4309 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4313 mbx_cmd_t *mcp = &mc;
4314 struct qla_hw_data *ha = vha->hw;
4316 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
4317 "Entered %s.\n", __func__);
4319 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
4320 return QLA_FUNCTION_FAILED;
4321 mcp->mb[0] = MBC_GET_PORT_CONFIG;
4322 mcp->out_mb = MBX_0;
4323 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4324 mcp->tov = MBX_TOV_SECONDS;
4327 rval = qla2x00_mailbox_command(vha, mcp);
4329 if (rval != QLA_SUCCESS) {
4330 ql_dbg(ql_dbg_mbx, vha, 0x110a,
4331 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4333 /* Copy all bits to preserve original value */
4334 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4336 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
4337 "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config
 *	Write a port configuration via MBC_SET_PORT_CONFIG.  The four
 *	caller-supplied words in 'mb' are copied verbatim into mb[1]..mb[4]
 *	(counterpart of qla81xx_get_port_config()).
 */
4343 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4347 mbx_cmd_t *mcp = &mc;
4349 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
4350 "Entered %s.\n", __func__);
4352 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4353 /* Copy all bits to preserve original setting */
4354 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
4355 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4357 mcp->tov = MBX_TOV_SECONDS;
4359 rval = qla2x00_mailbox_command(vha, mcp);
4361 if (rval != QLA_SUCCESS) {
4362 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4363 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
4366 "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio
 *	Set the FCP priority (low nibble of 'priority') for the port at
 *	'loop_id' via MBC_PORT_PARAMS, on ISP24xx/25xx adapters.
 *
 * NOTE(review): the bodies of the fcp_prio_enabled if/else (which set
 * mb[2], and likely mb[3]) are missing from this excerpt - confirm
 * against the full source.
 */
4373 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4378 mbx_cmd_t *mcp = &mc;
4379 struct qla_hw_data *ha = vha->hw;
4381 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
4382 "Entered %s.\n", __func__);
4384 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4385 return QLA_FUNCTION_FAILED;
4387 mcp->mb[0] = MBC_PORT_PARAMS;
4388 mcp->mb[1] = loop_id;
4389 if (ha->flags.fcp_prio_enabled)
4393 mcp->mb[4] = priority & 0xf;
4394 mcp->mb[9] = vha->vp_idx;
4395 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4396 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4399 rval = qla2x00_mailbox_command(vha, mcp);
4407 if (rval != QLA_SUCCESS) {
4408 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4410 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
4411 "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp
 *	Read the adapter temperature from the thermal sensor behind the SFP
 *	interface (two qla2x00_read_sfp() reads at device 0x98, registers
 *	0x01 and 0x10).  On any read failure the driver marks thermal as
 *	unsupported (ha->flags.thermal_supported = 0).
 *
 *	*frac is derived from the top two bits of the second byte, scaled
 *	by 25 (i.e. quarter-degree steps expressed in hundredths -
 *	presumably; confirm against the sensor datasheet).  The *temp
 *	assignment is in a line missing from this excerpt.
 */
4418 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4422 struct qla_hw_data *ha = vha->hw;
4424 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
4425 "Entered %s.\n", __func__);
4428 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
4429 BIT_13|BIT_12|BIT_0);
4430 if (rval != QLA_SUCCESS) {
4431 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4432 ha->flags.thermal_supported = 0;
4438 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
4439 BIT_13|BIT_12|BIT_0);
4440 if (rval != QLA_SUCCESS) {
4441 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4442 ha->flags.thermal_supported = 0;
4445 *frac = (byte >> 6) * 25;
4447 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
4448 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_enable
 *	Enable firmware interrupts via MBC_TOGGLE_INTERRUPT.  The mb[1]
 *	value selecting "enable" is set in a line missing from this excerpt
 *	(out_mb includes MBX_1, so mb[1] is transmitted) - confirm.
 *	FWI2-capable adapters only.
 */
4454 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4457 struct qla_hw_data *ha = vha->hw;
4459 mbx_cmd_t *mcp = &mc;
4461 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
4462 "Entered %s.\n", __func__);
4464 if (!IS_FWI2_CAPABLE(ha))
4465 return QLA_FUNCTION_FAILED;
4467 memset(mcp, 0, sizeof(mbx_cmd_t));
4468 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4471 mcp->out_mb = MBX_1|MBX_0;
4476 rval = qla2x00_mailbox_command(vha, mcp);
4477 if (rval != QLA_SUCCESS) {
4478 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4479 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
4482 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable
 *	Disable firmware interrupts via MBC_TOGGLE_INTERRUPT (counterpart of
 *	qla82xx_mbx_intr_enable(), restricted to ISP82xx).  The mb[1] value
 *	selecting "disable" is set in a line missing from this excerpt.
 */
4489 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4492 struct qla_hw_data *ha = vha->hw;
4494 mbx_cmd_t *mcp = &mc;
4496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
4497 "Entered %s.\n", __func__);
4499 if (!IS_QLA82XX(ha))
4500 return QLA_FUNCTION_FAILED;
4502 memset(mcp, 0, sizeof(mbx_cmd_t));
4503 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4506 mcp->out_mb = MBX_1|MBX_0;
4511 rval = qla2x00_mailbox_command(vha, mcp);
4512 if (rval != QLA_SUCCESS) {
4513 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4514 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4516 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
4517 "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size
 *	Ask the firmware for the minidump template size
 *	(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE, subcode RQST_TMPLT_SIZE; both
 *	32-bit values are split LSW/MSW across mailbox pairs).  The size is
 *	returned in mb[3]:mb[2] and cached in ha->md_template_size; a zero
 *	size is treated as failure.
 */
4524 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4526 struct qla_hw_data *ha = vha->hw;
4528 mbx_cmd_t *mcp = &mc;
4529 int rval = QLA_FUNCTION_FAILED;
4531 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
4532 "Entered %s.\n", __func__);
4534 memset(mcp->mb, 0 , sizeof(mcp->mb));
4535 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4536 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4537 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
4538 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
4540 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4541 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
4542 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4544 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4545 mcp->tov = MBX_TOV_SECONDS;
4546 rval = qla2x00_mailbox_command(vha, mcp);
4548 /* Always copy back return mailbox values. */
4549 if (rval != QLA_SUCCESS) {
4550 ql_dbg(ql_dbg_mbx, vha, 0x1120,
4551 "mailbox command FAILED=0x%x, subcode=%x.\n",
4552 (mcp->mb[1] << 16) | mcp->mb[0],
4553 (mcp->mb[3] << 16) | mcp->mb[2]);
4555 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
4556 "Done %s.\n", __func__);
/* Template size comes back as a 32-bit value split across mb[3]:mb[2]. */
4557 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4558 if (!ha->md_template_size) {
4559 ql_dbg(ql_dbg_mbx, vha, 0x1122,
4560 "Null template size obtained.\n");
4561 rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template
 *	Allocate a coherent DMA buffer of ha->md_template_size bytes (size
 *	obtained earlier by qla82xx_md_get_template_size()) and ask the
 *	firmware to fill it with the minidump template
 *	(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE, subcode RQST_TMPLT).
 *	The buffer address/size are passed via mb[4..9].
 *	Ownership: the buffer is kept in ha->md_tmplt_hdr/_dma for later use;
 *	it is not freed here.
 */
4568 qla82xx_md_get_template(scsi_qla_host_t *vha)
4570 struct qla_hw_data *ha = vha->hw;
4572 mbx_cmd_t *mcp = &mc;
4573 int rval = QLA_FUNCTION_FAILED;
4575 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
4576 "Entered %s.\n", __func__);
4578 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4579 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
4580 if (!ha->md_tmplt_hdr) {
4581 ql_log(ql_log_warn, vha, 0x1124,
4582 "Unable to allocate memory for Minidump template.\n");
4586 memset(mcp->mb, 0 , sizeof(mcp->mb));
4587 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4588 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4589 mcp->mb[2] = LSW(RQST_TMPLT);
4590 mcp->mb[3] = MSW(RQST_TMPLT);
/* 64-bit DMA address of the template buffer, 16 bits per mailbox. */
4591 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
4592 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
4593 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
4594 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
4595 mcp->mb[8] = LSW(ha->md_template_size);
4596 mcp->mb[9] = MSW(ha->md_template_size);
4598 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4599 mcp->tov = MBX_TOV_SECONDS;
4600 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
4601 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4602 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4603 rval = qla2x00_mailbox_command(vha, mcp);
4605 if (rval != QLA_SUCCESS) {
4606 ql_dbg(ql_dbg_mbx, vha, 0x1125,
4607 "mailbox command FAILED=0x%x, subcode=%x.\n",
4608 ((mcp->mb[1] << 16) | mcp->mb[0]),
4609 ((mcp->mb[3] << 16) | mcp->mb[2]));
4611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
4612 "Done %s.\n", __func__);
/*
 * qla81xx_set_led_config
 *	Program the LED configuration registers via MBC_SET_LED_CONFIG.
 *	ISP81xx uses two config words (led_cfg[0..1]); ISP8031 takes six
 *	(led_cfg[0..5]), with the extra four gated by IS_QLA8031() both when
 *	loading the mailboxes and when extending out_mb.
 */
4617 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4620 struct qla_hw_data *ha = vha->hw;
4622 mbx_cmd_t *mcp = &mc;
4624 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4625 return QLA_FUNCTION_FAILED;
4627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
4628 "Entered %s.\n", __func__);
4630 memset(mcp, 0, sizeof(mbx_cmd_t));
4631 mcp->mb[0] = MBC_SET_LED_CONFIG;
4632 mcp->mb[1] = led_cfg[0];
4633 mcp->mb[2] = led_cfg[1];
4634 if (IS_QLA8031(ha)) {
4635 mcp->mb[3] = led_cfg[2];
4636 mcp->mb[4] = led_cfg[3];
4637 mcp->mb[5] = led_cfg[4];
4638 mcp->mb[6] = led_cfg[5];
4641 mcp->out_mb = MBX_2|MBX_1|MBX_0;
4643 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
4648 rval = qla2x00_mailbox_command(vha, mcp);
4649 if (rval != QLA_SUCCESS) {
4650 ql_dbg(ql_dbg_mbx, vha, 0x1134,
4651 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4653 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
4654 "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config
 *	Read the current LED configuration via MBC_GET_LED_CONFIG into the
 *	caller's led_cfg[] array (counterpart of qla81xx_set_led_config()).
 *	Two words on ISP81xx, six on ISP8031.  led_cfg is only written on
 *	success.
 */
4661 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4664 struct qla_hw_data *ha = vha->hw;
4666 mbx_cmd_t *mcp = &mc;
4668 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4669 return QLA_FUNCTION_FAILED;
4671 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
4672 "Entered %s.\n", __func__);
4674 memset(mcp, 0, sizeof(mbx_cmd_t));
4675 mcp->mb[0] = MBC_GET_LED_CONFIG;
4677 mcp->out_mb = MBX_0;
4678 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4680 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
4684 rval = qla2x00_mailbox_command(vha, mcp);
4685 if (rval != QLA_SUCCESS) {
4686 ql_dbg(ql_dbg_mbx, vha, 0x1137,
4687 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4689 led_cfg[0] = mcp->mb[1];
4690 led_cfg[1] = mcp->mb[2];
4691 if (IS_QLA8031(ha)) {
4692 led_cfg[2] = mcp->mb[3];
4693 led_cfg[3] = mcp->mb[4];
4694 led_cfg[4] = mcp->mb[5];
4695 led_cfg[5] = mcp->mb[6];
4697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
4698 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl
 *	Turn the ISP82xx beacon LED on or off via MBC_SET_LED_CONFIG.
 *	out_mb includes MBX_7, so mb[7] carries the on/off selection derived
 *	from 'enable' - the lines setting it are missing from this excerpt;
 *	confirm against the full source.
 */
4705 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4708 struct qla_hw_data *ha = vha->hw;
4710 mbx_cmd_t *mcp = &mc;
4712 if (!IS_QLA82XX(ha))
4713 return QLA_FUNCTION_FAILED;
4715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
4716 "Entered %s.\n", __func__);
4718 memset(mcp, 0, sizeof(mbx_cmd_t));
4719 mcp->mb[0] = MBC_SET_LED_CONFIG;
4725 mcp->out_mb = MBX_7|MBX_0;
4727 mcp->tov = MBX_TOV_SECONDS;
4730 rval = qla2x00_mailbox_command(vha, mcp);
4731 if (rval != QLA_SUCCESS) {
4732 ql_dbg(ql_dbg_mbx, vha, 0x1128,
4733 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
4736 "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg
 *	Write the 32-bit value 'data' to remote register address 'reg' via
 *	MBC_WRITE_REMOTE_REG (ISP83xx only).  Address and data are each
 *	split LSW/MSW across two mailboxes.
 */
4743 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4746 struct qla_hw_data *ha = vha->hw;
4748 mbx_cmd_t *mcp = &mc;
4750 if (!IS_QLA83XX(ha))
4751 return QLA_FUNCTION_FAILED;
4753 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
4754 "Entered %s.\n", __func__);
4756 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
4757 mcp->mb[1] = LSW(reg);
4758 mcp->mb[2] = MSW(reg);
4759 mcp->mb[3] = LSW(data);
4760 mcp->mb[4] = MSW(data);
4761 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4763 mcp->in_mb = MBX_1|MBX_0;
4764 mcp->tov = MBX_TOV_SECONDS;
4766 rval = qla2x00_mailbox_command(vha, mcp);
4768 if (rval != QLA_SUCCESS) {
4769 ql_dbg(ql_dbg_mbx, vha, 0x1131,
4770 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4772 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
4773 "Done %s.\n", __func__);
/*
 * qla2x00_port_logout
 *	Perform an implicit LOGO of the fabric port identified by
 *	fcport->loop_id using MBC_PORT_LOGOUT.  mb[10] = BIT_15 selects the
 *	implicit variant (per the comment below).  Not supported on the
 *	oldest ISP2100/2200 parts.
 */
4780 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4783 struct qla_hw_data *ha = vha->hw;
4785 mbx_cmd_t *mcp = &mc;
4787 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4788 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
4789 "Implicit LOGO Unsupported.\n");
4790 return QLA_FUNCTION_FAILED;
4794 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
4795 "Entering %s.\n", __func__);
4797 /* Perform Implicit LOGO. */
4798 mcp->mb[0] = MBC_PORT_LOGOUT;
4799 mcp->mb[1] = fcport->loop_id;
4800 mcp->mb[10] = BIT_15;
4801 mcp->out_mb = MBX_10|MBX_1|MBX_0;
4803 mcp->tov = MBX_TOV_SECONDS;
4805 rval = qla2x00_mailbox_command(vha, mcp);
4806 if (rval != QLA_SUCCESS)
4807 ql_dbg(ql_dbg_mbx, vha, 0x113d,
4808 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4810 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
4811 "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg
 *	Read a 32-bit remote register via MBC_READ_REMOTE_REG (ISP83xx
 *	only); the value comes back split across mb[3] (low) and mb[4]
 *	(high).  CAMRAM reads during a soft reset can transiently return
 *	QLA8XXX_BAD_VALUE (0xbad0bad0), so such reads are retried for up to
 *	2 seconds (retry_max_time); the retry jump/label itself is in lines
 *	missing from this excerpt.
 */
4817 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
4821 mbx_cmd_t *mcp = &mc;
4822 struct qla_hw_data *ha = vha->hw;
4823 unsigned long retry_max_time = jiffies + (2 * HZ);
4825 if (!IS_QLA83XX(ha))
4826 return QLA_FUNCTION_FAILED;
4828 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
4831 mcp->mb[0] = MBC_READ_REMOTE_REG;
4832 mcp->mb[1] = LSW(reg);
4833 mcp->mb[2] = MSW(reg);
4834 mcp->out_mb = MBX_2|MBX_1|MBX_0;
4835 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4836 mcp->tov = MBX_TOV_SECONDS;
4838 rval = qla2x00_mailbox_command(vha, mcp);
4840 if (rval != QLA_SUCCESS) {
4841 ql_dbg(ql_dbg_mbx, vha, 0x114c,
4842 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4843 rval, mcp->mb[0], mcp->mb[1]);
4845 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
4846 if (*data == QLA8XXX_BAD_VALUE) {
4848 * During soft-reset CAMRAM register reads might
4849 * return 0xbad0bad0. So retry for MAX of 2 sec
4850 * while reading camram registers.
4852 if (time_after(jiffies, retry_max_time)) {
4853 ql_dbg(ql_dbg_mbx, vha, 0x1141,
4854 "Failure to read CAMRAM register. "
4855 "data=0x%x.\n", *data);
4856 return QLA_FUNCTION_FAILED;
4861 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware
 *	Ask the firmware to restart the NIC-side firmware via
 *	MBC_RESTART_NIC_FIRMWARE (ISP83xx only).  On failure a firmware dump
 *	is captured (ha->isp_ops->fw_dump) to aid debugging.
 */
4868 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
4872 mbx_cmd_t *mcp = &mc;
4873 struct qla_hw_data *ha = vha->hw;
4875 if (!IS_QLA83XX(ha))
4876 return QLA_FUNCTION_FAILED;
4878 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
4880 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
4881 mcp->out_mb = MBX_0;
4882 mcp->in_mb = MBX_1|MBX_0;
4883 mcp->tov = MBX_TOV_SECONDS;
4885 rval = qla2x00_mailbox_command(vha, mcp);
4887 if (rval != QLA_SUCCESS) {
4888 ql_dbg(ql_dbg_mbx, vha, 0x1144,
4889 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4890 rval, mcp->mb[0], mcp->mb[1]);
/* Capture firmware state for post-mortem analysis. */
4891 ha->isp_ops->fw_dump(vha, 0);
4893 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control
 *	Issue MBC_SET_ACCESS_CONTROL (ISP8031 only).  The low byte of
 *	'options' is a subcode whose bits select the operation, as seen in
 *	the dispatch below:
 *	  BIT_2       - address-range operation: start/end_addr go out in
 *	                mb[2..5]
 *	  BIT_5       - returns a sector size in mb[1] (*sector_size)
 *	  BIT_6|BIT_7 - returns a driver-lock id in mb[4]:mb[3]
 *	  BIT_3|BIT_4 - returns a flash-lock id in mb[4]:mb[3]
 *	On failure a firmware dump is captured.
 */
4900 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
4901 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
4905 mbx_cmd_t *mcp = &mc;
4906 uint8_t subcode = (uint8_t)options;
4907 struct qla_hw_data *ha = vha->hw;
4909 if (!IS_QLA8031(ha))
4910 return QLA_FUNCTION_FAILED;
4912 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
4914 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
4915 mcp->mb[1] = options;
4916 mcp->out_mb = MBX_1|MBX_0;
4917 if (subcode & BIT_2) {
4918 mcp->mb[2] = LSW(start_addr);
4919 mcp->mb[3] = MSW(start_addr);
4920 mcp->mb[4] = LSW(end_addr);
4921 mcp->mb[5] = MSW(end_addr);
4922 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
4924 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4925 if (!(subcode & (BIT_2 | BIT_5)))
4926 mcp->in_mb |= MBX_4|MBX_3;
4927 mcp->tov = MBX_TOV_SECONDS;
4929 rval = qla2x00_mailbox_command(vha, mcp);
4931 if (rval != QLA_SUCCESS) {
4932 ql_dbg(ql_dbg_mbx, vha, 0x1147,
4933 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
4934 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
4936 ha->isp_ops->fw_dump(vha, 0);
/* Decode the per-subcode result registers on success. */
4938 if (subcode & BIT_5)
4939 *sector_size = mcp->mb[1];
4940 else if (subcode & (BIT_6 | BIT_7)) {
4941 ql_dbg(ql_dbg_mbx, vha, 0x1148,
4942 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
4943 } else if (subcode & (BIT_3 | BIT_4)) {
4944 ql_dbg(ql_dbg_mbx, vha, 0x1149,
4945 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
4947 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
4954 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4959 mbx_cmd_t *mcp = &mc;
4961 if (!IS_MCTP_CAPABLE(vha->hw))
4962 return QLA_FUNCTION_FAILED;
4964 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
4965 "Entered %s.\n", __func__);
4967 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4968 mcp->mb[1] = LSW(addr);
4969 mcp->mb[2] = MSW(req_dma);
4970 mcp->mb[3] = LSW(req_dma);
4971 mcp->mb[4] = MSW(size);
4972 mcp->mb[5] = LSW(size);
4973 mcp->mb[6] = MSW(MSD(req_dma));
4974 mcp->mb[7] = LSW(MSD(req_dma));
4975 mcp->mb[8] = MSW(addr);
4976 /* Setting RAM ID to valid */
4977 mcp->mb[10] |= BIT_7;
4978 /* For MCTP RAM ID is 0x40 */
4979 mcp->mb[10] |= 0x40;
4981 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
4985 mcp->tov = MBX_TOV_SECONDS;
4987 rval = qla2x00_mailbox_command(vha, mcp);
4989 if (rval != QLA_SUCCESS) {
4990 ql_dbg(ql_dbg_mbx, vha, 0x114e,
4991 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
4994 "Done %s.\n", __func__);