/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else
			qla2x00_process_response_queue(rsp);

		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
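
/*
 * Illustrative sketch, not part of the driver: the handler above classifies
 * the value read from mailbox register 0 purely by range.  The hypothetical
 * helpers below restate that convention on their own.
 */
static inline int qla2100_mb0_is_mbx_completion(uint16_t mb0)
{
	/* 0x4000..0x7fff: mailbox command completion status codes. */
	return mb0 > 0x3fff && mb0 < 0x8000;
}

static inline int qla2100_mb0_is_async_event(uint16_t mb0)
{
	/* 0x8000..0xbfff: asynchronous event codes (MBA_*). */
	return mb0 > 0x7fff && mb0 < 0xc000;
}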
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
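
/*
 * Illustrative sketch, not part of the driver: the low byte of the ISP23xx
 * host status word selects how the rest of the word is interpreted, as the
 * switch above shows.  A hypothetical standalone decoder:
 */
static inline const char *qla2300_host_status_kind(uint32_t stat)
{
	switch (stat & 0xff) {
	case 0x1:
	case 0x2:
	case 0x10:
	case 0x11:
		return "mailbox completion (mb0 in MSW)";
	case 0x12:
		return "asynchronous event (code in MSW)";
	case 0x13:
		return "response queue update";
	case 0x15:
	case 0x16:
		return "16-bit handle fast-post completion";
	default:
		return "unrecognized";
	}
}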
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
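
/*
 * Illustrative sketch, not part of the driver: mailbox_out[] is filled only
 * for registers whose bit is set in the mcp->in_mb mask; when no command
 * context exists, the "read everything" fallback below is used.  Hypothetical
 * restatement of that mask:
 */
static inline uint32_t qla2x00_all_mbx_mask(uint16_t mbx_count)
{
	return (1U << mbx_count) - 1;	/* one bit per mailbox register */
}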
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	if (aen == MBA_IDC_COMPLETE && mb[1] >> 15) {
		vha->hw->flags.idc_compl_status = 1;
		if (vha->hw->notify_dcbx_comp)
			complete(&vha->hw->dcbx_comp);
	}

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char * const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "10"
	};

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[6];
	else if (speed < 6)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}
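
/*
 * Illustrative usage, not part of the driver: firmware speed codes 0-5 index
 * the table directly (1/2/?/4/8/16 Gbps) and the CNA code 0x13 maps to "10";
 * anything else falls back to the "?" entry.
 *
 *	qla2x00_get_link_speed_str(ha, 4);	returns "8"
 *	qla2x00_get_link_speed_str(ha, 0x13);	returns "10"
 */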
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered "
				    "itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}
		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit  8     = N/W Interface Link-up
			 *	Bit  9     = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	    SFP Status 0x0 = SFP+ transceiver not expected
			 *	    SFP Status 0x1 = SFP+ transceiver not present
			 *	    SFP Status 0x2 = SFP+ transceiver invalid
			 *	    SFP Status 0x3 = SFP+ transceiver present and
			 *	        valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit  15    = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	    SFP info 0x0 = Unrecognized transceiver for
			 *	        Ethernet
			 *	    SFP info 0x1 = SFP+ brand validation failed
			 *	    SFP info 0x2 = SFP+ speed validation failed
			 *	    SFP info 0x3 = SFP+ access error
			 *	Bit  18    = SFP Multirate
			 *	Bit  19    = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	    DCBX Status 0x0 = DCBX Disabled
			 *	    DCBX Status 0x1 = DCBX Enabled
			 *	    DCBX Status 0x2 = DCBX Exchange error
			 *	Bit  31    = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}
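
/*
 * Illustrative sketch, not part of the driver: decoding PEG-Halt Status-1
 * from its two 16-bit mailbox halves, mirroring the extraction above.  The
 * helper type and function names are hypothetical.
 */
struct qla83xx_ph1 {
	uint32_t protocol_engine_id;	/* bits 0-7 */
	uint32_t fw_err_code;		/* bits 8-28 */
	uint32_t err_level;		/* bits 29-31 */
};

static inline struct qla83xx_ph1 qla83xx_decode_ph1(uint16_t lsw, uint16_t msw)
{
	struct qla83xx_ph1 ph1;

	ph1.protocol_engine_id = lsw & 0xff;
	ph1.fw_err_code = ((lsw & 0xff00) >> 8) | ((msw & 0x1fff) << 8);
	ph1.err_level = (msw & 0xe000) >> 13;
	return ph1;
}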
static bool
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	bool ret = false;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = true;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;
	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;
	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;
	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;
	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			ql_log(ql_log_warn, vha, 0x505e,
			    "Link is offline.\n");

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		ql_log(ql_log_warn, vha, 0x505f,
		    "Link is operational (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
			set_bit(SCR_PENDING, &vha->dpc_flags);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);

		qlt_async_event(mb[0], vha, mb);
		break;
	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;
	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		/* Fallthru */
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp)
			complete(&ha->lb_portup_comp);
		/* Fallthru */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
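
/*
 * Illustrative sketch, not part of the driver: an RSCN affected-port entry
 * and the local host port ID are both packed domain/area/al_pa, as computed
 * in the MBA_RSCN_UPDATE case above.  Hypothetical helper:
 */
static inline uint32_t qla2x00_pack_port_id(uint8_t domain, uint8_t area,
	uint8_t al_pa)
{
	return ((uint32_t)domain << 16) | ((uint32_t)area << 8) | al_pa;
}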
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
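
/*
 * Illustrative sketch, not part of the driver: a completion handle carries
 * the request-queue number in its MSW and the outstanding-command slot in
 * its LSW, which is why the lookup above uses LSW(pkt->handle).  The helper
 * names are hypothetical.
 */
static inline uint16_t qla2x00_handle_to_slot(uint32_t handle)
{
	return handle & 0xffff;		/* LSW: outstanding_cmds[] index */
}

static inline uint16_t qla2x00_handle_to_que(uint32_t handle)
{
	return handle >> 16;		/* MSW: request queue number */
}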
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x"
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, fcport->vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, fcport->vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	sp->done(vha, sp, 0);
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t	handle_cnt;
	uint16_t	cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
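
/*
 * Illustrative sketch, not part of the driver: the consumer index above wraps
 * to zero when it reaches the ring length.  The same advance in isolation:
 */
static inline uint16_t qla2x00_next_rsp_index(uint16_t index, uint16_t length)
{
	return (index + 1 == length) ? 0 : index + 1;
}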
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
	uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}
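
/*
 * Illustrative sketch, not part of the driver: after the host_to_fcp_swap()
 * performed in qla2x00_status_entry(), each 8-byte DIF tuple reads app tag,
 * guard, then ref tag, which is why the offsets above are +0, +2 and +4.
 * The helper name is hypothetical.
 */
static inline void qla2x00_unpack_dif_tuple(const uint8_t *p,
	uint16_t *app_tag, uint16_t *guard, uint32_t *ref_tag)
{
	*app_tag = le16_to_cpu(*(uint16_t *)(p + 0));
	*guard   = le16_to_cpu(*(uint16_t *)(p + 2));
	*ref_tag = le32_to_cpu(*(uint32_t *)(p + 4));
}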
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
	struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct fc_bsg_job *bsg_job = NULL;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;
		bsg_job = sp->u.bsg_job;
	} else {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_job->reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_job->reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always return DID_OK, bsg will send the vendor specific response
	 * in this case only */
	sp->done(vha, sp, (DID_OK << 16));
}
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_do_host_ramp_up(vha);
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered to be
			 * task not completed.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}
2142 res = DID_OK << 16 | lscsi_status;
2147 * Check to see if SCSI Status is non zero. If so report SCSI
2150 if (lscsi_status != 0) {
2151 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2152 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2153 "QUEUE FULL detected.\n");
2157 if (lscsi_status != SS_CHECK_CONDITION)
2160 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2161 if (!(scsi_status & SS_SENSE_LEN_VALID))
2164 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2165 sense_len, rsp, res);
2169 case CS_PORT_LOGGED_OUT:
2170 case CS_PORT_CONFIG_CHG:
2173 case CS_PORT_UNAVAILABLE:
2178 * We are going to have the fc class block the rport
2179 * while we try to recover so instruct the mid layer
2180 * to requeue until the class decides how to handle this.
2182 res = DID_TRANSPORT_DISRUPTED << 16;
2184 if (comp_status == CS_TIMEOUT) {
2185 if (IS_FWI2_CAPABLE(ha))
2187 else if ((le16_to_cpu(sts->status_flags) &
2188 SF_LOGOUT_SENT) == 0)
2192 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
2193 "Port down status: port-state=0x%x.\n",
2194 atomic_read(&fcport->state));
2196 if (atomic_read(&fcport->state) == FCS_ONLINE)
2197 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2201 res = DID_RESET << 16;
2205 logit = qla2x00_handle_dif_error(sp, sts24);
2210 res = DID_ERROR << 16;
2212 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2215 if (state_flags & BIT_4)
2216 scmd_printk(KERN_WARNING, cp,
2217 "Unsupported device '%s' found.\n",
2218 cp->device->vendor);
2222 res = DID_ERROR << 16;
    if (logit)
        ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
            "FCP command status: 0x%x-0x%x (0x%x) "
            "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
            "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
            "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
            comp_status, scsi_status, res, vha->host_no,
            cp->device->id, cp->device->lun, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
            cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
            cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
            cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
            resid_len, fw_resid_len);

    if (!res)
        qla2x00_do_host_ramp_up(vha);

    if (rsp->status_srb == NULL)
        sp->done(ha, sp, res);
}
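/*
 * Note on completion flow: when a CHECK CONDITION carries more sense data
 * than fits in the status IOCB, rsp->status_srb is left pointing at the
 * command and ->done() is deferred; qla2x00_status_cont_entry() below then
 * finishes the command once the remaining sense bytes have been copied in.
 */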
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
    uint8_t sense_sz = 0;
    struct qla_hw_data *ha = rsp->hw;
    struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
    srb_t *sp = rsp->status_srb;
    struct scsi_cmnd *cp;
    uint32_t sense_len;
    uint8_t *sense_ptr;

    if (!sp || !GET_CMD_SENSE_LEN(sp))
        return;

    sense_len = GET_CMD_SENSE_LEN(sp);
    sense_ptr = GET_CMD_SENSE_PTR(sp);

    cp = GET_CMD_SP(sp);
    if (cp == NULL) {
        ql_log(ql_log_warn, vha, 0x3025,
            "cmd is NULL: already returned to OS (sp=%p).\n", sp);

        rsp->status_srb = NULL;
        return;
    }

    if (sense_len > sizeof(pkt->data))
        sense_sz = sizeof(pkt->data);
    else
        sense_sz = sense_len;

    /* Move sense data. */
    if (IS_FWI2_CAPABLE(ha))
        host_to_fcp_swap(pkt->data, sizeof(pkt->data));
    memcpy(sense_ptr, pkt->data, sense_sz);
    ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
        sense_ptr, sense_sz);

    sense_len -= sense_sz;
    sense_ptr += sense_sz;

    SET_CMD_SENSE_PTR(sp, sense_ptr);
    SET_CMD_SENSE_LEN(sp, sense_len);

    /* Place command on done queue. */
    if (sense_len == 0) {
        rsp->status_srb = NULL;
        sp->done(ha, sp, cp->result);
    }
}
/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
    srb_t *sp;
    struct qla_hw_data *ha = vha->hw;
    const char func[] = "ERROR-IOCB";
    uint16_t que = MSW(pkt->handle);
    struct req_que *req = NULL;
    int res = DID_ERROR << 16;

    ql_dbg(ql_dbg_async, vha, 0x502a,
        "type of error status in response: 0x%x\n", pkt->entry_status);

    if (que >= ha->max_req_queues || !ha->req_q_map[que])
        goto fatal;

    req = ha->req_q_map[que];

    if (pkt->entry_status & RF_BUSY)
        res = DID_BUS_BUSY << 16;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (sp) {
        sp->done(ha, sp, res);
        return;
    }
fatal:
    ql_log(ql_log_warn, vha, 0x5030,
        "Error entry - invalid handle/queue.\n");

    if (IS_QLA82XX(ha))
        set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
    else
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    qla2xxx_wake_dpc(vha);
}
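/*
 * An error IOCB whose handle cannot be mapped back to an outstanding
 * command is treated as fatal: the command can no longer be completed,
 * so the only safe recovery is a full ISP abort (or an FCoE context
 * reset on ISP82xx), scheduled via the DPC thread above.
 */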
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
    uint16_t cnt;
    uint32_t mboxes;
    uint16_t __iomem *wptr;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

    /* Read all mbox registers? */
    mboxes = (1 << ha->mbx_count) - 1;
    if (!ha->mcp)
        ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
    else
        mboxes = ha->mcp->in_mb;

    /* Load return mailbox registers. */
    ha->flags.mbox_int = 1;
    ha->mailbox_out[0] = mb0;
    mboxes >>= 1;
    wptr = (uint16_t __iomem *)&reg->mailbox1;

    for (cnt = 1; cnt < ha->mbx_count; cnt++) {
        if (mboxes & BIT_0)
            ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
        mboxes >>= 1;
        wptr++;
    }
}
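/*
 * ha->mcp->in_mb is a bitmap of the mailbox registers the current command
 * expects back; only those registers are read in the loop above, which
 * avoids touching mailboxes the firmware did not update. When no command
 * context exists, every mailbox is read as a conservative fallback.
 */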
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
    struct rsp_que *rsp)
{
    struct sts_entry_24xx *pkt;
    struct qla_hw_data *ha = vha->hw;

    if (!vha->flags.online)
        return;

    while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
        pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
            rsp->ring_index = 0;
            rsp->ring_ptr = rsp->ring;
        } else {
            rsp->ring_ptr++;
        }

        if (pkt->entry_status != 0) {
            qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
            (void)qlt_24xx_process_response_error(vha, pkt);
            ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
            wmb();
            continue;
        }
        switch (pkt->entry_type) {
        case STATUS_TYPE:
            qla2x00_status_entry(vha, rsp, pkt);
            break;
        case STATUS_CONT_TYPE:
            qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
            break;
        case VP_RPT_ID_IOCB_TYPE:
            qla24xx_report_id_acquisition(vha,
                (struct vp_rpt_id_entry_24xx *)pkt);
            break;
        case LOGINOUT_PORT_IOCB_TYPE:
            qla24xx_logio_entry(vha, rsp->req,
                (struct logio_entry_24xx *)pkt);
            break;
        case TSK_MGMT_IOCB_TYPE:
            qla24xx_tm_iocb_entry(vha, rsp->req,
                (struct tsk_mgmt_entry *)pkt);
            break;
        case CT_IOCB_TYPE:
            qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
            break;
        case ELS_IOCB_TYPE:
            qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
            break;
        case ABTS_RECV_24XX:
            /* ensure that the ATIO queue is empty */
            qlt_24xx_process_atio_queue(vha);
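            /*
             * No break here: after draining the ATIO queue the ABTS
             * packet itself falls through and is handed to the
             * target-mode code together with the cases below.
             */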
        case ABTS_RESP_24XX:
        case CTIO_TYPE7:
        case NOTIFY_ACK_TYPE:
            qlt_response_pkt_all_vps(vha, (response_t *)pkt);
            break;
        case MARKER_TYPE:
            /*
             * Do nothing in this case; this check only keeps marker
             * entries from falling into the default case.
             */
            break;
        default:
            /* Type Not Supported. */
            ql_dbg(ql_dbg_async, vha, 0x5042,
                "Received unknown response pkt type %x "
                "entry status=%x.\n",
                pkt->entry_type, pkt->entry_status);
            break;
        }
        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
        wmb();
    }
    /* Adjust ring index */
    if (IS_QLA82XX(ha)) {
        struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
        WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
    } else
        WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
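/*
 * The ring consumer model used here: firmware produces entries and the
 * driver publishes how far it has consumed by writing rsp->ring_index to
 * the queue's out-pointer register. In outline (simplified sketch, not
 * driver code):
 *
 *     while (ring[consumer].signature != RESPONSE_PROCESSED) {
 *         handle(&ring[consumer]);
 *         ring[consumer].signature = RESPONSE_PROCESSED;
 *         consumer = (consumer + 1) % length;
 *     }
 *     writel(consumer, rsp_q_out);
 */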
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
    int rval;
    uint32_t cnt;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

    if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
        return;

    rval = QLA_SUCCESS;
    WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
    RD_REG_DWORD(&reg->iobase_addr);
    WRT_REG_DWORD(&reg->iobase_window, 0x0001);
    for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
        rval == QLA_SUCCESS; cnt--) {
        if (cnt) {
            WRT_REG_DWORD(&reg->iobase_window, 0x0001);
            udelay(10);
        } else
            rval = QLA_FUNCTION_TIMEOUT;
    }
    if (rval == QLA_SUCCESS)
        goto next_test;

    rval = QLA_SUCCESS;
    WRT_REG_DWORD(&reg->iobase_window, 0x0003);
    for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
        rval == QLA_SUCCESS; cnt--) {
        if (cnt) {
            WRT_REG_DWORD(&reg->iobase_window, 0x0003);
            udelay(10);
        } else
            rval = QLA_FUNCTION_TIMEOUT;
    }
    if (rval != QLA_SUCCESS)
        goto done;

next_test:
    if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
        ql_log(ql_log_info, vha, 0x504c,
            "Additional code -- 0x55AA.\n");

done:
    WRT_REG_DWORD(&reg->iobase_window, 0x0000);
    RD_REG_DWORD(&reg->iobase_window);
}
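/*
 * The accesses above go through the ISP's windowed register interface:
 * iobase_addr selects a register window (0x7C00 here) and iobase_window
 * is polled with BIT_0 as a ready flag, bounded by a simple countdown so
 * a wedged RISC cannot hang the interrupt path.
 */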
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_24xx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint32_t hccr;
    uint16_t mb[8];
    struct rsp_que *rsp;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x5059,
            "%s: NULL response queue pointer.\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;
    status = 0;

    if (unlikely(pci_channel_offline(ha->pdev)))
        return IRQ_HANDLED;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->host_status);
        if (stat & HSRX_RISC_PAUSED) {
            if (unlikely(pci_channel_offline(ha->pdev)))
                break;

            hccr = RD_REG_DWORD(&reg->hccr);

            ql_log(ql_log_warn, vha, 0x504b,
                "RISC paused -- HCCR=%x, Dumping firmware.\n",
                hccr);

            qla2xxx_check_risc_status(vha);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSRX_RISC_INT) == 0)
            break;
        switch (stat & 0xff) {
        case INTR_ROM_MB_SUCCESS:
        case INTR_ROM_MB_FAILED:
        case INTR_MB_SUCCESS:
        case INTR_MB_FAILED:
            qla24xx_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;
            break;
        case INTR_ASYNC_EVENT:
            mb[0] = MSW(stat);
            mb[1] = RD_REG_WORD(&reg->mailbox1);
            mb[2] = RD_REG_WORD(&reg->mailbox2);
            mb[3] = RD_REG_WORD(&reg->mailbox3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case INTR_RSP_QUE_UPDATE:
        case INTR_RSP_QUE_UPDATE_83XX:
            qla24xx_process_response_queue(vha, rsp);
            break;
        case INTR_ATIO_QUE_UPDATE:
            qlt_24xx_process_atio_queue(vha);
            break;
        case INTR_ATIO_RSP_QUE_UPDATE:
            qlt_24xx_process_atio_queue(vha);
            qla24xx_process_response_queue(vha, rsp);
            break;
        default:
            ql_dbg(ql_dbg_async, vha, 0x504f,
                "Unrecognized interrupt type (%d).\n", stat & 0xff);
            break;
        }
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        RD_REG_DWORD_RELAXED(&reg->hccr);
        if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
            ndelay(3500);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return IRQ_HANDLED;
}
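/*
 * The MBX_INTR_WAIT handshake above: a caller sleeping in the mailbox
 * code sets MBX_INTR_WAIT before issuing the command; when the completion
 * interrupt lands, the handler sets MBX_INTERRUPT and signals
 * mbx_intr_comp so the sleeper can collect the results from
 * ha->mailbox_out[].
 */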
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;
    struct scsi_qla_host *vha;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x505a,
            "%s: NULL response queue pointer.\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;

    spin_lock_irqsave(&ha->hardware_lock, flags);

    vha = pci_get_drvdata(ha->pdev);
    qla24xx_process_response_queue(vha, rsp);
    if (!ha->flags.disable_msix_handshake) {
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        RD_REG_DWORD_RELAXED(&reg->hccr);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return IRQ_HANDLED;
}
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x505b,
            "%s: NULL response queue pointer.\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;

    /* Clear the interrupt, if enabled, for this response queue */
    if (!ha->flags.disable_msix_handshake) {
        reg = &ha->iobase->isp24;
        spin_lock_irqsave(&ha->hardware_lock, flags);
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        RD_REG_DWORD_RELAXED(&reg->hccr);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
    }
    queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

    return IRQ_HANDLED;
}
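/*
 * Unlike qla24xx_msix_rsp_q() above, this handler does not walk the queue
 * in interrupt context; it only acks the interrupt and defers processing
 * to rsp->q_work, queued on the CPU derived from the queue id.
 */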
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;
    int status;
    uint32_t stat;
    uint32_t hccr;
    uint16_t mb[8];
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x505c,
            "%s: NULL response queue pointer.\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    do {
        stat = RD_REG_DWORD(&reg->host_status);
        if (stat & HSRX_RISC_PAUSED) {
            if (unlikely(pci_channel_offline(ha->pdev)))
                break;

            hccr = RD_REG_DWORD(&reg->hccr);

            ql_log(ql_log_info, vha, 0x5050,
                "RISC paused -- HCCR=%x, Dumping firmware.\n",
                hccr);

            qla2xxx_check_risc_status(vha);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSRX_RISC_INT) == 0)
            break;
        switch (stat & 0xff) {
        case INTR_ROM_MB_SUCCESS:
        case INTR_ROM_MB_FAILED:
        case INTR_MB_SUCCESS:
        case INTR_MB_FAILED:
            qla24xx_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;
            break;
        case INTR_ASYNC_EVENT:
            mb[0] = MSW(stat);
            mb[1] = RD_REG_WORD(&reg->mailbox1);
            mb[2] = RD_REG_WORD(&reg->mailbox2);
            mb[3] = RD_REG_WORD(&reg->mailbox3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case INTR_RSP_QUE_UPDATE:
        case INTR_RSP_QUE_UPDATE_83XX:
            qla24xx_process_response_queue(vha, rsp);
            break;
        case INTR_ATIO_QUE_UPDATE:
            qlt_24xx_process_atio_queue(vha);
            break;
        case INTR_ATIO_RSP_QUE_UPDATE:
            qlt_24xx_process_atio_queue(vha);
            qla24xx_process_response_queue(vha, rsp);
            break;
        default:
            ql_dbg(ql_dbg_async, vha, 0x5051,
                "Unrecognized interrupt type (%d).\n", stat & 0xff);
            break;
        }
    } while (0);
    WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return IRQ_HANDLED;
}
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
    const char *name;
    irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
    { "qla2xxx (default)", qla24xx_msix_default },
    { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
    { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
    { "qla2xxx (default)", qla82xx_msix_default },
    { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
    { "qla2xxx (default)", qla24xx_msix_default },
    { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
    { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
};
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
    int i;
    struct qla_msix_entry *qentry;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    for (i = 0; i < ha->msix_count; i++) {
        qentry = &ha->msix_entries[i];
        if (qentry->have_irq)
            free_irq(qentry->vector, qentry->rsp);
    }
    pci_disable_msix(ha->pdev);
    kfree(ha->msix_entries);
    ha->msix_entries = NULL;
    ha->flags.msix_enabled = 0;
    ql_dbg(ql_dbg_init, vha, 0x0042,
        "Disabled MSI-X.\n");
}
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
    int i, ret;
    struct msix_entry *entries;
    struct qla_msix_entry *qentry;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
        GFP_KERNEL);
    if (!entries) {
        ql_log(ql_log_warn, vha, 0x00bc,
            "Failed to allocate memory for msix_entry.\n");
        return -ENOMEM;
    }

    for (i = 0; i < ha->msix_count; i++)
        entries[i].entry = i;

    ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
    if (ret) {
        if (ret < MIN_MSIX_COUNT)
            goto msix_failed;

        ql_log(ql_log_warn, vha, 0x00c6,
            "MSI-X: Failed to enable support -- %d/%d. "
            "Retrying with %d vectors.\n",
            ha->msix_count, ret, ret);
        ha->msix_count = ret;
        ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
        if (ret) {
msix_failed:
            ql_log(ql_log_fatal, vha, 0x00c7,
                "MSI-X: Failed to enable support, "
                "giving up -- %d/%d.\n",
                ha->msix_count, ret);
            goto msix_out;
        }
        ha->max_rsp_queues = ha->msix_count - 1;
    }
    ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
        ha->msix_count, GFP_KERNEL);
    if (!ha->msix_entries) {
        ql_log(ql_log_fatal, vha, 0x00c8,
            "Failed to allocate memory for ha->msix_entries.\n");
        ret = -ENOMEM;
        goto msix_out;
    }
    ha->flags.msix_enabled = 1;

    for (i = 0; i < ha->msix_count; i++) {
        qentry = &ha->msix_entries[i];
        qentry->vector = entries[i].vector;
        qentry->entry = entries[i].entry;
        qentry->have_irq = 0;
        qentry->rsp = NULL;
    }
    /* Enable MSI-X vectors for the base queue */
    for (i = 0; i < ha->msix_count; i++) {
        qentry = &ha->msix_entries[i];
        if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
            ret = request_irq(qentry->vector,
                qla83xx_msix_entries[i].handler,
                0, qla83xx_msix_entries[i].name, rsp);
        } else if (IS_QLA82XX(ha)) {
            ret = request_irq(qentry->vector,
                qla82xx_msix_entries[i].handler,
                0, qla82xx_msix_entries[i].name, rsp);
        } else {
            ret = request_irq(qentry->vector,
                msix_entries[i].handler,
                0, msix_entries[i].name, rsp);
        }
        if (ret) {
            ql_log(ql_log_fatal, vha, 0x00cb,
                "MSI-X: unable to register handler -- %x/%d.\n",
                qentry->vector, ret);
            qla24xx_disable_msix(ha);
            ha->mqenable = 0;
            goto msix_out;
        }
        qentry->have_irq = 1;
        qentry->rsp = rsp;
        rsp->msix = qentry;
    }
    /* Enable MSI-X vector for response queue update for queue 0 */
    if (IS_QLA83XX(ha)) {
        if (ha->msixbase && ha->mqiobase &&
            (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
            ha->mqenable = 1;
    } else
        if (ha->mqiobase
            && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
            ha->mqenable = 1;
    ql_dbg(ql_dbg_multiq, vha, 0xc005,
        "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
        ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
    ql_dbg(ql_dbg_init, vha, 0x0055,
        "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
        ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
    kfree(entries);
    return ret;
}
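/*
 * Note that pci_enable_msix() (the legacy API used here) returns 0 on
 * success and a positive count of the vectors actually available when the
 * requested number cannot be granted; that is what the retry path above
 * relies on. The temporary entries[] array can be freed on exit because
 * the vector numbers have already been copied into ha->msix_entries[].
 */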
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
    int ret;
    device_reg_t __iomem *reg = ha->iobase;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    /* If possible, enable MSI-X. */
    if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
        !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
        goto skip_msi;

    if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
        (ha->pdev->subsystem_device == 0x7040 ||
         ha->pdev->subsystem_device == 0x7041 ||
         ha->pdev->subsystem_device == 0x1705)) {
        ql_log(ql_log_warn, vha, 0x0034,
            "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
            ha->pdev->subsystem_vendor,
            ha->pdev->subsystem_device);
        goto skip_msi;
    }

    if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
        ql_log(ql_log_warn, vha, 0x0035,
            "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
            ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
        goto skip_msix;
    }

    ret = qla24xx_enable_msix(ha, rsp);
    if (!ret) {
        ql_dbg(ql_dbg_init, vha, 0x0036,
            "MSI-X: Enabled (0x%X, 0x%X).\n",
            ha->chip_revision, ha->fw_attributes);
        goto clear_risc_ints;
    }
    ql_log(ql_log_info, vha, 0x0037,
        "MSI-X: Falling back to MSI mode -- %d.\n", ret);

skip_msix:
    if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
        !IS_QLA8001(ha) && !IS_QLA82XX(ha))
        goto skip_msi;
    ret = pci_enable_msi(ha->pdev);
    if (!ret) {
        ql_dbg(ql_dbg_init, vha, 0x0038,
            "MSI: Enabled.\n");
        ha->flags.msi_enabled = 1;
    } else
        ql_log(ql_log_warn, vha, 0x0039,
            "MSI: Falling back to INTa mode -- %d.\n", ret);

    /* Skip INTx on ISP82xx. */
    if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
        return QLA_FUNCTION_FAILED;

skip_msi:
    ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
        ha->flags.msi_enabled ? 0 : IRQF_SHARED,
        QLA2XXX_DRIVER_NAME, rsp);
    if (ret) {
        ql_log(ql_log_warn, vha, 0x003a,
            "Failed to reserve interrupt %d; already in use.\n",
            ha->pdev->irq);
        goto fail;
    } else if (!ha->flags.msi_enabled)
        ql_dbg(ql_dbg_init, vha, 0x0125,
            "INTa mode: Enabled.\n");
clear_risc_ints:
    spin_lock_irq(&ha->hardware_lock);
    if (!IS_FWI2_CAPABLE(ha))
        WRT_REG_WORD(&reg->isp.semaphore, 0);
    spin_unlock_irq(&ha->hardware_lock);
fail:
    return ret;
}
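/*
 * Interrupt setup thus degrades gracefully: MSI-X first, then MSI, then a
 * shared INTx line, with ISP82xx refusing the INTx fallback entirely.
 * Only non-FWI2 parts (ISP2100/2200/2300) need the semaphore register
 * cleared before interrupts are enabled.
 */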
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct rsp_que *rsp;

    /*
     * We need to check that ha->rsp_q_map is valid in case we are called
     * from a probe failure context.
     */
    if (!ha->rsp_q_map || !ha->rsp_q_map[0])
        return;
    rsp = ha->rsp_q_map[0];

    if (ha->flags.msix_enabled)
        qla24xx_disable_msix(ha);
    else if (ha->flags.msi_enabled) {
        free_irq(ha->pdev->irq, rsp);
        pci_disable_msi(ha->pdev);
    } else
        free_irq(ha->pdev->irq, rsp);
}
int qla25xx_request_irq(struct rsp_que *rsp)
{
    struct qla_hw_data *ha = rsp->hw;
    struct qla_init_msix_entry *intr = &msix_entries[2];
    struct qla_msix_entry *msix = rsp->msix;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    int ret;

    ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
    if (ret) {
        ql_log(ql_log_fatal, vha, 0x00e6,
            "MSI-X: Unable to register handler -- %x/%d.\n",