1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
7 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/cpu.h>
13 #include <linux/t10-pi.h>
14 #include <scsi/scsi_tcq.h>
15 #include <scsi/scsi_bsg_fc.h>
16 #include <scsi/scsi_eh.h>
17 #include <scsi/fc/fc_fs.h>
18 #include <linux/nvme-fc-driver.h>
20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
25 static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
26 struct purex_item *item);
27 static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
29 static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
31 static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
32 void **pkt, struct rsp_que **rsp);
35 qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
37 void *pkt = &item->iocb;
38 uint16_t pkt_size = item->size;
40 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
41 "%s: Enter\n", __func__);
43 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
44 "-------- ELS REQ -------\n");
45 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
48 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt, 0);
51 const char *const port_state_str[] = {
52 [FCS_UNKNOWN] = "Unknown",
53 [FCS_UNCONFIGURED] = "UNCONFIGURED",
54 [FCS_DEVICE_DEAD] = "DEAD",
55 [FCS_DEVICE_LOST] = "LOST",
56 [FCS_ONLINE] = "ONLINE"
60 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
62 struct abts_entry_24xx *abts =
63 (struct abts_entry_24xx *)&pkt->iocb;
64 struct qla_hw_data *ha = vha->hw;
65 struct els_entry_24xx *rsp_els;
66 struct abts_entry_24xx *abts_rsp;
71 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
73 ql_log(ql_log_warn, vha, 0x0287,
74 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
75 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
76 abts->seq_id, abts->seq_cnt);
77 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
78 "-------- ABTS RCV -------\n");
79 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
80 (uint8_t *)abts, sizeof(*abts));
82 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
85 ql_log(ql_log_warn, vha, 0x0287,
86 "Failed allocate dma buffer ABTS/ELS RSP.\n");
90 /* terminate exchange */
91 rsp_els->entry_type = ELS_IOCB_TYPE;
92 rsp_els->entry_count = 1;
93 rsp_els->nport_handle = cpu_to_le16(~0);
94 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
95 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
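/*
 * The ~0 (0xffff) nport_handle presumably indicates that the exchange
 * is not tied to a logged-in session; EPD_RX_XCHG in control_flags asks
 * the firmware to terminate the receive exchange identified by
 * rx_xchg_address (see the "terminate exchange" message below).
 */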
96 ql_dbg(ql_dbg_init, vha, 0x0283,
97 "Sending ELS Response to terminate exchange %#x...\n",
98 abts->rx_xch_addr_to_abort);
99 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
100 "-------- ELS RSP -------\n");
101 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
102 (uint8_t *)rsp_els, sizeof(*rsp_els));
103 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
105 ql_log(ql_log_warn, vha, 0x0288,
106 "%s: iocb failed to execute -> %x\n", __func__, rval);
107 } else if (rsp_els->comp_status) {
108 ql_log(ql_log_warn, vha, 0x0289,
109 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
110 __func__, rsp_els->comp_status,
111 rsp_els->error_subcode_1, rsp_els->error_subcode_2);
113 ql_dbg(ql_dbg_init, vha, 0x028a,
114 "%s: abort exchange done.\n", __func__);
117 /* send ABTS response */
118 abts_rsp = (void *)rsp_els;
119 memset(abts_rsp, 0, sizeof(*abts_rsp));
120 abts_rsp->entry_type = ABTS_RSP_TYPE;
121 abts_rsp->entry_count = 1;
122 abts_rsp->nport_handle = abts->nport_handle;
123 abts_rsp->vp_idx = abts->vp_idx;
124 abts_rsp->sof_type = abts->sof_type & 0xf0;
125 abts_rsp->rx_xch_addr = abts->rx_xch_addr;
126 abts_rsp->d_id[0] = abts->s_id[0];
127 abts_rsp->d_id[1] = abts->s_id[1];
128 abts_rsp->d_id[2] = abts->s_id[2];
129 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
130 abts_rsp->s_id[0] = abts->d_id[0];
131 abts_rsp->s_id[1] = abts->d_id[1];
132 abts_rsp->s_id[2] = abts->d_id[2];
133 abts_rsp->cs_ctl = abts->cs_ctl;
134 /* Build response F_CTL: flip bit 23 of the received F_CTL and set last/end sequence + sequence initiative */
135 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
136 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
137 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
138 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
139 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
140 abts_rsp->type = FC_TYPE_BLD;
141 abts_rsp->rx_id = abts->rx_id;
142 abts_rsp->ox_id = abts->ox_id;
143 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
144 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
145 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
146 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
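/*
 * The BA_ACC payload echoes the OX_ID/RX_ID of the aborted exchange;
 * a high_seq_cnt of 0xffff follows the usual FC-FS convention for
 * "entire exchange aborted".
 */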
147 ql_dbg(ql_dbg_init, vha, 0x028b,
148 "Sending BA ACC response to ABTS %#x...\n",
149 abts->rx_xch_addr_to_abort);
150 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
151 "-------- ELS RSP -------\n");
152 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
153 (uint8_t *)abts_rsp, sizeof(*abts_rsp));
154 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
156 ql_log(ql_log_warn, vha, 0x028c,
157 "%s: iocb failed to execute -> %x\n", __func__, rval);
158 } else if (abts_rsp->comp_status) {
159 ql_log(ql_log_warn, vha, 0x028d,
160 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
161 __func__, abts_rsp->comp_status,
162 abts_rsp->payload.error.subcode1,
163 abts_rsp->payload.error.subcode2);
165 ql_dbg(ql_dbg_init, vha, 0x028ea,
166 "%s: done.\n", __func__);
169 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
173 * __qla_consume_iocb - tell the firmware that the driver has processed
174 * (consumed) the head IOCB along with its continuation IOCBs from the
175 * provided response queue.
176 * @vha: host adapter pointer
177 * @pkt: pointer to current packet. On return, this pointer shall move
178 * to the next packet.
179 * @rsp: response queue pointer.
181 * It is assumed that pkt is the head IOCB, not a continuation IOCB.
183 void __qla_consume_iocb(struct scsi_qla_host *vha,
184 void **pkt, struct rsp_que **rsp)
186 struct rsp_que *rsp_q = *rsp;
188 uint16_t entry_count_remaining;
189 struct purex_entry_24xx *purex = *pkt;
191 entry_count_remaining = purex->entry_count;
192 while (entry_count_remaining > 0) {
193 new_pkt = rsp_q->ring_ptr;
197 if (rsp_q->ring_index == rsp_q->length) {
198 rsp_q->ring_index = 0;
199 rsp_q->ring_ptr = rsp_q->ring;
204 new_pkt->signature = RESPONSE_PROCESSED;
205 /* flush signature */
207 --entry_count_remaining;
212 * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
213 * and save to provided buffer
214 * @vha: host adapter pointer
215 * @pkt: pointer to the Purex IOCB
216 * @rsp: response queue
217 * @buf: buffer to copy the extracted ELS payload into
218 * @buf_len: buffer length
220 int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
221 void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
223 struct purex_entry_24xx *purex = *pkt;
224 struct rsp_que *rsp_q = *rsp;
225 sts_cont_entry_t *new_pkt;
226 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
227 uint16_t buffer_copy_offset = 0;
228 uint16_t entry_count_remaining;
231 entry_count_remaining = purex->entry_count;
232 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
233 - PURX_ELS_HEADER_SIZE;
236 * The end of the payload may not fall on a 4-byte boundary. Round
237 * up / pad so there is room for the byte swap before saving the data.
239 tpad = roundup(total_bytes, 4);
241 if (buf_len < tpad) {
242 ql_dbg(ql_dbg_async, vha, 0x5084,
243 "%s buffer is too small %d < %d\n",
244 __func__, buf_len, tpad);
245 __qla_consume_iocb(vha, pkt, rsp);
249 pending_bytes = total_bytes = tpad;
250 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
251 sizeof(purex->els_frame_payload) : pending_bytes;
253 memcpy(buf, &purex->els_frame_payload[0], no_bytes);
254 buffer_copy_offset += no_bytes;
255 pending_bytes -= no_bytes;
256 --entry_count_remaining;
258 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
259 /* flush signature */
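/*
 * The head PUREX IOCB carries at most sizeof(purex->els_frame_payload)
 * bytes; the remainder of the ELS payload arrives in STATUS_CONT_TYPE
 * IOCBs, which the loop below walks and copies into the caller's buffer.
 */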
263 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
264 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
267 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
268 ql_log(ql_log_warn, vha, 0x507a,
269 "Unexpected IOCB type, partial data 0x%x\n",
275 if (rsp_q->ring_index == rsp_q->length) {
276 rsp_q->ring_index = 0;
277 rsp_q->ring_ptr = rsp_q->ring;
281 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
282 sizeof(new_pkt->data) : pending_bytes;
283 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
284 memcpy((buf + buffer_copy_offset), new_pkt->data,
286 buffer_copy_offset += no_bytes;
287 pending_bytes -= no_bytes;
288 --entry_count_remaining;
290 ql_log(ql_log_warn, vha, 0x5044,
291 "Attempt to copy more that we got, optimizing..%x\n",
293 memcpy((buf + buffer_copy_offset), new_pkt->data,
294 total_bytes - buffer_copy_offset);
297 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
298 /* flush signature */
302 if (pending_bytes != 0 || entry_count_remaining != 0) {
303 ql_log(ql_log_fatal, vha, 0x508b,
304 "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
305 total_bytes, entry_count_remaining);
308 } while (entry_count_remaining > 0);
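/*
 * The ELS payload arrives big-endian off the wire; swap it to CPU order
 * in place (total_bytes was rounded up to a 4-byte multiple above, so
 * the word-wise swap is safe).
 */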
310 be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);
316 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
317 * @irq: interrupt number
318 * @dev_id: SCSI driver HA context
320 * Called by the system whenever the host adapter generates an interrupt.
322 * Returns handled flag.
325 qla2100_intr_handler(int irq, void *dev_id)
327 scsi_qla_host_t *vha;
328 struct qla_hw_data *ha;
329 struct device_reg_2xxx __iomem *reg;
337 rsp = (struct rsp_que *) dev_id;
339 ql_log(ql_log_info, NULL, 0x505d,
340 "%s: NULL response queue pointer.\n", __func__);
345 reg = &ha->iobase->isp;
348 spin_lock_irqsave(&ha->hardware_lock, flags);
349 vha = pci_get_drvdata(ha->pdev);
350 for (iter = 50; iter--; ) {
351 hccr = rd_reg_word(&reg->hccr);
352 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
354 if (hccr & HCCR_RISC_PAUSE) {
355 if (pci_channel_offline(ha->pdev))
359 * Issue a "HARD" reset in order for the RISC interrupt
360 * bit to be cleared. Schedule a big hammer to get
361 * out of the RISC PAUSED state.
363 wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
364 rd_reg_word(&reg->hccr);
366 ha->isp_ops->fw_dump(vha);
367 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
369 } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
372 if (rd_reg_word(&reg->semaphore) & BIT_0) {
373 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
374 rd_reg_word(&reg->hccr);
376 /* Get mailbox data. */
377 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
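/*
 * Mailbox 0 value ranges (as decoded below): 0x4000-0x7fff is a
 * mailbox command completion, 0x8000-0xbfff an asynchronous event;
 * anything else is reported as an unrecognized interrupt.
 */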
378 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
379 qla2x00_mbx_completion(vha, mb[0]);
380 status |= MBX_INTERRUPT;
381 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
382 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
383 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
384 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
385 qla2x00_async_event(vha, rsp, mb);
388 ql_dbg(ql_dbg_async, vha, 0x5025,
389 "Unrecognized interrupt type (%d).\n",
392 /* Release mailbox registers. */
393 wrt_reg_word(&reg->semaphore, 0);
394 rd_reg_word(&reg->semaphore);
396 qla2x00_process_response_queue(rsp);
398 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
399 rd_reg_word(&reg->hccr);
402 qla2x00_handle_mbx_completion(ha, status);
403 spin_unlock_irqrestore(&ha->hardware_lock, flags);
405 return (IRQ_HANDLED);
409 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
411 /* Check for PCI disconnection */
412 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
413 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
414 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
415 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
416 qla_schedule_eeh_work(vha);
424 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
426 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
430 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
431 * @irq: interrupt number
432 * @dev_id: SCSI driver HA context
434 * Called by the system whenever the host adapter generates an interrupt.
436 * Returns handled flag.
439 qla2300_intr_handler(int irq, void *dev_id)
441 scsi_qla_host_t *vha;
442 struct device_reg_2xxx __iomem *reg;
449 struct qla_hw_data *ha;
452 rsp = (struct rsp_que *) dev_id;
454 ql_log(ql_log_info, NULL, 0x5058,
455 "%s: NULL response queue pointer.\n", __func__);
460 reg = &ha->iobase->isp;
463 spin_lock_irqsave(&ha->hardware_lock, flags);
464 vha = pci_get_drvdata(ha->pdev);
465 for (iter = 50; iter--; ) {
466 stat = rd_reg_dword(&reg->u.isp2300.host_status);
467 if (qla2x00_check_reg32_for_disconnect(vha, stat))
469 if (stat & HSR_RISC_PAUSED) {
470 if (unlikely(pci_channel_offline(ha->pdev)))
473 hccr = rd_reg_word(&reg->hccr);
475 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
476 ql_log(ql_log_warn, vha, 0x5026,
477 "Parity error -- HCCR=%x, Dumping "
478 "firmware.\n", hccr);
480 ql_log(ql_log_warn, vha, 0x5027,
481 "RISC paused -- HCCR=%x, Dumping "
482 "firmware.\n", hccr);
485 * Issue a "HARD" reset in order for the RISC
486 * interrupt bit to be cleared. Schedule a big
487 * hammer to get out of the RISC PAUSED state.
489 wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
490 rd_reg_word(&reg->hccr);
492 ha->isp_ops->fw_dump(vha);
493 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
495 } else if ((stat & HSR_RISC_INT) == 0)
498 switch (stat & 0xff) {
503 qla2x00_mbx_completion(vha, MSW(stat));
504 status |= MBX_INTERRUPT;
506 /* Release mailbox registers. */
507 wrt_reg_word(&reg->semaphore, 0);
511 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
512 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
513 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
514 qla2x00_async_event(vha, rsp, mb);
517 qla2x00_process_response_queue(rsp);
520 mb[0] = MBA_CMPLT_1_16BIT;
522 qla2x00_async_event(vha, rsp, mb);
525 mb[0] = MBA_SCSI_COMPLETION;
527 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
528 qla2x00_async_event(vha, rsp, mb);
531 ql_dbg(ql_dbg_async, vha, 0x5028,
532 "Unrecognized interrupt type (%d).\n", stat & 0xff);
535 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
536 rd_reg_word_relaxed(&reg->hccr);
538 qla2x00_handle_mbx_completion(ha, status);
539 spin_unlock_irqrestore(&ha->hardware_lock, flags);
541 return (IRQ_HANDLED);
545 * qla2x00_mbx_completion() - Process mailbox command completions.
546 * @vha: SCSI driver HA context
547 * @mb0: Mailbox0 register
550 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
554 __le16 __iomem *wptr;
555 struct qla_hw_data *ha = vha->hw;
556 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
558 /* Read all mbox registers? */
559 WARN_ON_ONCE(ha->mbx_count > 32);
560 mboxes = (1ULL << ha->mbx_count) - 1;
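/*
 * Start with "all mailbox registers valid"; when an active mailbox
 * command context (ha->mcp) exists, only the registers it expects
 * (mcp->in_mb) are read back below.
 */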
562 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
564 mboxes = ha->mcp->in_mb;
566 /* Load return mailbox registers. */
567 ha->flags.mbox_int = 1;
568 ha->mailbox_out[0] = mb0;
570 wptr = MAILBOX_REG(ha, reg, 1);
572 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
573 if (IS_QLA2200(ha) && cnt == 8)
574 wptr = MAILBOX_REG(ha, reg, 8);
575 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
576 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
577 else if (mboxes & BIT_0)
578 ha->mailbox_out[cnt] = rd_reg_word(wptr);
586 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
588 static char *event[] =
589 { "Complete", "Request Notification", "Time Extension" };
591 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
592 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
593 __le16 __iomem *wptr;
594 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
596 /* Seed data -- mailbox1 -> mailbox7. */
597 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
598 wptr = &reg24->mailbox1;
599 else if (IS_QLA8044(vha->hw))
600 wptr = &reg82->mailbox_out[1];
604 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
605 mb[cnt] = rd_reg_word(wptr);
607 ql_dbg(ql_dbg_async, vha, 0x5021,
608 "Inter-Driver Communication %s -- "
609 "%04x %04x %04x %04x %04x %04x %04x.\n",
610 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
611 mb[4], mb[5], mb[6]);
613 /* Handle IDC Error completion case. */
614 case MBA_IDC_COMPLETE:
616 vha->hw->flags.idc_compl_status = 1;
617 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
618 complete(&vha->hw->dcbx_comp);
623 /* Acknowledgement needed? [Notify && non-zero timeout]. */
624 timeout = (descr >> 8) & 0xf;
625 ql_dbg(ql_dbg_async, vha, 0x5022,
626 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
627 vha->host_no, event[aen & 0xff], timeout);
631 rval = qla2x00_post_idc_ack_work(vha, mb);
632 if (rval != QLA_SUCCESS)
633 ql_log(ql_log_warn, vha, 0x5023,
634 "IDC failed to post ACK.\n");
636 case MBA_IDC_TIME_EXT:
637 vha->hw->idc_extend_tmo = descr;
638 ql_dbg(ql_dbg_async, vha, 0x5087,
639 "%lu Inter-Driver Communication %s -- "
640 "Extend timeout by=%d.\n",
641 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
648 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
650 static const char *const link_speeds[] = {
651 "1", "2", "?", "4", "8", "16", "32", "64", "10"
653 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
655 if (IS_QLA2100(ha) || IS_QLA2200(ha))
656 return link_speeds[0];
657 else if (speed == 0x13)
658 return link_speeds[QLA_LAST_SPEED];
659 else if (speed < QLA_LAST_SPEED)
660 return link_speeds[speed];
662 return link_speeds[LS_UNKNOWN];
666 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
668 struct qla_hw_data *ha = vha->hw;
671 * 8200 AEN Interpretation:
673 * mb[1] = AEN Reason code
674 * mb[2] = LSW of Peg-Halt Status-1 Register
675 * mb[6] = MSW of Peg-Halt Status-1 Register
676 * mb[3] = LSW of Peg-Halt Status-2 register
677 * mb[7] = MSW of Peg-Halt Status-2 register
678 * mb[4] = IDC Device-State Register value
679 * mb[5] = IDC Driver-Presence Register value
681 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
682 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
683 mb[0], mb[1], mb[2], mb[6]);
684 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
685 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
686 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
688 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
689 IDC_HEARTBEAT_FAILURE)) {
690 ha->flags.nic_core_hung = 1;
691 ql_log(ql_log_warn, vha, 0x5060,
692 "83XX: F/W Error Reported: Check if reset required.\n");
694 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
695 uint32_t protocol_engine_id, fw_err_code, err_level;
698 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
699 * - PEG-Halt Status-1 Register:
700 * (LSW = mb[2], MSW = mb[6])
701 * Bits 0-7 = protocol-engine ID
702 * Bits 8-28 = f/w error code
703 * Bits 29-31 = Error-level
704 * Error-level 0x1 = Non-Fatal error
705 * Error-level 0x2 = Recoverable Fatal error
706 * Error-level 0x4 = UnRecoverable Fatal error
707 * - PEG-Halt Status-2 Register:
708 * (LSW = mb[3], MSW = mb[7])
710 protocol_engine_id = (mb[2] & 0xff);
711 fw_err_code = (((mb[2] & 0xff00) >> 8) |
712 ((mb[6] & 0x1fff) << 8));
713 err_level = ((mb[6] & 0xe000) >> 13);
714 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
715 "Register: protocol_engine_id=0x%x "
716 "fw_err_code=0x%x err_level=0x%x.\n",
717 protocol_engine_id, fw_err_code, err_level);
718 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
719 "Register: 0x%x%x.\n", mb[7], mb[3]);
720 if (err_level == ERR_LEVEL_NON_FATAL) {
721 ql_log(ql_log_warn, vha, 0x5063,
722 "Not a fatal error, f/w has recovered itself.\n");
723 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
724 ql_log(ql_log_fatal, vha, 0x5064,
725 "Recoverable Fatal error: Chip reset "
727 qla83xx_schedule_work(vha,
728 QLA83XX_NIC_CORE_RESET);
729 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
730 ql_log(ql_log_fatal, vha, 0x5065,
731 "Unrecoverable Fatal error: Set FAILED "
732 "state, reboot required.\n");
733 qla83xx_schedule_work(vha,
734 QLA83XX_NIC_CORE_UNRECOVERABLE);
738 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
739 uint16_t peg_fw_state, nw_interface_link_up;
740 uint16_t nw_interface_signal_detect, sfp_status;
741 uint16_t htbt_counter, htbt_monitor_enable;
742 uint16_t sfp_additional_info, sfp_multirate;
743 uint16_t sfp_tx_fault, link_speed, dcbx_status;
746 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
747 * - PEG-to-FC Status Register:
748 * (LSW = mb[2], MSW = mb[6])
749 * Bits 0-7 = Peg-Firmware state
750 * Bit 8 = N/W Interface Link-up
751 * Bit 9 = N/W Interface signal detected
752 * Bits 10-11 = SFP Status
753 * SFP Status 0x0 = SFP+ transceiver not expected
754 * SFP Status 0x1 = SFP+ transceiver not present
755 * SFP Status 0x2 = SFP+ transceiver invalid
756 * SFP Status 0x3 = SFP+ transceiver present and
758 * Bits 12-14 = Heartbeat Counter
759 * Bit 15 = Heartbeat Monitor Enable
760 * Bits 16-17 = SFP Additional Info
761 * SFP info 0x0 = Unrecognized transceiver for
763 * SFP info 0x1 = SFP+ brand validation failed
764 * SFP info 0x2 = SFP+ speed validation failed
765 * SFP info 0x3 = SFP+ access error
766 * Bit 18 = SFP Multirate
767 * Bit 19 = SFP Tx Fault
768 * Bits 20-22 = Link Speed
769 * Bits 23-27 = Reserved
770 * Bits 28-30 = DCBX Status
771 * DCBX Status 0x0 = DCBX Disabled
772 * DCBX Status 0x1 = DCBX Enabled
773 * DCBX Status 0x2 = DCBX Exchange error
776 peg_fw_state = (mb[2] & 0x00ff);
777 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
778 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
779 sfp_status = ((mb[2] & 0x0c00) >> 10);
780 htbt_counter = ((mb[2] & 0x7000) >> 12);
781 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
782 sfp_additional_info = (mb[6] & 0x0003);
783 sfp_multirate = ((mb[6] & 0x0004) >> 2);
784 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
785 link_speed = ((mb[6] & 0x0070) >> 4);
786 dcbx_status = ((mb[6] & 0x7000) >> 12);
788 ql_log(ql_log_warn, vha, 0x5066,
789 "Peg-to-Fc Status Register:\n"
790 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
791 "nw_interface_signal_detect=0x%x"
792 "\nsfp_statis=0x%x.\n ", peg_fw_state,
793 nw_interface_link_up, nw_interface_signal_detect,
795 ql_log(ql_log_warn, vha, 0x5067,
796 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
797 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
798 htbt_counter, htbt_monitor_enable,
799 sfp_additional_info, sfp_multirate);
800 ql_log(ql_log_warn, vha, 0x5068,
801 "sfp_tx_fault=0x%x, link_state=0x%x, "
802 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
805 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
808 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
809 ql_log(ql_log_warn, vha, 0x5069,
810 "Heartbeat Failure encountered, chip reset "
813 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
817 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
818 ql_log(ql_log_info, vha, 0x506a,
819 "IDC Device-State changed = 0x%x.\n", mb[4]);
820 if (ha->flags.nic_core_reset_owner)
822 qla83xx_schedule_work(vha, MBA_IDC_AEN);
827 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
829 struct qla_hw_data *ha = vha->hw;
838 spin_lock_irqsave(&ha->vport_slock, flags);
839 list_for_each_entry(vp, &ha->vp_list, list) {
840 vp_did = vp->d_id.b24;
841 if (vp_did == rscn_entry) {
846 spin_unlock_irqrestore(&ha->vport_slock, flags);
852 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
857 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
858 if (f->loop_id == loop_id)
864 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
869 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
870 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
873 else if (f->deleted == 0)
881 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
887 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
888 if (f->d_id.b24 == id->b24) {
891 else if (f->deleted == 0)
898 /* Shall be called only on supported adapters. */
900 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
902 struct qla_hw_data *ha = vha->hw;
903 bool reset_isp_needed = false;
905 ql_log(ql_log_warn, vha, 0x02f0,
906 "MPI Heartbeat stop. MPI reset is%s needed. "
907 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
908 mb[1] & BIT_8 ? "" : " not",
909 mb[0], mb[1], mb[2], mb[3]);
911 if ((mb[1] & BIT_8) == 0)
914 ql_log(ql_log_warn, vha, 0x02f1,
915 "MPI Heartbeat stop. FW dump needed\n");
917 if (ql2xfulldump_on_mpifail) {
918 ha->isp_ops->fw_dump(vha);
919 reset_isp_needed = true;
922 ha->isp_ops->mpi_fw_dump(vha, 1);
924 if (reset_isp_needed) {
925 vha->hw->flags.fw_init_done = 0;
926 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
927 qla2xxx_wake_dpc(vha);
931 static struct purex_item *
932 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
934 struct purex_item *item = NULL;
935 uint8_t item_hdr_size = sizeof(*item);
937 if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
938 item = kzalloc(item_hdr_size +
939 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
941 if (atomic_inc_return(&vha->default_item.in_use) == 1) {
942 item = &vha->default_item;
943 goto initialize_purex_header;
945 item = kzalloc(item_hdr_size, GFP_ATOMIC);
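/*
 * Small payloads prefer the preallocated vha->default_item, guarded by
 * its in_use counter; oversized payloads, or a default item that is
 * already busy, fall back to an atomic kzalloc().
 */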
949 ql_log(ql_log_warn, vha, 0x5092,
950 ">> Failed allocate purex list item.\n");
955 initialize_purex_header:
962 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
963 void (*process_item)(struct scsi_qla_host *vha,
964 struct purex_item *pkt))
966 struct purex_list *list = &vha->purex_list;
969 pkt->process_item = process_item;
971 spin_lock_irqsave(&list->lock, flags);
972 list_add_tail(&pkt->list, &list->head);
973 spin_unlock_irqrestore(&list->lock, flags);
975 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
979 * qla24xx_copy_std_pkt() - Copy over purex ELS which is
980 * contained in a single IOCB.
982 * @vha: SCSI driver HA context
985 static struct purex_item
986 *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
988 struct purex_item *item;
990 item = qla24xx_alloc_purex_item(vha,
991 QLA_DEFAULT_PAYLOAD_SIZE);
995 memcpy(&item->iocb, pkt, sizeof(item->iocb));
1000 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
1001 * span over multiple IOCBs.
1002 * @vha: SCSI driver HA context
1004 * @rsp: Response queue
1006 static struct purex_item *
1007 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
1008 struct rsp_que **rsp)
1010 struct purex_entry_24xx *purex = *pkt;
1011 struct rsp_que *rsp_q = *rsp;
1012 sts_cont_entry_t *new_pkt;
1013 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
1014 uint16_t buffer_copy_offset = 0;
1015 uint16_t entry_count, entry_count_remaining;
1016 struct purex_item *item;
1017 void *fpin_pkt = NULL;
1019 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
1020 - PURX_ELS_HEADER_SIZE;
1021 pending_bytes = total_bytes;
1022 entry_count = entry_count_remaining = purex->entry_count;
1023 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
1024 sizeof(purex->els_frame_payload) : pending_bytes;
1025 ql_log(ql_log_info, vha, 0x509a,
1026 "FPIN ELS, frame_size 0x%x, entry count %d\n",
1027 total_bytes, entry_count);
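/*
 * total_bytes is the ELS frame payload less the PUREX header; the first
 * chunk sits in this PUREX IOCB and the remainder is reassembled from
 * STATUS_CONT_TYPE IOCBs below into the allocated purex_item.
 */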
1029 item = qla24xx_alloc_purex_item(vha, total_bytes);
1033 fpin_pkt = &item->iocb;
1035 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
1036 buffer_copy_offset += no_bytes;
1037 pending_bytes -= no_bytes;
1038 --entry_count_remaining;
1040 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
1044 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
1045 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
1046 ql_dbg(ql_dbg_async, vha, 0x5084,
1047 "Ran out of IOCBs, partial data 0x%x\n",
1048 buffer_copy_offset);
1053 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
1056 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
1057 ql_log(ql_log_warn, vha, 0x507a,
1058 "Unexpected IOCB type, partial data 0x%x\n",
1059 buffer_copy_offset);
1063 rsp_q->ring_index++;
1064 if (rsp_q->ring_index == rsp_q->length) {
1065 rsp_q->ring_index = 0;
1066 rsp_q->ring_ptr = rsp_q->ring;
1070 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
1071 sizeof(new_pkt->data) : pending_bytes;
1072 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
1073 memcpy(((uint8_t *)fpin_pkt +
1074 buffer_copy_offset), new_pkt->data,
1076 buffer_copy_offset += no_bytes;
1077 pending_bytes -= no_bytes;
1078 --entry_count_remaining;
1080 ql_log(ql_log_warn, vha, 0x5044,
1081 "Attempt to copy more that we got, optimizing..%x\n",
1082 buffer_copy_offset);
1083 memcpy(((uint8_t *)fpin_pkt +
1084 buffer_copy_offset), new_pkt->data,
1085 total_bytes - buffer_copy_offset);
1088 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
1092 if (pending_bytes != 0 || entry_count_remaining != 0) {
1093 ql_log(ql_log_fatal, vha, 0x508b,
1094 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
1095 total_bytes, entry_count_remaining);
1096 qla24xx_free_purex_item(item);
1099 } while (entry_count_remaining > 0);
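/*
 * Restore the byte order of the assembled FPIN payload; the item is
 * presumably queued later for qla27xx_process_purex_fpin(), which hands
 * it to fc_host_fpin_rcv().
 */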
1100 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
1105 * qla2x00_async_event() - Process asynchronous events.
1106 * @vha: SCSI driver HA context
1107 * @rsp: response queue
1108 * @mb: Mailbox registers (0 - 3)
1111 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1113 uint16_t handle_cnt;
1115 uint32_t handles[5];
1116 struct qla_hw_data *ha = vha->hw;
1117 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1118 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1119 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1120 uint32_t rscn_entry, host_pid;
1121 unsigned long flags;
1122 fc_port_t *fcport = NULL;
1124 if (!vha->hw->flags.fw_started)
1127 /* Setup to process RIO completion. */
1129 if (IS_CNA_CAPABLE(ha))
1132 case MBA_SCSI_COMPLETION:
1133 handles[0] = make_handle(mb[2], mb[1]);
1136 case MBA_CMPLT_1_16BIT:
1139 mb[0] = MBA_SCSI_COMPLETION;
1141 case MBA_CMPLT_2_16BIT:
1145 mb[0] = MBA_SCSI_COMPLETION;
1147 case MBA_CMPLT_3_16BIT:
1152 mb[0] = MBA_SCSI_COMPLETION;
1154 case MBA_CMPLT_4_16BIT:
1158 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1160 mb[0] = MBA_SCSI_COMPLETION;
1162 case MBA_CMPLT_5_16BIT:
1166 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1167 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
1169 mb[0] = MBA_SCSI_COMPLETION;
1171 case MBA_CMPLT_2_32BIT:
1172 handles[0] = make_handle(mb[2], mb[1]);
1173 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
1174 RD_MAILBOX_REG(ha, reg, 6));
1176 mb[0] = MBA_SCSI_COMPLETION;
1183 case MBA_SCSI_COMPLETION: /* Fast Post */
1184 if (!vha->flags.online)
1187 for (cnt = 0; cnt < handle_cnt; cnt++)
1188 qla2x00_process_completed_request(vha, rsp->req,
1192 case MBA_RESET: /* Reset */
1193 ql_dbg(ql_dbg_async, vha, 0x5002,
1194 "Asynchronous RESET.\n");
1196 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1199 case MBA_SYSTEM_ERR: /* System Error */
1204 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1205 IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1208 m[0] = rd_reg_word(&reg24->mailbox4);
1209 m[1] = rd_reg_word(&reg24->mailbox5);
1210 m[2] = rd_reg_word(&reg24->mailbox6);
1211 mbx = m[3] = rd_reg_word(&reg24->mailbox7);
1213 ql_log(ql_log_warn, vha, 0x5003,
1214 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
1215 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
1217 ql_log(ql_log_warn, vha, 0x5003,
1218 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
1219 mb[1], mb[2], mb[3]);
1221 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
1222 rd_reg_word(&reg24->mailbox7) & BIT_8)
1223 ha->isp_ops->mpi_fw_dump(vha, 1);
1224 ha->isp_ops->fw_dump(vha);
1225 ha->flags.fw_init_done = 0;
1228 if (IS_FWI2_CAPABLE(ha)) {
1229 if (mb[1] == 0 && mb[2] == 0) {
1230 ql_log(ql_log_fatal, vha, 0x5004,
1231 "Unrecoverable Hardware Error: adapter "
1232 "marked OFFLINE!\n");
1233 vha->flags.online = 0;
1234 vha->device_flags |= DFLG_DEV_FAILED;
1236 /* Check to see if MPI timeout occurred */
1237 if ((mbx & MBX_3) && (ha->port_no == 0))
1238 set_bit(MPI_RESET_NEEDED,
1241 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1243 } else if (mb[1] == 0) {
1244 ql_log(ql_log_fatal, vha, 0x5005,
1245 "Unrecoverable Hardware Error: adapter marked "
1247 vha->flags.online = 0;
1248 vha->device_flags |= DFLG_DEV_FAILED;
1250 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1253 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
1254 ql_log(ql_log_warn, vha, 0x5006,
1255 "ISP Request Transfer Error (%x).\n", mb[1]);
1259 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1262 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
1263 ql_log(ql_log_warn, vha, 0x5007,
1264 "ISP Response Transfer Error (%x).\n", mb[1]);
1268 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1271 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
1272 ql_dbg(ql_dbg_async, vha, 0x5008,
1273 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
1276 case MBA_LOOP_INIT_ERR:
1277 ql_log(ql_log_warn, vha, 0x5090,
1278 "LOOP INIT ERROR (%x).\n", mb[1]);
1279 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1282 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
1283 ha->flags.lip_ae = 1;
1285 ql_dbg(ql_dbg_async, vha, 0x5009,
1286 "LIP occurred (%x).\n", mb[1]);
1288 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1289 atomic_set(&vha->loop_state, LOOP_DOWN);
1290 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1291 qla2x00_mark_all_devices_lost(vha);
1295 atomic_set(&vha->vp_state, VP_FAILED);
1296 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1299 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1300 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1302 vha->flags.management_server_logged_in = 0;
1303 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1306 case MBA_LOOP_UP: /* Loop Up Event */
1307 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1308 ha->link_data_rate = PORT_SPEED_1GB;
1310 ha->link_data_rate = mb[1];
1312 ql_log(ql_log_info, vha, 0x500a,
1313 "LOOP UP detected (%s Gbps).\n",
1314 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1316 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1318 ql_log(ql_log_info, vha, 0x11a0,
1319 "FEC=enabled (link up).\n");
1322 vha->flags.management_server_logged_in = 0;
1323 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1325 if (vha->link_down_time < vha->hw->port_down_retry_count) {
1326 vha->short_link_down_cnt++;
1327 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
1332 case MBA_LOOP_DOWN: /* Loop Down Event */
1334 ha->flags.lip_ae = 0;
1335 ha->current_topology = 0;
1336 vha->link_down_time = 0;
1338 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
1339 ? rd_reg_word(&reg24->mailbox4) : 0;
1340 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
1342 ql_log(ql_log_info, vha, 0x500b,
1343 "LOOP DOWN detected (%x %x %x %x).\n",
1344 mb[1], mb[2], mb[3], mbx);
1346 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1347 atomic_set(&vha->loop_state, LOOP_DOWN);
1348 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1350 * In case of loop down, restore the WWPN from
1351 * NVRAM on FA-WWPN capable ISPs.
1352 * Restore for the physical port only.
1355 if (ha->flags.fawwpn_enabled &&
1356 (ha->current_topology == ISP_CFG_F)) {
1357 memcpy(vha->port_name, ha->port_name, WWN_SIZE);
1358 fc_host_port_name(vha->host) =
1359 wwn_to_u64(vha->port_name);
1360 ql_dbg(ql_dbg_init + ql_dbg_verbose,
361 vha, 0x00d8, "LOOP DOWN detected, "
1362 "restore WWPN %016llx\n",
1363 wwn_to_u64(vha->port_name));
1366 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
1369 vha->device_flags |= DFLG_NO_CABLE;
1370 qla2x00_mark_all_devices_lost(vha);
1374 atomic_set(&vha->vp_state, VP_FAILED);
1375 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1378 vha->flags.management_server_logged_in = 0;
1379 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1380 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1383 case MBA_LIP_RESET: /* LIP reset occurred */
1384 ql_dbg(ql_dbg_async, vha, 0x500c,
1385 "LIP reset occurred (%x).\n", mb[1]);
1387 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1388 atomic_set(&vha->loop_state, LOOP_DOWN);
1389 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1390 qla2x00_mark_all_devices_lost(vha);
1394 atomic_set(&vha->vp_state, VP_FAILED);
1395 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1398 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1400 ha->operating_mode = LOOP;
1401 vha->flags.management_server_logged_in = 0;
1402 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1405 /* case MBA_DCBX_COMPLETE: */
1406 case MBA_POINT_TO_POINT: /* Point-to-Point */
1407 ha->flags.lip_ae = 0;
1412 if (IS_CNA_CAPABLE(ha)) {
1413 ql_dbg(ql_dbg_async, vha, 0x500d,
1414 "DCBX Completed -- %04x %04x %04x.\n",
1415 mb[1], mb[2], mb[3]);
1416 if (ha->notify_dcbx_comp && !vha->vp_idx)
1417 complete(&ha->dcbx_comp);
1420 ql_dbg(ql_dbg_async, vha, 0x500e,
1421 "Asynchronous P2P MODE received.\n");
1424 * Until there's a transition from loop down to loop up, treat
1425 * this as loop down only.
1427 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1428 atomic_set(&vha->loop_state, LOOP_DOWN);
1429 if (!atomic_read(&vha->loop_down_timer))
1430 atomic_set(&vha->loop_down_timer,
1433 qla2x00_mark_all_devices_lost(vha);
1437 atomic_set(&vha->vp_state, VP_FAILED);
1438 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1441 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
1442 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1444 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1445 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1447 vha->flags.management_server_logged_in = 0;
1450 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
1454 ql_dbg(ql_dbg_async, vha, 0x500f,
1455 "Configuration change detected: value=%x.\n", mb[1]);
1457 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1458 atomic_set(&vha->loop_state, LOOP_DOWN);
1459 if (!atomic_read(&vha->loop_down_timer))
1460 atomic_set(&vha->loop_down_timer,
1462 qla2x00_mark_all_devices_lost(vha);
1466 atomic_set(&vha->vp_state, VP_FAILED);
1467 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1470 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1471 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1474 case MBA_PORT_UPDATE: /* Port database update */
1476 * Handle only global and vn-port update events
1479 * mb[1] = N_Port handle of changed port
1480 * OR 0xffff for global event
1481 * mb[2] = New login state
1482 * 7 = Port logged out
1483 * mb[3] = LSB is vp_idx, 0xff = all vps
1485 * Skip processing if:
1486 * Event is global, vp_idx is NOT all vps,
1487 * vp_idx does not match
1488 * Event is not global, vp_idx does not match
1490 if (IS_QLA2XXX_MIDTYPE(ha) &&
1491 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
1492 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
1496 ql_dbg(ql_dbg_async, vha, 0x5010,
1497 "Port %s %04x %04x %04x.\n",
1498 mb[1] == 0xffff ? "unavailable" : "logout",
1499 mb[1], mb[2], mb[3]);
1501 if (mb[1] == 0xffff)
1502 goto global_port_update;
1504 if (mb[1] == NPH_SNS_LID(ha)) {
1505 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1506 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1510 /* use handle_cnt for loop id/nport handle */
1511 if (IS_FWI2_CAPABLE(ha))
1512 handle_cnt = NPH_SNS;
1514 handle_cnt = SIMPLE_NAME_SERVER;
1515 if (mb[1] == handle_cnt) {
1516 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1517 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1522 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1525 if (atomic_read(&fcport->state) != FCS_ONLINE)
1527 ql_dbg(ql_dbg_async, vha, 0x508a,
1528 "Marking port lost loopid=%04x portid=%06x.\n",
1529 fcport->loop_id, fcport->d_id.b24);
1530 if (qla_ini_mode_enabled(vha)) {
1531 fcport->logout_on_delete = 0;
1532 qlt_schedule_sess_for_deletion(fcport);
1537 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1538 atomic_set(&vha->loop_state, LOOP_DOWN);
1539 atomic_set(&vha->loop_down_timer,
1541 vha->device_flags |= DFLG_NO_CABLE;
1542 qla2x00_mark_all_devices_lost(vha);
1546 atomic_set(&vha->vp_state, VP_FAILED);
1547 fc_vport_set_state(vha->fc_vport,
1549 qla2x00_mark_all_devices_lost(vha);
1552 vha->flags.management_server_logged_in = 0;
1553 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1558 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1559 * event etc. earlier indicating loop is down) then process
1560 * it. Otherwise ignore it and Wait for RSCN to come in.
1562 atomic_set(&vha->loop_down_timer, 0);
1563 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1564 !ha->flags.n2n_ae &&
1565 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1566 ql_dbg(ql_dbg_async, vha, 0x5011,
1567 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1568 mb[1], mb[2], mb[3]);
1572 ql_dbg(ql_dbg_async, vha, 0x5012,
1573 "Port database changed %04x %04x %04x.\n",
1574 mb[1], mb[2], mb[3]);
1577 * Mark all devices as missing so we will login again.
1579 atomic_set(&vha->loop_state, LOOP_UP);
1580 vha->scan.scan_retry = 0;
1582 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1583 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1584 set_bit(VP_CONFIG_OK, &vha->vp_flags);
1587 case MBA_RSCN_UPDATE: /* State Change Registration */
1588 /* Check if the Vport has issued a SCR */
1589 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1591 /* Only handle SCNs for our Vport index. */
1592 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1595 ql_log(ql_log_warn, vha, 0x5013,
1596 "RSCN database changed -- %04x %04x %04x.\n",
1597 mb[1], mb[2], mb[3]);
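/*
 * Affected port ID from the RSCN: domain in the low byte of mb[1],
 * area/AL_PA in mb[2]; compare against this host's own 24-bit port ID
 * below.
 */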
1599 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1600 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1601 | vha->d_id.b.al_pa;
1602 if (rscn_entry == host_pid) {
1603 ql_dbg(ql_dbg_async, vha, 0x5014,
1604 "Ignoring RSCN update to local host "
1605 "port ID (%06x).\n", host_pid);
1609 /* Ignore reserved bits from RSCN-payload. */
1610 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1612 /* Skip RSCNs for virtual ports on the same physical port */
1613 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1616 atomic_set(&vha->loop_down_timer, 0);
1617 vha->flags.management_server_logged_in = 0;
1619 struct event_arg ea;
1621 memset(&ea, 0, sizeof(ea));
1622 ea.id.b24 = rscn_entry;
1623 ea.id.b.rsvd_1 = rscn_entry >> 24;
1624 qla2x00_handle_rscn(vha, &ea);
1625 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1628 case MBA_CONGN_NOTI_RECV:
1629 if (!ha->flags.scm_enabled ||
1630 mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
1633 if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
1634 ql_dbg(ql_dbg_async, vha, 0x509b,
1635 "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
1636 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
1637 ql_log(ql_log_warn, vha, 0x509b,
1638 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
1641 /* case MBA_RIO_RESPONSE: */
1642 case MBA_ZIO_RESPONSE:
1643 ql_dbg(ql_dbg_async, vha, 0x5015,
1644 "[R|Z]IO update completion.\n");
1646 if (IS_FWI2_CAPABLE(ha))
1647 qla24xx_process_response_queue(vha, rsp);
1649 qla2x00_process_response_queue(rsp);
1652 case MBA_DISCARD_RND_FRAME:
1653 ql_dbg(ql_dbg_async, vha, 0x5016,
1654 "Discard RND Frame -- %04x %04x %04x.\n",
1655 mb[1], mb[2], mb[3]);
1656 vha->interface_err_cnt++;
1659 case MBA_TRACE_NOTIFICATION:
1660 ql_dbg(ql_dbg_async, vha, 0x5017,
1661 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1664 case MBA_ISP84XX_ALERT:
1665 ql_dbg(ql_dbg_async, vha, 0x5018,
1666 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1667 mb[1], mb[2], mb[3]);
1669 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1671 case A84_PANIC_RECOVERY:
1672 ql_log(ql_log_info, vha, 0x5019,
1673 "Alert 84XX: panic recovery %04x %04x.\n",
1676 case A84_OP_LOGIN_COMPLETE:
1677 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1678 ql_log(ql_log_info, vha, 0x501a,
1679 "Alert 84XX: firmware version %x.\n",
1680 ha->cs84xx->op_fw_version);
1682 case A84_DIAG_LOGIN_COMPLETE:
1683 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1684 ql_log(ql_log_info, vha, 0x501b,
1685 "Alert 84XX: diagnostic firmware version %x.\n",
1686 ha->cs84xx->diag_fw_version);
1688 case A84_GOLD_LOGIN_COMPLETE:
1689 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1690 ha->cs84xx->fw_update = 1;
1691 ql_log(ql_log_info, vha, 0x501c,
1692 "Alert 84XX: gold firmware version %x.\n",
1693 ha->cs84xx->gold_fw_version);
1696 ql_log(ql_log_warn, vha, 0x501d,
1697 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1698 mb[1], mb[2], mb[3]);
1700 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1702 case MBA_DCBX_START:
1703 ql_dbg(ql_dbg_async, vha, 0x501e,
1704 "DCBX Started -- %04x %04x %04x.\n",
1705 mb[1], mb[2], mb[3]);
1707 case MBA_DCBX_PARAM_UPDATE:
1708 ql_dbg(ql_dbg_async, vha, 0x501f,
1709 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1710 mb[1], mb[2], mb[3]);
1712 case MBA_FCF_CONF_ERR:
1713 ql_dbg(ql_dbg_async, vha, 0x5020,
1714 "FCF Configuration Error -- %04x %04x %04x.\n",
1715 mb[1], mb[2], mb[3]);
1717 case MBA_IDC_NOTIFY:
1718 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1719 mb[4] = rd_reg_word(&reg24->mailbox4);
1720 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1721 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1722 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1723 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1725 * Extend loop down timer since port is active.
1727 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1728 atomic_set(&vha->loop_down_timer,
1730 qla2xxx_wake_dpc(vha);
1734 case MBA_IDC_COMPLETE:
1735 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1736 complete(&ha->lb_portup_comp);
1738 case MBA_IDC_TIME_EXT:
1739 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1741 qla81xx_idc_event(vha, mb[0], mb[1]);
1745 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1747 qla27xx_handle_8200_aen(vha, mb);
1748 } else if (IS_QLA83XX(ha)) {
1749 mb[4] = rd_reg_word(&reg24->mailbox4);
1750 mb[5] = rd_reg_word(&reg24->mailbox5);
1751 mb[6] = rd_reg_word(&reg24->mailbox6);
1752 mb[7] = rd_reg_word(&reg24->mailbox7);
1753 qla83xx_handle_8200_aen(vha, mb);
1755 ql_dbg(ql_dbg_async, vha, 0x5052,
1756 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
1757 mb[0], mb[1], mb[2], mb[3]);
1761 case MBA_DPORT_DIAGNOSTICS:
1762 if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
1763 (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
1764 vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
1765 ql_dbg(ql_dbg_async, vha, 0x5052,
1766 "D-Port Diagnostics: %04x %04x %04x %04x\n",
1767 mb[0], mb[1], mb[2], mb[3]);
1768 memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1769 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1770 static char *results[] = {
1771 "start", "done(pass)", "done(error)", "undefined" };
1772 static char *types[] = {
1773 "none", "dynamic", "static", "other" };
1774 uint result = mb[1] >> 0 & 0x3;
1775 uint type = mb[1] >> 6 & 0x3;
1776 uint sw = mb[1] >> 15 & 0x1;
1777 ql_dbg(ql_dbg_async, vha, 0x5052,
1778 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
1779 results[result], types[type], sw);
1781 static char *reasons[] = {
1782 "reserved", "unexpected reject",
1783 "unexpected phase", "retry exceeded",
1784 "timed out", "not supported",
1786 uint reason = mb[2] >> 0 & 0xf;
1787 uint phase = mb[2] >> 12 & 0xf;
1788 ql_dbg(ql_dbg_async, vha, 0x5052,
1789 "D-Port Diagnostics: reason=%s phase=%u \n",
1790 reason < 7 ? reasons[reason] : "other",
1796 case MBA_TEMPERATURE_ALERT:
1797 ql_dbg(ql_dbg_async, vha, 0x505e,
1798 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1801 case MBA_TRANS_INSERT:
1802 ql_dbg(ql_dbg_async, vha, 0x5091,
1803 "Transceiver Insertion: %04x\n", mb[1]);
1804 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
1807 case MBA_TRANS_REMOVE:
1808 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
1812 ql_dbg(ql_dbg_async, vha, 0x5057,
1813 "Unknown AEN:%04x %04x %04x %04x\n",
1814 mb[0], mb[1], mb[2], mb[3]);
1817 qlt_async_event(mb[0], vha, mb);
1819 if (!vha->vp_idx && ha->num_vhosts)
1820 qla2x00_alert_all_vps(rsp, mb);
1824 * qla2x00_process_completed_request() - Process a Fast Post response.
1825 * @vha: SCSI driver HA context
1826 * @req: request queue
1830 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1831 struct req_que *req, uint32_t index)
1834 struct qla_hw_data *ha = vha->hw;
1836 /* Validate handle. */
1837 if (index >= req->num_outstanding_cmds) {
1838 ql_log(ql_log_warn, vha, 0x3014,
1839 "Invalid SCSI command index (%x).\n", index);
1841 if (IS_P3P_TYPE(ha))
1842 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1844 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1848 sp = req->outstanding_cmds[index];
1850 /* Free outstanding command slot. */
1851 req->outstanding_cmds[index] = NULL;
1853 /* Save ISP completion status */
1854 sp->done(sp, DID_OK << 16);
1856 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1858 if (IS_P3P_TYPE(ha))
1859 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1861 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1866 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1867 struct req_que *req, void *iocb)
1869 struct qla_hw_data *ha = vha->hw;
1870 sts_entry_t *pkt = iocb;
1874 if (pkt->handle == QLA_SKIP_HANDLE)
1877 index = LSW(pkt->handle);
1878 if (index >= req->num_outstanding_cmds) {
1879 ql_log(ql_log_warn, vha, 0x5031,
1880 "%s: Invalid command index (%x) type %8ph.\n",
1882 if (IS_P3P_TYPE(ha))
1883 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1885 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1888 sp = req->outstanding_cmds[index];
1890 ql_log(ql_log_warn, vha, 0x5032,
1891 "%s: Invalid completion handle (%x) -- timed-out.\n",
1895 if (sp->handle != index) {
1896 ql_log(ql_log_warn, vha, 0x5033,
1897 "%s: SRB handle (%x) mismatch %x.\n", func,
1902 req->outstanding_cmds[index] = NULL;
1904 qla_put_fw_resources(sp->qpair, &sp->iores);
1909 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1910 struct mbx_entry *mbx)
1912 const char func[] = "MBX-IOCB";
1916 struct srb_iocb *lio;
1920 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1924 lio = &sp->u.iocb_cmd;
1926 fcport = sp->fcport;
1927 data = lio->u.logio.data;
1929 data[0] = MBS_COMMAND_ERROR;
1930 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1931 QLA_LOGIO_LOGIN_RETRIED : 0;
1932 if (mbx->entry_status) {
1933 ql_dbg(ql_dbg_async, vha, 0x5043,
1934 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1935 "entry-status=%x status=%x state-flag=%x "
1936 "status-flags=%x.\n", type, sp->handle,
1937 fcport->d_id.b.domain, fcport->d_id.b.area,
1938 fcport->d_id.b.al_pa, mbx->entry_status,
1939 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1940 le16_to_cpu(mbx->status_flags));
1942 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1948 status = le16_to_cpu(mbx->status);
1949 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1950 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1952 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1953 ql_dbg(ql_dbg_async, vha, 0x5045,
1954 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1955 type, sp->handle, fcport->d_id.b.domain,
1956 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1957 le16_to_cpu(mbx->mb1));
1959 data[0] = MBS_COMMAND_COMPLETE;
1960 if (sp->type == SRB_LOGIN_CMD) {
1961 fcport->port_type = FCT_TARGET;
1962 if (le16_to_cpu(mbx->mb1) & BIT_0)
1963 fcport->port_type = FCT_INITIATOR;
1964 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1965 fcport->flags |= FCF_FCP2_DEVICE;
1970 data[0] = le16_to_cpu(mbx->mb0);
1972 case MBS_PORT_ID_USED:
1973 data[1] = le16_to_cpu(mbx->mb1);
1975 case MBS_LOOP_ID_USED:
1978 data[0] = MBS_COMMAND_ERROR;
1982 ql_log(ql_log_warn, vha, 0x5046,
1983 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1984 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1985 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1986 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1987 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1988 le16_to_cpu(mbx->mb7));
1995 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1996 struct mbx_24xx_entry *pkt)
1998 const char func[] = "MBX-IOCB2";
1999 struct qla_hw_data *ha = vha->hw;
2001 struct srb_iocb *si;
2005 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2009 if (sp->type == SRB_SCSI_CMD ||
2010 sp->type == SRB_NVME_CMD ||
2011 sp->type == SRB_TM_CMD) {
2012 ql_log(ql_log_warn, vha, 0x509d,
2013 "Inconsistent event entry type %d\n", sp->type);
2014 if (IS_P3P_TYPE(ha))
2015 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2017 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2021 si = &sp->u.iocb_cmd;
2022 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
2024 for (i = 0; i < sz; i++)
2025 si->u.mbx.in_mb[i] = pkt->mb[i];
2027 res = (si->u.mbx.in_mb[0] & MBS_MASK);
2033 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2034 struct nack_to_isp *pkt)
2036 const char func[] = "nack";
2040 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2044 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
2045 res = QLA_FUNCTION_FAILED;
2051 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
2052 sts_entry_t *pkt, int iocb_type)
2054 const char func[] = "CT_IOCB";
2057 struct bsg_job *bsg_job;
2058 struct fc_bsg_reply *bsg_reply;
2059 uint16_t comp_status;
2062 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2068 bsg_job = sp->u.bsg_job;
2069 bsg_reply = bsg_job->reply;
2071 type = "ct pass-through";
2073 comp_status = le16_to_cpu(pkt->comp_status);
2076 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
2077 * fc payload to the caller
2079 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2080 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2082 if (comp_status != CS_COMPLETE) {
2083 if (comp_status == CS_DATA_UNDERRUN) {
2085 bsg_reply->reply_payload_rcv_len =
2086 le16_to_cpu(pkt->rsp_info_len);
2088 ql_log(ql_log_warn, vha, 0x5048,
2089 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
2091 bsg_reply->reply_payload_rcv_len);
2093 ql_log(ql_log_warn, vha, 0x5049,
2094 "CT pass-through-%s error comp_status=0x%x.\n",
2096 res = DID_ERROR << 16;
2097 bsg_reply->reply_payload_rcv_len = 0;
2099 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
2103 bsg_reply->reply_payload_rcv_len =
2104 bsg_job->reply_payload.payload_len;
2105 bsg_job->reply_len = 0;
2108 case SRB_CT_PTHRU_CMD:
2110 * borrowing sts_entry_24xx.comp_status.
2111 * same location as ct_entry_24xx.comp_status
2113 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
2114 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2123 qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
2124 struct sts_entry_24xx *pkt, int iocb_type)
2126 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
2127 const char func[] = "ELS_CT_IOCB";
2130 struct bsg_job *bsg_job;
2131 struct fc_bsg_reply *bsg_reply;
2132 uint16_t comp_status;
2133 uint32_t fw_status[3];
2135 struct srb_iocb *els;
2137 scsi_qla_host_t *vha;
2138 struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
2140 sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
2143 bsg_job = sp->u.bsg_job;
2148 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
2149 fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
2150 fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
2153 case SRB_ELS_CMD_RPT:
2154 case SRB_ELS_CMD_HST:
2157 case SRB_ELS_CMD_HST_NOLOGIN:
2160 struct els_entry_24xx *els = (void *)pkt;
2161 struct qla_bsg_auth_els_request *p =
2162 (struct qla_bsg_auth_els_request *)bsg_job->request;
2164 ql_dbg(ql_dbg_user, vha, 0x700f,
2165 "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
2166 __func__, sc_to_str(p->e.sub_cmd),
2167 e->d_id[2], e->d_id[1], e->d_id[0],
2168 comp_status, p->e.extra_rx_xchg_address, bsg_job);
2170 if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
2171 if (sp->remap.remapped) {
2172 n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2173 bsg_job->reply_payload.sg_cnt,
2176 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
2177 "%s: SG copied %x of %x\n",
2178 __func__, n, sp->remap.rsp.len);
2180 ql_dbg(ql_dbg_user, vha, 0x700f,
2181 "%s: NOT REMAPPED (error)...!!!\n",
2188 type = "ct pass-through";
2191 type = "Driver ELS logo";
2192 if (iocb_type != ELS_IOCB_TYPE) {
2193 ql_dbg(ql_dbg_user, vha, 0x5047,
2194 "Completing %s: (%p) type=%d.\n",
2195 type, sp, sp->type);
2200 case SRB_CT_PTHRU_CMD:
2201 /* borrowing sts_entry_24xx.comp_status.
2202  * same location as ct_entry_24xx.comp_status
2204 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
2205 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2210 ql_dbg(ql_dbg_user, vha, 0x503e,
2211 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
2215 if (iocb_type == ELS_IOCB_TYPE) {
2216 els = &sp->u.iocb_cmd;
2217 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
2218 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
2219 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
2220 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
2221 if (comp_status == CS_COMPLETE) {
2224 if (comp_status == CS_DATA_UNDERRUN) {
2226 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
2227 ese->total_byte_count));
2229 if (sp->remap.remapped &&
2230 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
2231 ql_dbg(ql_dbg_user, vha, 0x503f,
2232 "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
2233 __func__, e->s_id[0], e->s_id[2], e->s_id[1],
2234 e->d_id[2], e->d_id[1], e->d_id[0]);
2238 } else if (comp_status == CS_PORT_LOGGED_OUT) {
2239 ql_dbg(ql_dbg_disc, vha, 0x911e,
2240 "%s %d schedule session deletion\n",
2241 __func__, __LINE__);
2243 els->u.els_plogi.len = 0;
2244 res = DID_IMM_RETRY << 16;
2245 qlt_schedule_sess_for_deletion(sp->fcport);
2247 els->u.els_plogi.len = 0;
2248 res = DID_ERROR << 16;
2251 if (sp->remap.remapped &&
2252 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
2254 ql_dbg(ql_dbg_user, vha, 0x503f,
2255 "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
2256 type, sp->handle, comp_status);
2258 ql_dbg(ql_dbg_user, vha, 0x503f,
2259 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2260 fw_status[1], fw_status[2],
2261 le32_to_cpu(((struct els_sts_entry_24xx *)
2262 pkt)->total_byte_count),
2263 e->s_id[0], e->s_id[2], e->s_id[1],
2264 e->d_id[2], e->d_id[1], e->d_id[0]);
2266 if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
2267 sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
2268 ql_dbg(ql_dbg_edif, vha, 0x911e,
2269 "%s rcv reject. Sched delete\n", __func__);
2270 qlt_schedule_sess_for_deletion(sp->fcport);
2273 ql_log(ql_log_info, vha, 0x503f,
2274 "%s IOCB Done hdl=%x comp_status=0x%x\n",
2275 type, sp->handle, comp_status);
2276 ql_log(ql_log_info, vha, 0x503f,
2277 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2278 fw_status[1], fw_status[2],
2279 le32_to_cpu(((struct els_sts_entry_24xx *)
2280 pkt)->total_byte_count),
2281 e->s_id[0], e->s_id[2], e->s_id[1],
2282 e->d_id[2], e->d_id[1], e->d_id[0]);
2288 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
2289 * fc payload to the caller
2291 bsg_job = sp->u.bsg_job;
2292 bsg_reply = bsg_job->reply;
2293 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2294 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
2296 if (comp_status != CS_COMPLETE) {
2297 if (comp_status == CS_DATA_UNDERRUN) {
2299 bsg_reply->reply_payload_rcv_len =
2300 le32_to_cpu(ese->total_byte_count);
2302 ql_dbg(ql_dbg_user, vha, 0x503f,
2303 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2304 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
2305 type, sp->handle, comp_status, fw_status[1], fw_status[2],
2306 le32_to_cpu(ese->total_byte_count));
2308 ql_dbg(ql_dbg_user, vha, 0x5040,
2309 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2310 "error subcode 1=0x%x error subcode 2=0x%x.\n",
2311 type, sp->handle, comp_status,
2312 le32_to_cpu(ese->error_subcode_1),
2313 le32_to_cpu(ese->error_subcode_2));
2314 res = DID_ERROR << 16;
2315 bsg_reply->reply_payload_rcv_len = 0;
2317 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
2318 fw_status, sizeof(fw_status));
2319 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
2324 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2325 bsg_job->reply_len = 0;
2333 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
2334 struct logio_entry_24xx *logio)
2336 const char func[] = "LOGIO-IOCB";
2340 struct srb_iocb *lio;
2345 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
2349 lio = &sp->u.iocb_cmd;
2351 fcport = sp->fcport;
2352 data = lio->u.logio.data;
2354 data[0] = MBS_COMMAND_ERROR;
2355 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2356 QLA_LOGIO_LOGIN_RETRIED : 0;
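/*
 * Note: data[0]/data[1] are handed back to the async login/PRLI state
 * machine through lio->u.logio.data; start with a command-error default
 * and let the completion paths below overwrite it on success.
 */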
2357 if (logio->entry_status) {
2358 ql_log(ql_log_warn, fcport->vha, 0x5034,
2359 "Async-%s error entry - %8phC hdl=%x"
2360 "portid=%02x%02x%02x entry-status=%x.\n",
2361 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2362 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2363 logio->entry_status);
2364 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2365 logio, sizeof(*logio));
2370 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2371 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2372 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2373 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2374 le32_to_cpu(logio->io_parameter[0]));
2376 vha->hw->exch_starvation = 0;
2377 data[0] = MBS_COMMAND_COMPLETE;
2379 if (sp->type == SRB_PRLI_CMD) {
2380 lio->u.logio.iop[0] =
2381 le32_to_cpu(logio->io_parameter[0]);
2382 lio->u.logio.iop[1] =
2383 le32_to_cpu(logio->io_parameter[1]);
2387 if (sp->type != SRB_LOGIN_CMD)
2390 lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
2391 if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
2392 fcport->flags |= FCF_FCSP_DEVICE;
2394 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2395 if (iop[0] & BIT_4) {
2396 fcport->port_type = FCT_TARGET;
2398 fcport->flags |= FCF_FCP2_DEVICE;
2399 } else if (iop[0] & BIT_5)
2400 fcport->port_type = FCT_INITIATOR;
2403 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2405 if (logio->io_parameter[7] || logio->io_parameter[8])
2406 fcport->supported_classes |= FC_COS_CLASS2;
2407 if (logio->io_parameter[9] || logio->io_parameter[10])
2408 fcport->supported_classes |= FC_COS_CLASS3;
2413 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2414 iop[1] = le32_to_cpu(logio->io_parameter[1]);
2415 lio->u.logio.iop[0] = iop[0];
2416 lio->u.logio.iop[1] = iop[1];
2418 case LSC_SCODE_PORTID_USED:
2419 data[0] = MBS_PORT_ID_USED;
2420 data[1] = LSW(iop[1]);
2423 case LSC_SCODE_NPORT_USED:
2424 data[0] = MBS_LOOP_ID_USED;
2427 case LSC_SCODE_CMD_FAILED:
2428 if (iop[1] == 0x0606) {
2430 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
2431 * Target side acked.
2433 data[0] = MBS_COMMAND_COMPLETE;
2436 data[0] = MBS_COMMAND_ERROR;
2438 case LSC_SCODE_NOXCB:
2439 vha->hw->exch_starvation++;
2440 if (vha->hw->exch_starvation > 5) {
2441 ql_log(ql_log_warn, vha, 0xd046,
2442 "Exchange starvation. Resetting RISC\n");
2444 vha->hw->exch_starvation = 0;
2446 if (IS_P3P_TYPE(vha->hw))
2447 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2449 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2450 qla2xxx_wake_dpc(vha);
2454 data[0] = MBS_COMMAND_ERROR;
2459 ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
2460 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2461 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2462 le16_to_cpu(logio->comp_status),
2463 le32_to_cpu(logio->io_parameter[0]),
2464 le32_to_cpu(logio->io_parameter[1]));
2466 ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
2467 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2468 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2469 le16_to_cpu(logio->comp_status),
2470 le32_to_cpu(logio->io_parameter[0]),
2471 le32_to_cpu(logio->io_parameter[1]));
2478 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2480 const char func[] = "TMF-IOCB";
2484 struct srb_iocb *iocb;
2485 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2488 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2492 comp_status = le16_to_cpu(sts->comp_status);
2493 iocb = &sp->u.iocb_cmd;
2495 fcport = sp->fcport;
2496 iocb->u.tmf.data = QLA_SUCCESS;
2498 if (sts->entry_status) {
2499 ql_log(ql_log_warn, fcport->vha, 0x5038,
2500 "Async-%s error - hdl=%x entry-status(%x).\n",
2501 type, sp->handle, sts->entry_status);
2502 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2503 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2504 ql_log(ql_log_warn, fcport->vha, 0x5039,
2505 "Async-%s error - hdl=%x completion status(%x).\n",
2506 type, sp->handle, comp_status);
2507 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2508 } else if ((le16_to_cpu(sts->scsi_status) &
2509 SS_RESPONSE_INFO_LEN_VALID)) {
2510 host_to_fcp_swap(sts->data, sizeof(sts->data));
2511 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2512 ql_log(ql_log_warn, fcport->vha, 0x503b,
2513 "Async-%s error - hdl=%x not enough response(%d).\n",
2514 type, sp->handle, sts->rsp_data_len);
2515 } else if (sts->data[3]) {
2516 ql_log(ql_log_warn, fcport->vha, 0x503c,
2517 "Async-%s error - hdl=%x response(%x).\n",
2518 type, sp->handle, sts->data[3]);
2519 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2523 switch (comp_status) {
2524 case CS_PORT_LOGGED_OUT:
2525 case CS_PORT_CONFIG_CHG:
2528 case CS_PORT_UNAVAILABLE:
2531 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2532 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2533 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
2534 fcport->d_id.b.domain, fcport->d_id.b.area,
2535 fcport->d_id.b.al_pa,
2536 port_state_str[FCS_ONLINE],
2539 qlt_schedule_sess_for_deletion(fcport);
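/*
 * Note: the port went away underneath the task-management request;
 * scheduling session deletion tears down the stale login so the remote
 * port can be rediscovered and logged in again.
 */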
2547 if (iocb->u.tmf.data != QLA_SUCCESS)
2548 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
2554 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2555 void *tsk, srb_t *sp)
2558 struct srb_iocb *iocb;
2559 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2560 uint16_t state_flags;
2561 struct nvmefc_fcp_req *fd;
2562 uint16_t ret = QLA_SUCCESS;
2563 __le16 comp_status = sts->comp_status;
2566 iocb = &sp->u.iocb_cmd;
2567 fcport = sp->fcport;
2568 iocb->u.nvme.comp_status = comp_status;
2569 state_flags = le16_to_cpu(sts->state_flags);
2570 fd = iocb->u.nvme.desc;
2572 if (unlikely(iocb->u.nvme.aen_op))
2573 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2575 sp->qpair->cmd_completion_cnt++;
2577 if (unlikely(comp_status != CS_COMPLETE))
2580 fd->transferred_length = fd->payload_length -
2581 le32_to_cpu(sts->residual_len);
2584 * State flags: Bits 6 and 0.
2585 * If bit 0 is set, we don't care about bit 6:
2586 * in both cases the response was DMA'd to the host buffer.
2587 * If both bits are 0, that is the good-path case.
2588 * If bit 6 is set and bit 0 is clear, we need to
2589 * copy the response data from the status IOCB to the response buffer.
2591 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2592 iocb->u.nvme.rsp_pyld_len = 0;
2593 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2594 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2595 /* Response already DMA'd to fd->rspaddr. */
2596 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2597 } else if ((state_flags & SF_FCP_RSP_DMA)) {
2599 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
2602 iocb->u.nvme.rsp_pyld_len = 0;
2603 fd->transferred_length = 0;
2604 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2605 "Unexpected values in NVMe_RSP IU.\n");
2607 } else if (state_flags & SF_NVME_ERSP) {
2608 uint32_t *inbuf, *outbuf;
2611 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2612 outbuf = (uint32_t *)fd->rspaddr;
2613 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2614 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2615 sizeof(struct nvme_fc_ersp_iu))) {
2616 if (ql_mask_match(ql_dbg_io)) {
2617 WARN_ONCE(1, "Unexpected response payload length %u.\n",
2618 iocb->u.nvme.rsp_pyld_len);
2619 ql_log(ql_log_warn, fcport->vha, 0x5100,
2620 "Unexpected response payload length %u.\n",
2621 iocb->u.nvme.rsp_pyld_len);
2623 iocb->u.nvme.rsp_pyld_len =
2624 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
2626 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2627 for (; iter; iter--)
2628 *outbuf++ = swab32(*inbuf++);
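/*
 * Note: the ERSP IU also carries the byte count the target claims to
 * have transferred; the check below compares it with the length derived
 * from residual_len to detect dropped frames.
 */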
2631 if (state_flags & SF_NVME_ERSP) {
2632 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2635 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2636 if (fd->transferred_length != tgt_xfer_len) {
2637 ql_log(ql_log_warn, fcport->vha, 0x3079,
2638 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2639 tgt_xfer_len, fd->transferred_length);
2641 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2643 * Do not log if this is just an underflow and there
2650 if (unlikely(logit))
2651 ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
2652 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2653 sp->name, sp->handle, comp_status,
2654 fd->transferred_length, le32_to_cpu(sts->residual_len),
2658 * If transport error then Failure (HBA rejects request)
2659 * otherwise transport will handle.
2661 switch (le16_to_cpu(comp_status)) {
2666 case CS_PORT_UNAVAILABLE:
2667 case CS_PORT_LOGGED_OUT:
2668 fcport->nvme_flag |= NVME_FLAG_RESETTING;
2669 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2670 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2671 "Port to be marked lost on fcport=%06x, current "
2672 "port state= %s comp_status %x.\n",
2673 fcport->d_id.b24, port_state_str[FCS_ONLINE],
2676 qlt_schedule_sess_for_deletion(fcport);
2681 fd->transferred_length = 0;
2682 iocb->u.nvme.rsp_pyld_len = 0;
2685 case CS_DATA_UNDERRUN:
2688 ret = QLA_FUNCTION_FAILED;
2694 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2695 struct vp_ctrl_entry_24xx *vce)
2697 const char func[] = "CTRLVP-IOCB";
2699 int rval = QLA_SUCCESS;
2701 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2705 if (vce->entry_status != 0) {
2706 ql_dbg(ql_dbg_vport, vha, 0x10c4,
2707 "%s: Failed to complete IOCB -- error status (%x)\n",
2708 sp->name, vce->entry_status);
2709 rval = QLA_FUNCTION_FAILED;
2710 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2711 ql_dbg(ql_dbg_vport, vha, 0x10c5,
2712 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2713 sp->name, le16_to_cpu(vce->comp_status),
2714 le16_to_cpu(vce->vp_idx_failed));
2715 rval = QLA_FUNCTION_FAILED;
2717 ql_dbg(ql_dbg_vport, vha, 0x10c6,
2718 "Done %s.\n", __func__);
2725 /* Process a single response queue entry. */
2726 static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2727 struct rsp_que *rsp,
2730 sts21_entry_t *sts21_entry;
2731 sts22_entry_t *sts22_entry;
2732 uint16_t handle_cnt;
2735 switch (pkt->entry_type) {
2737 qla2x00_status_entry(vha, rsp, pkt);
2739 case STATUS_TYPE_21:
2740 sts21_entry = (sts21_entry_t *)pkt;
2741 handle_cnt = sts21_entry->handle_count;
2742 for (cnt = 0; cnt < handle_cnt; cnt++)
2743 qla2x00_process_completed_request(vha, rsp->req,
2744 sts21_entry->handle[cnt]);
2746 case STATUS_TYPE_22:
2747 sts22_entry = (sts22_entry_t *)pkt;
2748 handle_cnt = sts22_entry->handle_count;
2749 for (cnt = 0; cnt < handle_cnt; cnt++)
2750 qla2x00_process_completed_request(vha, rsp->req,
2751 sts22_entry->handle[cnt]);
2753 case STATUS_CONT_TYPE:
2754 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2757 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2760 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2763 /* Type Not Supported. */
2764 ql_log(ql_log_warn, vha, 0x504a,
2765 "Received unknown response pkt type %x entry status=%x.\n",
2766 pkt->entry_type, pkt->entry_status);
2772 * qla2x00_process_response_queue() - Process response queue entries.
2773 * @rsp: response queue
2776 qla2x00_process_response_queue(struct rsp_que *rsp)
2778 struct scsi_qla_host *vha;
2779 struct qla_hw_data *ha = rsp->hw;
2780 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2783 vha = pci_get_drvdata(ha->pdev);
2785 if (!vha->flags.online)
2788 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2789 pkt = (sts_entry_t *)rsp->ring_ptr;
2792 if (rsp->ring_index == rsp->length) {
2793 rsp->ring_index = 0;
2794 rsp->ring_ptr = rsp->ring;
2799 if (pkt->entry_status != 0) {
2800 qla2x00_error_entry(vha, rsp, pkt);
2801 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2806 qla2x00_process_response_entry(vha, rsp, pkt);
2807 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2811 /* Adjust ring index */
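/*
 * Writing rsp->ring_index back to the response-queue out pointer tells
 * the firmware which entries the driver has consumed and may be reused.
 */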
2812 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
2816 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2817 uint32_t sense_len, struct rsp_que *rsp, int res)
2819 struct scsi_qla_host *vha = sp->vha;
2820 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2821 uint32_t track_sense_len;
2823 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2824 sense_len = SCSI_SENSE_BUFFERSIZE;
2826 SET_CMD_SENSE_LEN(sp, sense_len);
2827 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2828 track_sense_len = sense_len;
2830 if (sense_len > par_sense_len)
2831 sense_len = par_sense_len;
2833 memcpy(cp->sense_buffer, sense_data, sense_len);
2835 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2836 track_sense_len -= sense_len;
2837 SET_CMD_SENSE_LEN(sp, track_sense_len);
2839 if (track_sense_len != 0) {
2840 rsp->status_srb = sp;
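/*
 * Note: the sense data did not fit in the status IOCB; rsp->status_srb
 * flags that the remainder will arrive in Status Continuation entries,
 * which qla2x00_status_cont_entry() appends to the same command.
 */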
2845 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2846 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2847 sp->vha->host_no, cp->device->id, cp->device->lun,
2849 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2850 cp->sense_buffer, sense_len);
2854 struct scsi_dif_tuple {
2855 __be16 guard; /* Checksum */
2856 __be16 app_tag; /* APPL identifier */
2857 __be32 ref_tag; /* Target LBA or indirect LBA */
2861 * Checks the guard or meta-data for the type of error
2862 * detected by the HBA. In case of errors, we set the
2863 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
2864 * to indicate to the kernel that the HBA detected an error.
2867 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2869 struct scsi_qla_host *vha = sp->vha;
2870 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2871 uint8_t *ap = &sts24->data[12];
2872 uint8_t *ep = &sts24->data[20];
2873 uint32_t e_ref_tag, a_ref_tag;
2874 uint16_t e_app_tag, a_app_tag;
2875 uint16_t e_guard, a_guard;
2878 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
2879 * would make guard field appear at offset 2
2881 a_guard = get_unaligned_le16(ap + 2);
2882 a_app_tag = get_unaligned_le16(ap + 0);
2883 a_ref_tag = get_unaligned_le32(ap + 4);
2884 e_guard = get_unaligned_le16(ep + 2);
2885 e_app_tag = get_unaligned_le16(ep + 0);
2886 e_ref_tag = get_unaligned_le32(ep + 4);
2888 ql_dbg(ql_dbg_io, vha, 0x3023,
2889 "iocb(s) %p Returned STATUS.\n", sts24);
2891 ql_dbg(ql_dbg_io, vha, 0x3024,
2892 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2893 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2894 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2895 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2896 a_app_tag, e_app_tag, a_guard, e_guard);
2900 * For type 3: ref & app tag is all 'f's
2901 * For type 0,1,2: app tag is all 'f's
2903 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
2904 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
2905 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2906 uint32_t blocks_done, resid;
2907 sector_t lba_s = scsi_get_lba(cmd);
2909 /* 2TB boundary case covered automatically with this */
2910 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
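/*
 * Note: reference tags increment by one per logical block, so the tag
 * reported in the status IOCB minus the starting LBA, plus one, gives
 * the number of blocks the device actually completed; the residual is
 * derived from that count below.
 */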
2912 resid = scsi_bufflen(cmd) - (blocks_done *
2913 cmd->device->sector_size);
2915 scsi_set_resid(cmd, resid);
2916 cmd->result = DID_OK << 16;
2918 /* Update protection tag */
2919 if (scsi_prot_sg_count(cmd)) {
2920 uint32_t i, j = 0, k = 0, num_ent;
2921 struct scatterlist *sg;
2922 struct t10_pi_tuple *spt;
2924 /* Patch the corresponding protection tags */
2925 scsi_for_each_prot_sg(cmd, sg,
2926 scsi_prot_sg_count(cmd), i) {
2927 num_ent = sg_dma_len(sg) / 8;
2928 if (k + num_ent < blocks_done) {
2932 j = blocks_done - k - 1;
2937 if (k != blocks_done) {
2938 ql_log(ql_log_warn, vha, 0x302f,
2939 "unexpected tag values tag:lba=%x:%llx)\n",
2940 e_ref_tag, (unsigned long long)lba_s);
2944 spt = page_address(sg_page(sg)) + sg->offset;
2947 spt->app_tag = T10_PI_APP_ESCAPE;
2948 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2949 spt->ref_tag = T10_PI_REF_ESCAPE;
2956 if (e_guard != a_guard) {
2957 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2958 set_host_byte(cmd, DID_ABORT);
2963 if (e_ref_tag != a_ref_tag) {
2964 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2965 set_host_byte(cmd, DID_ABORT);
2969 /* check appl tag */
2970 if (e_app_tag != a_app_tag) {
2971 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2972 set_host_byte(cmd, DID_ABORT);
2980 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2981 struct req_que *req, uint32_t index)
2983 struct qla_hw_data *ha = vha->hw;
2985 uint16_t comp_status;
2986 uint16_t scsi_status;
2988 uint32_t rval = EXT_STATUS_OK;
2989 struct bsg_job *bsg_job = NULL;
2990 struct fc_bsg_request *bsg_request;
2991 struct fc_bsg_reply *bsg_reply;
2992 sts_entry_t *sts = pkt;
2993 struct sts_entry_24xx *sts24 = pkt;
2995 /* Validate handle. */
2996 if (index >= req->num_outstanding_cmds) {
2997 ql_log(ql_log_warn, vha, 0x70af,
2998 "Invalid SCSI completion handle 0x%x.\n", index);
2999 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3003 sp = req->outstanding_cmds[index];
3005 ql_log(ql_log_warn, vha, 0x70b0,
3006 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
3009 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3013 /* Free outstanding command slot. */
3014 req->outstanding_cmds[index] = NULL;
3015 bsg_job = sp->u.bsg_job;
3016 bsg_request = bsg_job->request;
3017 bsg_reply = bsg_job->reply;
3019 if (IS_FWI2_CAPABLE(ha)) {
3020 comp_status = le16_to_cpu(sts24->comp_status);
3021 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3023 comp_status = le16_to_cpu(sts->comp_status);
3024 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3027 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3028 switch (comp_status) {
3030 if (scsi_status == 0) {
3031 bsg_reply->reply_payload_rcv_len =
3032 bsg_job->reply_payload.payload_len;
3033 vha->qla_stats.input_bytes +=
3034 bsg_reply->reply_payload_rcv_len;
3035 vha->qla_stats.input_requests++;
3036 rval = EXT_STATUS_OK;
3040 case CS_DATA_OVERRUN:
3041 ql_dbg(ql_dbg_user, vha, 0x70b1,
3042 "Command completed with data overrun thread_id=%d\n",
3044 rval = EXT_STATUS_DATA_OVERRUN;
3047 case CS_DATA_UNDERRUN:
3048 ql_dbg(ql_dbg_user, vha, 0x70b2,
3049 "Command completed with data underrun thread_id=%d\n",
3051 rval = EXT_STATUS_DATA_UNDERRUN;
3053 case CS_BIDIR_RD_OVERRUN:
3054 ql_dbg(ql_dbg_user, vha, 0x70b3,
3055 "Command completed with read data overrun thread_id=%d\n",
3057 rval = EXT_STATUS_DATA_OVERRUN;
3060 case CS_BIDIR_RD_WR_OVERRUN:
3061 ql_dbg(ql_dbg_user, vha, 0x70b4,
3062 "Command completed with read and write data overrun "
3063 "thread_id=%d\n", thread_id);
3064 rval = EXT_STATUS_DATA_OVERRUN;
3067 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
3068 ql_dbg(ql_dbg_user, vha, 0x70b5,
3069 "Command completed with read data over and write data "
3070 "underrun thread_id=%d\n", thread_id);
3071 rval = EXT_STATUS_DATA_OVERRUN;
3074 case CS_BIDIR_RD_UNDERRUN:
3075 ql_dbg(ql_dbg_user, vha, 0x70b6,
3076 "Command completed with read data underrun "
3077 "thread_id=%d\n", thread_id);
3078 rval = EXT_STATUS_DATA_UNDERRUN;
3081 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
3082 ql_dbg(ql_dbg_user, vha, 0x70b7,
3083 "Command completed with read data under and write data "
3084 "overrun thread_id=%d\n", thread_id);
3085 rval = EXT_STATUS_DATA_UNDERRUN;
3088 case CS_BIDIR_RD_WR_UNDERRUN:
3089 ql_dbg(ql_dbg_user, vha, 0x70b8,
3090 "Command completed with read and write data underrun "
3091 "thread_id=%d\n", thread_id);
3092 rval = EXT_STATUS_DATA_UNDERRUN;
3096 ql_dbg(ql_dbg_user, vha, 0x70b9,
3097 "Command completed with data DMA error thread_id=%d\n",
3099 rval = EXT_STATUS_DMA_ERR;
3103 ql_dbg(ql_dbg_user, vha, 0x70ba,
3104 "Command completed with timeout thread_id=%d\n",
3106 rval = EXT_STATUS_TIMEOUT;
3109 ql_dbg(ql_dbg_user, vha, 0x70bb,
3110 "Command completed with completion status=0x%x "
3111 "thread_id=%d\n", comp_status, thread_id);
3112 rval = EXT_STATUS_ERR;
3115 bsg_reply->reply_payload_rcv_len = 0;
3118 /* Return the vendor specific reply to API */
3119 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
3120 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
3121 /* Always return DID_OK, bsg will send the vendor specific response
3122 * in this case only */
3123 sp->done(sp, DID_OK << 16);
3128 * qla2x00_status_entry() - Process a Status IOCB entry.
3129 * @vha: SCSI driver HA context
3130 * @rsp: response queue
3131 * @pkt: Entry pointer
3134 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3138 struct scsi_cmnd *cp;
3139 sts_entry_t *sts = pkt;
3140 struct sts_entry_24xx *sts24 = pkt;
3141 uint16_t comp_status;
3142 uint16_t scsi_status;
3144 uint8_t lscsi_status;
3146 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
3148 uint8_t *rsp_info, *sense_data;
3149 struct qla_hw_data *ha = vha->hw;
3152 struct req_que *req;
3155 uint16_t state_flags = 0;
3156 uint16_t sts_qual = 0;
3158 if (IS_FWI2_CAPABLE(ha)) {
3159 comp_status = le16_to_cpu(sts24->comp_status);
3160 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3161 state_flags = le16_to_cpu(sts24->state_flags);
3163 comp_status = le16_to_cpu(sts->comp_status);
3164 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3166 handle = (uint32_t) LSW(sts->handle);
3167 que = MSW(sts->handle);
3168 req = ha->req_q_map[que];
3170 /* Check for invalid queue pointer */
3172 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
3173 ql_dbg(ql_dbg_io, vha, 0x3059,
3174 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
3175 "que=%u.\n", sts->handle, req, que);
3179 /* Validate handle. */
3180 if (handle < req->num_outstanding_cmds) {
3181 sp = req->outstanding_cmds[handle];
3183 ql_dbg(ql_dbg_io, vha, 0x3075,
3184 "%s(%ld): Already returned command for status handle (0x%x).\n",
3185 __func__, vha->host_no, sts->handle);
3189 ql_dbg(ql_dbg_io, vha, 0x3017,
3190 "Invalid status handle, out of range (0x%x).\n",
3193 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3194 if (IS_P3P_TYPE(ha))
3195 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3197 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3198 qla2xxx_wake_dpc(vha);
3202 qla_put_fw_resources(sp->qpair, &sp->iores);
3204 if (sp->cmd_type != TYPE_SRB) {
3205 req->outstanding_cmds[handle] = NULL;
3206 ql_dbg(ql_dbg_io, vha, 0x3015,
3207 "Unknown sp->cmd_type %x %p).\n",
3212 /* NVME completion. */
3213 if (sp->type == SRB_NVME_CMD) {
3214 req->outstanding_cmds[handle] = NULL;
3215 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
3219 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
3220 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
3224 /* Task Management completion. */
3225 if (sp->type == SRB_TM_CMD) {
3226 qla24xx_tm_iocb_entry(vha, req, pkt);
3230 /* Fast path completion. */
3231 qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
3232 sp->qpair->cmd_completion_cnt++;
3234 if (comp_status == CS_COMPLETE && scsi_status == 0) {
3235 qla2x00_process_completed_request(vha, req, handle);
3240 req->outstanding_cmds[handle] = NULL;
3241 cp = GET_CMD_SP(sp);
3243 ql_dbg(ql_dbg_io, vha, 0x3018,
3244 "Command already returned (0x%x/%p).\n",
3250 lscsi_status = scsi_status & STATUS_MASK;
3252 fcport = sp->fcport;
3255 sense_len = par_sense_len = rsp_info_len = resid_len =
3257 if (IS_FWI2_CAPABLE(ha)) {
3258 if (scsi_status & SS_SENSE_LEN_VALID)
3259 sense_len = le32_to_cpu(sts24->sense_len);
3260 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3261 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
3262 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
3263 resid_len = le32_to_cpu(sts24->rsp_residual_count);
3264 if (comp_status == CS_DATA_UNDERRUN)
3265 fw_resid_len = le32_to_cpu(sts24->residual_len);
3266 rsp_info = sts24->data;
3267 sense_data = sts24->data;
3268 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
3269 ox_id = le16_to_cpu(sts24->ox_id);
3270 par_sense_len = sizeof(sts24->data);
3271 sts_qual = le16_to_cpu(sts24->status_qualifier);
3273 if (scsi_status & SS_SENSE_LEN_VALID)
3274 sense_len = le16_to_cpu(sts->req_sense_length);
3275 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3276 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3277 resid_len = le32_to_cpu(sts->residual_length);
3278 rsp_info = sts->rsp_info;
3279 sense_data = sts->req_sense_data;
3280 par_sense_len = sizeof(sts->req_sense_data);
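/*
 * Note: FWI2-capable ISPs (24xx and later) report sense/response data in
 * the sts_entry_24xx layout decoded above; earlier ISPs use the legacy
 * sts_entry_t layout.
 */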
3283 /* Check for any FCP transport errors. */
3284 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
3285 /* Sense data lies beyond any FCP RESPONSE data. */
3286 if (IS_FWI2_CAPABLE(ha)) {
3287 sense_data += rsp_info_len;
3288 par_sense_len -= rsp_info_len;
3290 if (rsp_info_len > 3 && rsp_info[3]) {
3291 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
3292 "FCP I/O protocol failure (0x%x/0x%x).\n",
3293 rsp_info_len, rsp_info[3]);
3295 res = DID_BUS_BUSY << 16;
3300 /* Check for overrun. */
3301 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3302 scsi_status & SS_RESIDUAL_OVER)
3303 comp_status = CS_DATA_OVERRUN;
3306 * Check retry_delay_timer value if we receive a busy or
3309 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3310 lscsi_status == SAM_STAT_BUSY))
3311 qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
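/*
 * Note (assumption): the status qualifier carries the FCP_RSP retry
 * delay timer; recording it here lets the driver hold off new commands
 * to this port for the requested interval.
 */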
3314 * Based on Host and scsi status generate status code for Linux
3316 switch (comp_status) {
3319 if (scsi_status == 0) {
3323 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3325 scsi_set_resid(cp, resid);
3327 if (!lscsi_status &&
3328 ((unsigned)(scsi_bufflen(cp) - resid) <
3330 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3331 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3332 resid, scsi_bufflen(cp));
3334 res = DID_ERROR << 16;
3338 res = DID_OK << 16 | lscsi_status;
3340 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3341 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3342 "QUEUE FULL detected.\n");
3346 if (lscsi_status != SS_CHECK_CONDITION)
3349 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3350 if (!(scsi_status & SS_SENSE_LEN_VALID))
3353 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3357 case CS_DATA_UNDERRUN:
3358 /* Use F/W calculated residual length. */
3359 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3360 scsi_set_resid(cp, resid);
3361 if (scsi_status & SS_RESIDUAL_UNDER) {
3362 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3363 ql_log(ql_log_warn, fcport->vha, 0x301d,
3364 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3365 resid, scsi_bufflen(cp));
3367 res = DID_ERROR << 16 | lscsi_status;
3368 goto check_scsi_status;
3371 if (!lscsi_status &&
3372 ((unsigned)(scsi_bufflen(cp) - resid) <
3374 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3375 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3376 resid, scsi_bufflen(cp));
3378 res = DID_ERROR << 16;
3381 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3382 lscsi_status != SAM_STAT_BUSY) {
3384 * A SCSI status of TASK SET FULL or BUSY means the
3385 * task was not completed.
3388 ql_log(ql_log_warn, fcport->vha, 0x301f,
3389 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3390 resid, scsi_bufflen(cp));
3392 vha->interface_err_cnt++;
3394 res = DID_ERROR << 16 | lscsi_status;
3395 goto check_scsi_status;
3397 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3398 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3399 scsi_status, lscsi_status);
3402 res = DID_OK << 16 | lscsi_status;
3407 * Check to see if SCSI Status is non zero. If so report SCSI
3410 if (lscsi_status != 0) {
3411 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3412 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3413 "QUEUE FULL detected.\n");
3417 if (lscsi_status != SS_CHECK_CONDITION)
3420 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3421 if (!(scsi_status & SS_SENSE_LEN_VALID))
3424 qla2x00_handle_sense(sp, sense_data, par_sense_len,
3425 sense_len, rsp, res);
3429 case CS_PORT_LOGGED_OUT:
3430 case CS_PORT_CONFIG_CHG:
3433 case CS_PORT_UNAVAILABLE:
3436 case CS_EDIF_INV_REQ:
3439 * We are going to have the fc class block the rport
3440 * while we try to recover so instruct the mid layer
3441 * to requeue until the class decides how to handle this.
3443 res = DID_TRANSPORT_DISRUPTED << 16;
3445 if (comp_status == CS_TIMEOUT) {
3446 if (IS_FWI2_CAPABLE(ha))
3448 else if ((le16_to_cpu(sts->status_flags) &
3449 SF_LOGOUT_SENT) == 0)
3453 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3454 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
3455 "Port to be marked lost on fcport=%02x%02x%02x, current "
3456 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
3457 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3458 port_state_str[FCS_ONLINE],
3461 qlt_schedule_sess_for_deletion(fcport);
3467 res = DID_RESET << 16;
3471 logit = qla2x00_handle_dif_error(sp, sts24);
3476 res = DID_ERROR << 16;
3479 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3482 if (state_flags & BIT_4)
3483 scmd_printk(KERN_WARNING, cp,
3484 "Unsupported device '%s' found.\n",
3485 cp->device->vendor);
3489 ql_log(ql_log_info, fcport->vha, 0x3022,
3490 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3491 comp_status, scsi_status, res, vha->host_no,
3492 cp->device->id, cp->device->lun, fcport->d_id.b24,
3493 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3494 resid_len, fw_resid_len, sp, cp);
3495 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3496 pkt, sizeof(*sts24));
3497 res = DID_ERROR << 16;
3501 res = DID_ERROR << 16;
3507 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
3508 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3509 comp_status, scsi_status, res, vha->host_no,
3510 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3511 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3512 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3513 resid_len, fw_resid_len, sp, cp);
3515 if (rsp->status_srb == NULL)
3520 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
3521 * @rsp: response queue
3522 * @pkt: Entry pointer
3524 * Extended sense data.
3527 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3529 uint8_t sense_sz = 0;
3530 struct qla_hw_data *ha = rsp->hw;
3531 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3532 srb_t *sp = rsp->status_srb;
3533 struct scsi_cmnd *cp;
3537 if (!sp || !GET_CMD_SENSE_LEN(sp))
3540 sense_len = GET_CMD_SENSE_LEN(sp);
3541 sense_ptr = GET_CMD_SENSE_PTR(sp);
3543 cp = GET_CMD_SP(sp);
3545 ql_log(ql_log_warn, vha, 0x3025,
3546 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3548 rsp->status_srb = NULL;
3552 if (sense_len > sizeof(pkt->data))
3553 sense_sz = sizeof(pkt->data);
3555 sense_sz = sense_len;
3557 /* Move sense data. */
3558 if (IS_FWI2_CAPABLE(ha))
3559 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3560 memcpy(sense_ptr, pkt->data, sense_sz);
3561 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3562 sense_ptr, sense_sz);
3564 sense_len -= sense_sz;
3565 sense_ptr += sense_sz;
3567 SET_CMD_SENSE_PTR(sp, sense_ptr);
3568 SET_CMD_SENSE_LEN(sp, sense_len);
3570 /* Place command on done queue. */
3571 if (sense_len == 0) {
3572 rsp->status_srb = NULL;
3573 sp->done(sp, cp->result);
3578 * qla2x00_error_entry() - Process an error entry.
3579 * @vha: SCSI driver HA context
3580 * @rsp: response queue
3581 * @pkt: Entry pointer
3582 * Return: 1 = allow further error analysis, 0 = no additional error analysis.
3585 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3588 struct qla_hw_data *ha = vha->hw;
3589 const char func[] = "ERROR-IOCB";
3590 uint16_t que = MSW(pkt->handle);
3591 struct req_que *req = NULL;
3592 int res = DID_ERROR << 16;
3594 ql_dbg(ql_dbg_async, vha, 0x502a,
3595 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3596 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3598 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3601 req = ha->req_q_map[que];
3603 if (pkt->entry_status & RF_BUSY)
3604 res = DID_BUS_BUSY << 16;
3606 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3609 switch (pkt->entry_type) {
3610 case NOTIFY_ACK_TYPE:
3612 case STATUS_CONT_TYPE:
3613 case LOGINOUT_PORT_IOCB_TYPE:
3616 case ABORT_IOCB_TYPE:
3619 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3626 case SA_UPDATE_IOCB_TYPE:
3627 case ABTS_RESP_24XX:
3633 ql_log(ql_log_warn, vha, 0x5030,
3634 "Error entry - invalid handle/queue (%04x).\n", que);
3639 * qla24xx_mbx_completion() - Process mailbox command completions.
3640 * @vha: SCSI driver HA context
3641 * @mb0: Mailbox0 register
3644 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3648 __le16 __iomem *wptr;
3649 struct qla_hw_data *ha = vha->hw;
3650 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3652 /* Read all mbox registers? */
3653 WARN_ON_ONCE(ha->mbx_count > 32);
3654 mboxes = (1ULL << ha->mbx_count) - 1;
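/*
 * Note: build a bitmask of mailbox registers to capture. If no mailbox
 * command is in flight, read them all; otherwise read only the registers
 * the caller asked for in mcp->in_mb.
 */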
3656 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3658 mboxes = ha->mcp->in_mb;
3660 /* Load return mailbox registers. */
3661 ha->flags.mbox_int = 1;
3662 ha->mailbox_out[0] = mb0;
3664 wptr = &reg->mailbox1;
3666 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3668 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3676 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3677 struct abort_entry_24xx *pkt)
3679 const char func[] = "ABT_IOCB";
3681 srb_t *orig_sp = NULL;
3682 struct srb_iocb *abt;
3684 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3688 abt = &sp->u.iocb_cmd;
3689 abt->u.abt.comp_status = pkt->comp_status;
3690 orig_sp = sp->cmd_sp;
3691 /* Need to pass original sp */
3693 qla_nvme_abort_process_comp_status(pkt, orig_sp);
3698 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3699 struct pt_ls4_request *pkt, struct req_que *req)
3702 const char func[] = "LS4_IOCB";
3703 uint16_t comp_status;
3705 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3709 comp_status = le16_to_cpu(pkt->status);
3710 sp->done(sp, comp_status);
3714 * qla_chk_cont_iocb_avail - check that all continuation iocbs are available
3715 * before iocb processing can start.
3716 * @vha: host adapter pointer
3717 * @rsp: response queue
3718 * @pkt: head iocb describing how many continuation iocbs follow
3719 * Return: 0 if all iocbs have arrived, non-zero if they have not.
3721 static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
3722 struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
3724 int start_pkt_ring_index;
3728 if (pkt->entry_count == 1)
3731 /* ring_index was pre-incremented; set it back to the current pkt */
3732 if (rsp->ring_index == 0)
3733 start_pkt_ring_index = rsp->length - 1;
3735 start_pkt_ring_index = rsp->ring_index - 1;
3737 if (rsp_q_in < start_pkt_ring_index)
3738 /* q in ptr is wrapped */
3739 iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
3741 iocb_cnt = rsp_q_in - start_pkt_ring_index;
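/*
 * Example: with a 128-entry ring, a head IOCB at index 126 and rsp_q_in
 * already wrapped around to 2, the in pointer is behind the start index,
 * so iocb_cnt = 128 - 126 + 2 = 4 entries are currently available.
 */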
3743 if (iocb_cnt < pkt->entry_count)
3746 ql_dbg(ql_dbg_init, vha, 0x5091,
3747 "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
3748 __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
3754 * qla24xx_process_response_queue() - Process response queue entries.
3755 * @vha: SCSI driver HA context
3756 * @rsp: response queue
3758 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3759 struct rsp_que *rsp)
3761 struct sts_entry_24xx *pkt;
3762 struct qla_hw_data *ha = vha->hw;
3763 struct purex_entry_24xx *purex_entry;
3764 struct purex_item *pure_item;
3765 u16 rsp_in = 0, cur_ring_index;
3768 if (!ha->flags.fw_started)
3771 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
3772 rsp->qpair->rcv_intr = 1;
3775 #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \
3777 _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
3778 rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
3781 is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
3783 __update_rsp_in(is_shadow_hba, rsp, rsp_in);
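/*
 * Note: shadow-register-capable adapters DMA the response-queue in
 * pointer into host memory, so reading *in_ptr avoids an MMIO register
 * read on every poll; other adapters read the rsp_q_in register directly.
 */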
3785 while (rsp->ring_index != rsp_in &&
3786 rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3787 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3788 cur_ring_index = rsp->ring_index;
3791 if (rsp->ring_index == rsp->length) {
3792 rsp->ring_index = 0;
3793 rsp->ring_ptr = rsp->ring;
3798 if (pkt->entry_status != 0) {
3799 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3802 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3808 switch (pkt->entry_type) {
3810 qla2x00_status_entry(vha, rsp, pkt);
3812 case STATUS_CONT_TYPE:
3813 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3815 case VP_RPT_ID_IOCB_TYPE:
3816 qla24xx_report_id_acquisition(vha,
3817 (struct vp_rpt_id_entry_24xx *)pkt);
3819 case LOGINOUT_PORT_IOCB_TYPE:
3820 qla24xx_logio_entry(vha, rsp->req,
3821 (struct logio_entry_24xx *)pkt);
3824 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3827 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3829 case ABTS_RECV_24XX:
3830 if (qla_ini_mode_enabled(vha)) {
3831 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3834 qla24xx_queue_purex_item(vha, pure_item,
3835 qla24xx_process_abts);
3838 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3840 /* ensure that the ATIO queue is empty */
3841 qlt_handle_abts_recv(vha, rsp,
3845 qlt_24xx_process_atio_queue(vha, 1);
3848 case ABTS_RESP_24XX:
3851 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3853 case PT_LS4_REQUEST:
3854 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3857 case NOTIFY_ACK_TYPE:
3858 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3859 qlt_response_pkt_all_vps(vha, rsp,
3862 qla24xxx_nack_iocb_entry(vha, rsp->req,
3863 (struct nack_to_isp *)pkt);
3866 /* Do nothing in this case, this check is to prevent it
3867 * from falling into default case
3870 case ABORT_IOCB_TYPE:
3871 qla24xx_abort_iocb_entry(vha, rsp->req,
3872 (struct abort_entry_24xx *)pkt);
3875 qla24xx_mbx_iocb_entry(vha, rsp->req,
3876 (struct mbx_24xx_entry *)pkt);
3878 case VP_CTRL_IOCB_TYPE:
3879 qla_ctrlvp_completed(vha, rsp->req,
3880 (struct vp_ctrl_entry_24xx *)pkt);
3882 case PUREX_IOCB_TYPE:
3883 purex_entry = (void *)pkt;
3884 switch (purex_entry->els_frame_payload[3]) {
3886 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3889 qla24xx_queue_purex_item(vha, pure_item,
3890 qla24xx_process_purex_rdp);
3893 if (!vha->hw->flags.scm_enabled) {
3894 ql_log(ql_log_warn, vha, 0x5094,
3895 "SCM not active for this port\n");
3898 pure_item = qla27xx_copy_fpin_pkt(vha,
3899 (void **)&pkt, &rsp);
3900 __update_rsp_in(is_shadow_hba, rsp, rsp_in);
3903 qla24xx_queue_purex_item(vha, pure_item,
3904 qla27xx_process_purex_fpin);
3908 if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
3910 * ring_ptr and ring_index were
3911 * pre-incremented above. Reset them
3912 * back to current. Wait for next
3913 * interrupt with all IOCBs to arrive
3916 rsp->ring_ptr = (response_t *)pkt;
3917 rsp->ring_index = cur_ring_index;
3919 ql_dbg(ql_dbg_init, vha, 0x5091,
3920 "Defer processing ELS opcode %#x...\n",
3921 purex_entry->els_frame_payload[3]);
3924 qla24xx_auth_els(vha, (void **)&pkt, &rsp);
3927 ql_log(ql_log_warn, vha, 0x509c,
3928 "Discarding ELS Request opcode 0x%x\n",
3929 purex_entry->els_frame_payload[3]);
3932 case SA_UPDATE_IOCB_TYPE:
3933 qla28xx_sa_update_iocb_entry(vha, rsp->req,
3934 (struct sa_update_28xx *)pkt);
3938 /* Type Not Supported. */
3939 ql_dbg(ql_dbg_async, vha, 0x5042,
3940 "Received unknown response pkt type 0x%x entry status=%x.\n",
3941 pkt->entry_type, pkt->entry_status);
3944 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3948 /* Adjust ring index */
3949 if (IS_P3P_TYPE(ha)) {
3950 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3952 wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
3954 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
3959 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3963 struct qla_hw_data *ha = vha->hw;
3964 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3966 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3967 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3971 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
3972 rd_reg_dword(&reg->iobase_addr);
3973 wrt_reg_dword(&reg->iobase_window, 0x0001);
3974 for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3975 rval == QLA_SUCCESS; cnt--) {
3977 wrt_reg_dword(&reg->iobase_window, 0x0001);
3980 rval = QLA_FUNCTION_TIMEOUT;
3982 if (rval == QLA_SUCCESS)
3986 wrt_reg_dword(&reg->iobase_window, 0x0003);
3987 for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3988 rval == QLA_SUCCESS; cnt--) {
3990 wrt_reg_dword(&reg->iobase_window, 0x0003);
3993 rval = QLA_FUNCTION_TIMEOUT;
3995 if (rval != QLA_SUCCESS)
3999 if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
4000 ql_log(ql_log_info, vha, 0x504c,
4001 "Additional code -- 0x55AA.\n");
4004 wrt_reg_dword(&reg->iobase_window, 0x0000);
4005 rd_reg_dword(&reg->iobase_window);
4009 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
4010 * @irq: interrupt number
4011 * @dev_id: SCSI driver HA context
4013 * Called by system whenever the host adapter generates an interrupt.
4015 * Returns handled flag.
4018 qla24xx_intr_handler(int irq, void *dev_id)
4020 scsi_qla_host_t *vha;
4021 struct qla_hw_data *ha;
4022 struct device_reg_24xx __iomem *reg;
4028 struct rsp_que *rsp;
4029 unsigned long flags;
4030 bool process_atio = false;
4032 rsp = (struct rsp_que *) dev_id;
4034 ql_log(ql_log_info, NULL, 0x5059,
4035 "%s: NULL response queue pointer.\n", __func__);
4040 reg = &ha->iobase->isp24;
4043 if (unlikely(pci_channel_offline(ha->pdev)))
4046 spin_lock_irqsave(&ha->hardware_lock, flags);
4047 vha = pci_get_drvdata(ha->pdev);
4048 for (iter = 50; iter--; ) {
4049 stat = rd_reg_dword(&reg->host_status);
4050 if (qla2x00_check_reg32_for_disconnect(vha, stat))
4052 if (stat & HSRX_RISC_PAUSED) {
4053 if (unlikely(pci_channel_offline(ha->pdev)))
4056 hccr = rd_reg_dword(&reg->hccr);
4058 ql_log(ql_log_warn, vha, 0x504b,
4059 "RISC paused -- HCCR=%x, Dumping firmware.\n",
4062 qla2xxx_check_risc_status(vha);
4064 ha->isp_ops->fw_dump(vha);
4065 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4067 } else if ((stat & HSRX_RISC_INT) == 0)
4070 switch (stat & 0xff) {
4071 case INTR_ROM_MB_SUCCESS:
4072 case INTR_ROM_MB_FAILED:
4073 case INTR_MB_SUCCESS:
4074 case INTR_MB_FAILED:
4075 qla24xx_mbx_completion(vha, MSW(stat));
4076 status |= MBX_INTERRUPT;
4079 case INTR_ASYNC_EVENT:
4081 mb[1] = rd_reg_word(&reg->mailbox1);
4082 mb[2] = rd_reg_word(&reg->mailbox2);
4083 mb[3] = rd_reg_word(&reg->mailbox3);
4084 qla2x00_async_event(vha, rsp, mb);
4086 case INTR_RSP_QUE_UPDATE:
4087 case INTR_RSP_QUE_UPDATE_83XX:
4088 qla24xx_process_response_queue(vha, rsp);
4090 case INTR_ATIO_QUE_UPDATE_27XX:
4091 case INTR_ATIO_QUE_UPDATE:
4092 process_atio = true;
4094 case INTR_ATIO_RSP_QUE_UPDATE:
4095 process_atio = true;
4096 qla24xx_process_response_queue(vha, rsp);
4099 ql_dbg(ql_dbg_async, vha, 0x504f,
4100 "Unrecognized interrupt type (%d).\n", stat * 0xff);
4103 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4104 rd_reg_dword_relaxed(&reg->hccr);
4105 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
4108 qla2x00_handle_mbx_completion(ha, status);
4109 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4112 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4113 qlt_24xx_process_atio_queue(vha, 0);
4114 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4121 qla24xx_msix_rsp_q(int irq, void *dev_id)
4123 struct qla_hw_data *ha;
4124 struct rsp_que *rsp;
4125 struct device_reg_24xx __iomem *reg;
4126 struct scsi_qla_host *vha;
4127 unsigned long flags;
4129 rsp = (struct rsp_que *) dev_id;
4131 ql_log(ql_log_info, NULL, 0x505a,
4132 "%s: NULL response queue pointer.\n", __func__);
4136 reg = &ha->iobase->isp24;
4138 spin_lock_irqsave(&ha->hardware_lock, flags);
4140 vha = pci_get_drvdata(ha->pdev);
4141 qla24xx_process_response_queue(vha, rsp);
4142 if (!ha->flags.disable_msix_handshake) {
4143 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4144 rd_reg_dword_relaxed(&reg->hccr);
4146 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4152 qla24xx_msix_default(int irq, void *dev_id)
4154 scsi_qla_host_t *vha;
4155 struct qla_hw_data *ha;
4156 struct rsp_que *rsp;
4157 struct device_reg_24xx __iomem *reg;
4162 unsigned long flags;
4163 bool process_atio = false;
4165 rsp = (struct rsp_que *) dev_id;
4167 ql_log(ql_log_info, NULL, 0x505c,
4168 "%s: NULL response queue pointer.\n", __func__);
4172 reg = &ha->iobase->isp24;
4175 spin_lock_irqsave(&ha->hardware_lock, flags);
4176 vha = pci_get_drvdata(ha->pdev);
4178 stat = rd_reg_dword(&reg->host_status);
4179 if (qla2x00_check_reg32_for_disconnect(vha, stat))
4181 if (stat & HSRX_RISC_PAUSED) {
4182 if (unlikely(pci_channel_offline(ha->pdev)))
4185 hccr = rd_reg_dword(&reg->hccr);
4187 ql_log(ql_log_info, vha, 0x5050,
4188 "RISC paused -- HCCR=%x, Dumping firmware.\n",
4191 qla2xxx_check_risc_status(vha);
4194 ha->isp_ops->fw_dump(vha);
4195 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4197 } else if ((stat & HSRX_RISC_INT) == 0)
4200 switch (stat & 0xff) {
4201 case INTR_ROM_MB_SUCCESS:
4202 case INTR_ROM_MB_FAILED:
4203 case INTR_MB_SUCCESS:
4204 case INTR_MB_FAILED:
4205 qla24xx_mbx_completion(vha, MSW(stat));
4206 status |= MBX_INTERRUPT;
4209 case INTR_ASYNC_EVENT:
4211 mb[1] = rd_reg_word(&reg->mailbox1);
4212 mb[2] = rd_reg_word(&reg->mailbox2);
4213 mb[3] = rd_reg_word(&reg->mailbox3);
4214 qla2x00_async_event(vha, rsp, mb);
4216 case INTR_RSP_QUE_UPDATE:
4217 case INTR_RSP_QUE_UPDATE_83XX:
4218 qla24xx_process_response_queue(vha, rsp);
4220 case INTR_ATIO_QUE_UPDATE_27XX:
4221 case INTR_ATIO_QUE_UPDATE:
4222 process_atio = true;
4224 case INTR_ATIO_RSP_QUE_UPDATE:
4225 process_atio = true;
4226 qla24xx_process_response_queue(vha, rsp);
4229 ql_dbg(ql_dbg_async, vha, 0x5051,
4230 "Unrecognized interrupt type (%d).\n", stat & 0xff);
4233 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4235 qla2x00_handle_mbx_completion(ha, status);
4236 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4239 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4240 qlt_24xx_process_atio_queue(vha, 0);
4241 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4248 qla2xxx_msix_rsp_q(int irq, void *dev_id)
4250 struct qla_hw_data *ha;
4251 struct qla_qpair *qpair;
4255 ql_log(ql_log_info, NULL, 0x505b,
4256 "%s: NULL response queue pointer.\n", __func__);
4261 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4267 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
4269 struct qla_hw_data *ha;
4270 struct qla_qpair *qpair;
4271 struct device_reg_24xx __iomem *reg;
4272 unsigned long flags;
4276 ql_log(ql_log_info, NULL, 0x505b,
4277 "%s: NULL response queue pointer.\n", __func__);
4282 reg = &ha->iobase->isp24;
4283 spin_lock_irqsave(&ha->hardware_lock, flags);
4284 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4285 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4287 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4292 /* Interrupt handling helpers. */
4294 struct qla_init_msix_entry {
4296 irq_handler_t handler;
4299 static const struct qla_init_msix_entry msix_entries[] = {
4300 { "default", qla24xx_msix_default },
4301 { "rsp_q", qla24xx_msix_rsp_q },
4302 { "atio_q", qla83xx_msix_atio_q },
4303 { "qpair_multiq", qla2xxx_msix_rsp_q },
4304 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
4307 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
4308 { "qla2xxx (default)", qla82xx_msix_default },
4309 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
4313 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
4316 struct qla_msix_entry *qentry;
4317 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4318 int min_vecs = QLA_BASE_VECTORS;
4319 struct irq_affinity desc = {
4320 .pre_vectors = QLA_BASE_VECTORS,
4323 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4324 IS_ATIO_MSIX_CAPABLE(ha)) {
4329 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
4330 /* user wants to control IRQ setting for target mode */
4331 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
4332 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4335 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
4336 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4337 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
4341 ql_log(ql_log_fatal, vha, 0x00c7,
4342 "MSI-X: Failed to enable support, "
4343 "giving up -- %d/%d.\n",
4344 ha->msix_count, ret);
4346 } else if (ret < ha->msix_count) {
4347 ql_log(ql_log_info, vha, 0x00c6,
4348 "MSI-X: Using %d vectors\n", ret);
4349 ha->msix_count = ret;
4350 /* Recalculate queue values */
4351 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
4352 ha->max_req_queues = ha->msix_count - 1;
4354 /* ATIOQ needs 1 vector. That's 1 less QPair */
4355 if (QLA_TGT_MODE_ENABLED())
4356 ha->max_req_queues--;
4358 ha->max_rsp_queues = ha->max_req_queues;
4360 ha->max_qpairs = ha->max_req_queues - 1;
4361 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
4362 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
4365 vha->irq_offset = desc.pre_vectors;
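/*
 * Note: the first QLA_BASE_VECTORS vectors (default + rsp_q) are passed
 * as pre_vectors and excluded from affinity spreading; irq_offset records
 * that count (assumption: it is later used when mapping hardware queues
 * to CPUs so the mapping starts past the reserved vectors).
 */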
4366 ha->msix_entries = kcalloc(ha->msix_count,
4367 sizeof(struct qla_msix_entry),
4369 if (!ha->msix_entries) {
4370 ql_log(ql_log_fatal, vha, 0x00c8,
4371 "Failed to allocate memory for ha->msix_entries.\n");
4375 ha->flags.msix_enabled = 1;
4377 for (i = 0; i < ha->msix_count; i++) {
4378 qentry = &ha->msix_entries[i];
4379 qentry->vector = pci_irq_vector(ha->pdev, i);
4380 qentry->vector_base0 = i;
4382 qentry->have_irq = 0;
4384 qentry->handle = NULL;
4387 /* Enable MSI-X vectors for the base queue */
4388 for (i = 0; i < QLA_BASE_VECTORS; i++) {
4389 qentry = &ha->msix_entries[i];
4390 qentry->handle = rsp;
4392 scnprintf(qentry->name, sizeof(qentry->name),
4393 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
4394 if (IS_P3P_TYPE(ha))
4395 ret = request_irq(qentry->vector,
4396 qla82xx_msix_entries[i].handler,
4397 0, qla82xx_msix_entries[i].name, rsp);
4399 ret = request_irq(qentry->vector,
4400 msix_entries[i].handler,
4401 0, qentry->name, rsp);
4403 goto msix_register_fail;
4404 qentry->have_irq = 1;
4409 * If target mode is enabled, also request the vector for the ATIO
4412 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4413 IS_ATIO_MSIX_CAPABLE(ha)) {
4414 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
4416 qentry->handle = rsp;
4417 scnprintf(qentry->name, sizeof(qentry->name),
4418 "qla2xxx%lu_%s", vha->host_no,
4419 msix_entries[QLA_ATIO_VECTOR].name);
4421 ret = request_irq(qentry->vector,
4422 msix_entries[QLA_ATIO_VECTOR].handler,
4423 0, qentry->name, rsp);
4424 qentry->have_irq = 1;
4429 ql_log(ql_log_fatal, vha, 0x00cb,
4430 "MSI-X: unable to register handler -- %x/%d.\n",
4431 qentry->vector, ret);
4432 qla2x00_free_irqs(vha);
4437 /* Enable MSI-X vector for response queue update for queue 0 */
4438 if (IS_MQUE_CAPABLE(ha) &&
4439 (ha->msixbase && ha->mqiobase && ha->max_qpairs))
4444 ql_dbg(ql_dbg_multiq, vha, 0xc005,
4445 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4446 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4447 ql_dbg(ql_dbg_init, vha, 0x0055,
4448 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4449 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4455 pci_free_irq_vectors(ha->pdev);
4460 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
4462 int ret = QLA_FUNCTION_FAILED;
4463 device_reg_t *reg = ha->iobase;
4464 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4466 /* If possible, enable MSI-X. */
4467 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
4468 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
4469 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
4472 if (ql2xenablemsix == 2)
4475 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4476 (ha->pdev->subsystem_device == 0x7040 ||
4477 ha->pdev->subsystem_device == 0x7041 ||
4478 ha->pdev->subsystem_device == 0x1705)) {
4479 ql_log(ql_log_warn, vha, 0x0034,
4480 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
4481 ha->pdev->subsystem_vendor,
4482 ha->pdev->subsystem_device);
4486 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
4487 ql_log(ql_log_warn, vha, 0x0035,
4488 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
4489 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
4493 ret = qla24xx_enable_msix(ha, rsp);
4495 ql_dbg(ql_dbg_init, vha, 0x0036,
4496 "MSI-X: Enabled (0x%X, 0x%X).\n",
4497 ha->chip_revision, ha->fw_attributes);
4498 goto clear_risc_ints;
4503 ql_log(ql_log_info, vha, 0x0037,
4504 "Falling back-to MSI mode -- ret=%d.\n", ret);
4506 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
4507 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
4508 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4511 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
4513 ql_dbg(ql_dbg_init, vha, 0x0038,
4515 ha->flags.msi_enabled = 1;
4517 ql_log(ql_log_warn, vha, 0x0039,
4518 "Falling back-to INTa mode -- ret=%d.\n", ret);
4521 /* Skip INTx on ISP82xx. */
4522 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
4523 return QLA_FUNCTION_FAILED;
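/*
 * Note: interrupt setup falls back in order MSI-X, then single-vector
 * MSI, then legacy INTx (except on ISP82xx, which bails out above).
 */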
4525 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
4526 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
4527 QLA2XXX_DRIVER_NAME, rsp);
4529 ql_log(ql_log_warn, vha, 0x003a,
4530 "Failed to reserve interrupt %d already in use.\n",
4533 } else if (!ha->flags.msi_enabled) {
4534 ql_dbg(ql_dbg_init, vha, 0x0125,
4535 "INTa mode: Enabled.\n");
4536 ha->flags.mr_intr_valid = 1;
4537 /* Set max_qpair to 0, as MSI-X and MSI are not enabled */
4542 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
4545 spin_lock_irq(&ha->hardware_lock);
4546 wrt_reg_word(&reg->isp.semaphore, 0);
4547 spin_unlock_irq(&ha->hardware_lock);
4554 qla2x00_free_irqs(scsi_qla_host_t *vha)
4556 struct qla_hw_data *ha = vha->hw;
4557 struct rsp_que *rsp;
4558 struct qla_msix_entry *qentry;
4562 * We need to check that ha->rsp_q_map is valid in case we are called
4563 * from a probe failure context.
4565 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
4567 rsp = ha->rsp_q_map[0];
4569 if (ha->flags.msix_enabled) {
4570 for (i = 0; i < ha->msix_count; i++) {
4571 qentry = &ha->msix_entries[i];
4572 if (qentry->have_irq) {
4573 irq_set_affinity_notifier(qentry->vector, NULL);
4574 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
4577 kfree(ha->msix_entries);
4578 ha->msix_entries = NULL;
4579 ha->flags.msix_enabled = 0;
4580 ql_dbg(ql_dbg_init, vha, 0x0042,
4581 "Disabled MSI-X.\n");
4583 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
4587 pci_free_irq_vectors(ha->pdev);
4590 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
4591 struct qla_msix_entry *msix, int vector_type)
4593 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
4594 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4597 scnprintf(msix->name, sizeof(msix->name),
4598 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
4599 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
4601 ql_log(ql_log_fatal, vha, 0x00e6,
4602 "MSI-X: Unable to register handler -- %x/%d.\n",
4607 msix->handle = qpair;
4608 qla_mapq_init_qp_cpu_map(ha, msix, qpair);