1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
13 #include "qla_devtbl.h"
19 #include "qla_target.h"
22 * QLogic ISP2x00 Hardware Support Function Prototypes.
24 static int qla2x00_isp_firmware(scsi_qla_host_t *);
25 static int qla2x00_setup_chip(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
32 static int qla2x00_restart_isp(scsi_qla_host_t *);
34 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
35 static int qla84xx_init_chip(scsi_qla_host_t *);
36 static int qla25xx_init_queues(struct qla_hw_data *);
37 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
38 struct event_arg *ea);
39 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
41 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
43 /* SRB Extensions ---------------------------------------------------------- */
/*
 * qla2x00_sp_timeout() - timer callback for an SRB's IOCB timer.
 * NOTE(review): this listing has interior lines elided; comments below
 * describe only the statements that are visible here.
 */
46 qla2x00_sp_timeout(struct timer_list *t)
48 srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
49 struct srb_iocb *iocb;
50 scsi_qla_host_t *vha = sp->vha;
/* Timer callbacks are expected to run with interrupts enabled. */
52 WARN_ON(irqs_disabled());
53 iocb = &sp->u.iocb_cmd;
/* Drop the timer's reference on the SRB. */
57 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/* Per the log text: PCI/register disconnect detected - mark EEH busy. */
59 if (vha && qla2x00_isp_reg_stat(vha->hw)) {
60 ql_log(ql_log_info, vha, 0x9008,
61 "PCI/Register disconnect.\n");
62 qla_pci_set_eeh_busy(vha);
/*
 * qla2x00_sp_free() - release an SRB; stops its IOCB timer first.
 * NOTE(review): trailing lines of this function are elided in this listing.
 */
66 void qla2x00_sp_free(srb_t *sp)
68 struct srb_iocb *iocb = &sp->u.iocb_cmd;
/* Ensure the timeout handler cannot fire after the SRB is freed. */
70 del_timer(&iocb->timer);
/* Debug trap: done() invoked on an SRB that was already freed. */
74 void qla2xxx_rel_done_warning(srb_t *sp, int res)
76 WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
/* Debug trap: free() invoked on an SRB that was already freed. */
79 void qla2xxx_rel_free_warning(srb_t *sp)
81 WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
84 /* Asynchronous Login/Logout Routines -------------------------------------- */
/*
 * qla2x00_get_async_timeout() - compute the timeout for async logio IOCBs.
 * Derived from the switch-negotiated R_A_TOV; overridden for ISPFx00-style
 * parts and for pre-FWI2 ISPs (visible branches below).  NOTE(review):
 * surrounding condition lines are elided in this listing.
 */
87 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
90 struct qla_hw_data *ha = vha->hw;
92 /* Firmware should use switch negotiated r_a_tov for timeout. */
93 tmo = ha->r_a_tov / 10 * 2;
95 tmo = FX00_DEF_RATOV * 2;
96 } else if (!IS_FWI2_CAPABLE(ha)) {
98 * Except for earlier ISPs where the timeout is seeded from the
99 * initialization control block.
101 tmo = ha->login_timeout;
/*
 * qla24xx_abort_iocb_timeout() - timeout handler for an SRB_ABT_CMD.
 * Under the qpair lock it removes both the original command and the abort
 * IOCB from the outstanding-command table, then completes each with
 * QLA_OS_TIMER_EXPIRED.  NOTE(review): interior lines are elided in this
 * listing; comments cover only visible statements.
 */
106 static void qla24xx_abort_iocb_timeout(void *data)
109 struct srb_iocb *abt = &sp->u.iocb_cmd;
110 struct qla_qpair *qpair = sp->qpair;
113 int sp_found = 0, cmdsp_found = 0;
116 ql_dbg(ql_dbg_async, sp->vha, 0x507c,
117 "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
118 sp->cmd_sp->handle, sp->cmd_sp->type,
119 sp->handle, sp->type);
121 ql_dbg(ql_dbg_async, sp->vha, 0x507c,
122 "Abort timeout 2 - hdl=%x, type=%x\n",
123 sp->handle, sp->type);
/* Scan the outstanding-command table for both SRBs (handle 0 unused). */
125 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
126 for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
127 if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
129 qpair->req->outstanding_cmds[handle] = NULL;
131 qla_put_fw_resources(qpair, &sp->cmd_sp->iores);
134 /* removing the abort */
135 if (qpair->req->outstanding_cmds[handle] == sp) {
136 qpair->req->outstanding_cmds[handle] = NULL;
138 qla_put_fw_resources(qpair, &sp->iores);
142 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
144 if (cmdsp_found && sp->cmd_sp) {
146 * This done function should take care of
147 * original command ref: INIT
149 sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
/* Complete the abort SRB itself with a timeout completion status. */
153 abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
154 sp->done(sp, QLA_OS_TIMER_EXPIRED);
/*
 * qla24xx_abort_sp_done() - completion callback for an SRB_ABT_CMD.
 * Waits out any NVMe kref on the original command, wakes a synchronous
 * waiter if requested, and drops the abort SRB's reference.
 */
158 static void qla24xx_abort_sp_done(srb_t *sp, int res)
160 struct srb_iocb *abt = &sp->u.iocb_cmd;
161 srb_t *orig_sp = sp->cmd_sp;
164 qla_wait_nvme_release_cmd_kref(orig_sp);
/* Wake the caller blocked in qla24xx_async_abort_cmd(wait=true). */
166 if (sp->flags & SRB_WAKEUP_ON_COMP)
167 complete(&abt->u.abt.comp);
/* ref: INIT */
170 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla24xx_async_abort_cmd() - issue an ABTS for @cmd_sp.
 * @wait: when true, block until the abort completes and translate the
 *        firmware completion status into QLA_SUCCESS / QLA_ERR_FROM_FW.
 * Returns a qla2x00 status code; QLA_MEMORY_ALLOC_FAILED if no SRB.
 * NOTE(review): interior lines are elided in this listing.
 */
173 int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
175 scsi_qla_host_t *vha = cmd_sp->vha;
176 struct srb_iocb *abt_iocb;
178 int rval = QLA_FUNCTION_FAILED;
180 /* ref: INIT for ABTS command */
181 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
184 return QLA_MEMORY_ALLOC_FAILED;
186 qla_vha_mark_busy(vha);
187 abt_iocb = &sp->u.iocb_cmd;
188 sp->type = SRB_ABT_CMD;
/* Abort must be issued on the same qpair as the original command. */
190 sp->qpair = cmd_sp->qpair;
193 sp->flags = SRB_WAKEUP_ON_COMP;
195 init_completion(&abt_iocb->u.abt.comp);
196 /* FW can send 2 x ABTS's timeout/20s */
197 qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
198 sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;
200 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
201 abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
203 ql_dbg(ql_dbg_async, vha, 0x507c,
204 "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
207 rval = qla2x00_start_sp(sp);
208 if (rval != QLA_SUCCESS) {
/* ref: INIT - undo on failure to queue the IOCB. */
210 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/* Synchronous path: wait for qla24xx_abort_sp_done() to complete us. */
215 wait_for_completion(&abt_iocb->u.abt.comp);
216 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
217 QLA_SUCCESS : QLA_ERR_FROM_FW;
219 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_async_iocb_timeout() - generic timeout for async logio/CT IOCBs.
 * Tries to abort the timed-out SRB; if the abort cannot be issued it
 * manually clears the SRB from the outstanding-command table and completes
 * it with QLA_FUNCTION_TIMEOUT.  NOTE(review): the surrounding switch on
 * sp->type is partly elided in this listing.
 */
226 qla2x00_async_iocb_timeout(void *data)
229 fc_port_t *fcport = sp->fcport;
230 struct srb_iocb *lio = &sp->u.iocb_cmd;
235 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
236 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
237 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
239 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
241 pr_info("Async-%s timeout - hdl=%x.\n",
242 sp->name, sp->handle);
247 rc = qla24xx_async_abort_cmd(sp, false);
249 /* Retry as needed. */
250 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
251 lio->u.logio.data[1] =
252 lio->u.logio.flags & SRB_LOGIN_RETRIED ?
253 QLA_LOGIO_LOGIN_RETRIED : 0;
/* Abort could not be sent: remove the SRB from the table ourselves. */
254 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
255 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
257 if (sp->qpair->req->outstanding_cmds[h] ==
259 sp->qpair->req->outstanding_cmds[h] =
264 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
265 sp->done(sp, QLA_FUNCTION_TIMEOUT);
/* Non-logio SRB types (e.g. CT passthrough) take the same fallback. */
269 case SRB_CT_PTHRU_CMD:
276 rc = qla24xx_async_abort_cmd(sp, false);
278 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
279 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
281 if (sp->qpair->req->outstanding_cmds[h] ==
283 sp->qpair->req->outstanding_cmds[h] =
288 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
289 sp->done(sp, QLA_FUNCTION_TIMEOUT);
/*
 * qla2x00_async_login_sp_done() - completion callback for SRB_LOGIN_CMD.
 * Builds an event_arg from the logio mailbox data and forwards it to
 * qla24xx_handle_plogi_done_event() unless the host is unloading.
 */
295 static void qla2x00_async_login_sp_done(srb_t *sp, int res)
297 struct scsi_qla_host *vha = sp->vha;
298 struct srb_iocb *lio = &sp->u.iocb_cmd;
301 ql_dbg(ql_dbg_disc, vha, 0x20dd,
302 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
304 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
306 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
307 memset(&ea, 0, sizeof(ea));
308 ea.fcport = sp->fcport;
309 ea.data[0] = lio->u.logio.data[0];
310 ea.data[1] = lio->u.logio.data[1];
311 ea.iop[0] = lio->u.logio.iop[0];
312 ea.iop[1] = lio->u.logio.iop[1];
/* NOTE(review): elided condition above presumably checks res != 0. */
315 ea.data[0] = MBS_COMMAND_ERROR;
316 qla24xx_handle_plogi_done_event(vha, &ea);
/* ref: INIT */
320 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_async_login() - issue an asynchronous PLOGI for @fcport.
 * Refuses to send when the vha is offline, a prior async op is pending,
 * or the port has no loop ID.  On queue failure, flags a relogin and
 * pushes disc_state back to DELETED so discovery retries.
 * NOTE(review): interior lines are elided in this listing.
 */
324 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
328 struct srb_iocb *lio;
329 int rval = QLA_FUNCTION_FAILED;
331 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
332 fcport->loop_id == FC_NO_LOOP_ID) {
333 ql_log(ql_log_warn, vha, 0xffff,
334 "%s: %8phC - not sending command.\n",
335 __func__, fcport->port_name);
340 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
344 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
345 fcport->flags |= FCF_ASYNC_SENT;
346 fcport->logout_completed = 0;
348 sp->type = SRB_LOGIN_CMD;
350 sp->gen1 = fcport->rscn_gen;
351 sp->gen2 = fcport->login_gen;
352 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
353 qla2x00_async_login_sp_done);
355 lio = &sp->u.iocb_cmd;
/* N2N with a bigger-WWPN peer: skip PLOGI, go straight to PRLI. */
356 if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
357 lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
359 if (vha->hw->flags.edif_enabled &&
361 lio->u.logio.flags |=
362 (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
364 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
/* NVMe targets: PRLI is driven separately, skip it here. */
368 if (NVME_TARGET(vha->hw, fcport))
369 lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
371 rval = qla2x00_start_sp(sp);
373 ql_dbg(ql_dbg_disc, vha, 0x2072,
374 "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
375 fcport->port_name, sp->handle, fcport->loop_id,
376 fcport->d_id.b24, fcport->login_retry,
377 lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : "");
379 if (rval != QLA_SUCCESS) {
380 fcport->flags |= FCF_LOGIN_NEEDED;
381 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* Error path: drop the INIT reference and clear async flags. */
389 kref_put(&sp->cmd_kref, qla2x00_sp_release);
390 fcport->flags &= ~FCF_ASYNC_SENT;
392 fcport->flags &= ~FCF_ASYNC_ACTIVE;
395 * async login failed. Could be due to iocb/exchange resource
396 * being low. Set state DELETED for re-login process to start again.
398 qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
/*
 * qla2x00_async_logout_sp_done() - completion callback for SRB_LOGOUT_CMD.
 * Clears async flags, bumps the login generation, and hands the logio
 * status to the target-mode LOGO completion handler.
 */
402 static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
404 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
405 sp->fcport->login_gen++;
406 qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
/* ref: INIT */
408 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_async_logout() - issue an asynchronous LOGO for @fcport.
 * NOTE(review): interior lines are elided in this listing.
 */
412 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
415 int rval = QLA_FUNCTION_FAILED;
417 fcport->flags |= FCF_ASYNC_SENT;
419 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
423 sp->type = SRB_LOGOUT_CMD;
425 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
/*
 * NOTE(review): the trailing "," below is a comma operator where a ";"
 * is expected - harmless sequencing-wise but almost certainly a typo;
 * confirm against upstream and fix to a semicolon.
 */
426 qla2x00_async_logout_sp_done),
428 ql_dbg(ql_dbg_disc, vha, 0x2070,
429 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
430 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
431 fcport->d_id.b.area, fcport->d_id.b.al_pa,
432 fcport->port_name, fcport->explicit_logout);
434 rval = qla2x00_start_sp(sp);
435 if (rval != QLA_SUCCESS)
/* Error path: drop INIT reference and clear the async flags. */
441 kref_put(&sp->cmd_kref, qla2x00_sp_release);
443 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
/*
 * qla2x00_async_prlo_done() - work-queue handler run after a PRLO
 * completes; marks the device lost (initiator mode only) and notifies
 * the target-mode LOGO completion handler.
 */
448 qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
451 fcport->flags &= ~FCF_ASYNC_ACTIVE;
452 /* Don't re-login in target mode */
453 if (!fcport->tgt_session)
454 qla2x00_mark_device_lost(vha, fcport, 1);
455 qlt_logo_completion_handler(fcport, data[0]);
/*
 * qla2x00_async_prlo_sp_done() - completion callback for SRB_PRLO_CMD;
 * defers the real handling to qla2x00_async_prlo_done() via a work item
 * unless the host is unloading.
 */
458 static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
460 struct srb_iocb *lio = &sp->u.iocb_cmd;
461 struct scsi_qla_host *vha = sp->vha;
463 sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
464 if (!test_bit(UNLOADING, &vha->dpc_flags))
465 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
/* ref: INIT */
468 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_async_prlo() - issue an asynchronous PRLO for @fcport.
 * NOTE(review): interior lines are elided in this listing.
 */
472 qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
477 rval = QLA_FUNCTION_FAILED;
479 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
483 sp->type = SRB_PRLO_CMD;
485 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
486 qla2x00_async_prlo_sp_done);
488 ql_dbg(ql_dbg_disc, vha, 0x2070,
489 "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
490 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
491 fcport->d_id.b.area, fcport->d_id.b.al_pa);
493 rval = qla2x00_start_sp(sp);
494 if (rval != QLA_SUCCESS)
/* Error path: drop the INIT reference and clear the active flag. */
501 kref_put(&sp->cmd_kref, qla2x00_sp_release);
503 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * qla24xx_handle_adisc_event() - process an ADISC completion event.
 * On failure, schedules the session for deletion (with forced FW
 * cleanup); on generation mismatch, replays the RSCN or deletes the
 * session; otherwise continues via __qla24xx_handle_gpdb_event().
 * NOTE(review): interior lines are elided in this listing.
 */
508 void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
510 struct fc_port *fcport = ea->fcport;
512 ql_dbg(ql_dbg_disc, vha, 0x20d2,
513 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
514 __func__, fcport->port_name, fcport->disc_state,
515 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
516 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
518 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
521 if (ea->data[0] != MBS_COMMAND_COMPLETE) {
522 ql_dbg(ql_dbg_disc, vha, 0x2066,
523 "%s %8phC: adisc fail: post delete\n",
524 __func__, ea->fcport->port_name);
525 /* deleted = 0 & logout_on_delete = force fw cleanup */
527 fcport->logout_on_delete = 1;
528 qlt_schedule_sess_for_deletion(ea->fcport);
532 if (ea->fcport->disc_state == DSC_DELETE_PEND)
535 if (ea->sp->gen2 != ea->fcport->login_gen) {
536 /* target side must have changed it. */
537 ql_dbg(ql_dbg_disc, vha, 0x20d3,
538 "%s %8phC generation changed\n",
539 __func__, ea->fcport->port_name);
541 } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
/* Stale RSCN generation: replay the RSCN and restart discovery. */
542 qla_rscn_replay(fcport);
543 qlt_schedule_sess_for_deletion(fcport);
547 __qla24xx_handle_gpdb_event(vha, ea);
/*
 * qla_post_els_plogi_work() - queue a QLA_EVT_ELS_PLOGI work item for
 * @fcport and mark it login-pending.  Returns QLA_FUNCTION_FAILED when
 * the work item cannot be allocated.
 */
550 static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
552 struct qla_work_evt *e;
554 e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
556 return QLA_FUNCTION_FAILED;
558 e->u.fcport.fcport = fcport;
559 fcport->flags |= FCF_ASYNC_ACTIVE;
560 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
561 return qla2x00_post_work(vha, e);
/*
 * qla2x00_async_adisc_sp_done() - completion callback for SRB_ADISC_CMD.
 * Packages the logio mailbox data into an event_arg and forwards it to
 * qla24xx_handle_adisc_event().
 */
564 static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
566 struct scsi_qla_host *vha = sp->vha;
568 struct srb_iocb *lio = &sp->u.iocb_cmd;
570 ql_dbg(ql_dbg_disc, vha, 0x2066,
571 "Async done-%s res %x %8phC\n",
572 sp->name, res, sp->fcport->port_name);
574 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
576 memset(&ea, 0, sizeof(ea));
578 ea.data[0] = lio->u.logio.data[0];
579 ea.data[1] = lio->u.logio.data[1];
580 ea.iop[0] = lio->u.logio.iop[0];
581 ea.iop[1] = lio->u.logio.iop[1];
582 ea.fcport = sp->fcport;
/* NOTE(review): elided condition above presumably checks res != 0. */
585 ea.data[0] = MBS_COMMAND_ERROR;
587 qla24xx_handle_adisc_event(vha, &ea);
/* ref: INIT */
589 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_async_adisc() - issue an asynchronous ADISC for @fcport.
 * Skips sessions being deleted, offline hosts, and ports with an async
 * op already in flight.  On failure to queue, re-posts the ADISC as a
 * work item.  NOTE(review): interior lines are elided in this listing.
 */
593 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
597 struct srb_iocb *lio;
598 int rval = QLA_FUNCTION_FAILED;
600 if (IS_SESSION_DELETED(fcport)) {
601 ql_log(ql_log_warn, vha, 0xffff,
602 "%s: %8phC is being delete - not sending command.\n",
603 __func__, fcport->port_name);
604 fcport->flags &= ~FCF_ASYNC_ACTIVE;
608 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
611 fcport->flags |= FCF_ASYNC_SENT;
613 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
617 sp->type = SRB_ADISC_CMD;
619 sp->gen1 = fcport->rscn_gen;
620 sp->gen2 = fcport->login_gen;
621 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
622 qla2x00_async_adisc_sp_done);
/* Propagate the caller's retried-login hint into the IOCB flags. */
624 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
625 lio = &sp->u.iocb_cmd;
626 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
629 ql_dbg(ql_dbg_disc, vha, 0x206f,
630 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
631 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
633 rval = qla2x00_start_sp(sp);
634 if (rval != QLA_SUCCESS)
/* Error path: drop INIT ref, clear flags, retry via work queue. */
641 kref_put(&sp->cmd_kref, qla2x00_sp_release);
643 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
644 qla2x00_post_async_adisc_work(vha, fcport, data);
/*
 * qla2x00_is_reserved_id() - report whether @loop_id is reserved and must
 * not be handed out.  FWI2-capable parts reserve everything above
 * NPH_LAST_HANDLE; older parts reserve the gap between max_loop_id and
 * SNS_FIRST_LOOP_ID plus the management-server and broadcast IDs.
 */
648 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
650 struct qla_hw_data *ha = vha->hw;
652 if (IS_FWI2_CAPABLE(ha))
653 return loop_id > NPH_LAST_HANDLE;
655 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
656 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
660 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
661 * @vha: adapter state pointer.
662 * @dev: port structure pointer.
665 * qla2x00 local function return status code.
/*
 * qla2x00_find_new_loop_id() - allocate an unused loop ID for @dev from
 * the bitmap under vport_slock; fails with QLA_FUNCTION_FAILED when the
 * map is exhausted or only reserved IDs remain.
 * NOTE(review): interior lines are elided in this listing.
 */
670 static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
673 struct qla_hw_data *ha = vha->hw;
674 unsigned long flags = 0;
678 spin_lock_irqsave(&ha->vport_slock, flags);
680 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
681 if (dev->loop_id >= LOOPID_MAP_SIZE ||
682 qla2x00_is_reserved_id(vha, dev->loop_id)) {
683 dev->loop_id = FC_NO_LOOP_ID;
684 rval = QLA_FUNCTION_FAILED;
/* Claim the ID in the bitmap before dropping the lock. */
686 set_bit(dev->loop_id, ha->loop_id_map);
688 spin_unlock_irqrestore(&ha->vport_slock, flags);
690 if (rval == QLA_SUCCESS)
691 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
692 "Assigning new loopid=%x, portid=%x.\n",
693 dev->loop_id, dev->d_id.b24);
695 ql_log(ql_log_warn, dev->vha, 0x2087,
696 "No loop_id's available, portid=%x.\n",
/*
 * qla2x00_clear_loop_id() - return @fcport's loop ID to the bitmap and
 * reset it to FC_NO_LOOP_ID.  Reserved or already-unset IDs are left
 * untouched (the visible early-exit condition below).
 */
702 void qla2x00_clear_loop_id(fc_port_t *fcport)
704 struct qla_hw_data *ha = fcport->vha->hw;
706 if (fcport->loop_id == FC_NO_LOOP_ID ||
707 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
710 clear_bit(fcport->loop_id, ha->loop_id_map);
711 fcport->loop_id = FC_NO_LOOP_ID;
/*
 * qla24xx_handle_gnl_done_event() - process the firmware's extended
 * Get-Name-List response for one fcport.  Walks the returned entries,
 * reconciles loop ID / port ID / login state with our fcport, resolves
 * loop-ID conflicts, and drives the next login step appropriate to the
 * current topology.  If the port is absent from the list, topology-
 * specific fallback handling runs (N2N link/chip reset pacing, relogin).
 * NOTE(review): this listing has many interior lines elided (conditions,
 * case labels, braces); comments are limited to visible statements.
 */
714 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
715 struct event_arg *ea)
717 fc_port_t *fcport, *conflict_fcport;
718 struct get_name_list_extended *e;
719 u16 i, n, found = 0, loop_id;
723 u8 current_login_state, nvme_cls;
726 ql_dbg(ql_dbg_disc, vha, 0xffff,
727 "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
728 __func__, fcport->port_name, fcport->disc_state,
729 fcport->fw_login_state, ea->rc,
730 fcport->login_gen, fcport->last_login_gen,
731 fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
733 if (fcport->disc_state == DSC_DELETE_PEND)
736 if (ea->rc) { /* rval */
737 if (fcport->login_retry == 0) {
738 ql_dbg(ql_dbg_disc, vha, 0x20de,
739 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
740 fcport->port_name, fcport->login_retry);
/* Generation checks: stale RSCN or login gen restarts discovery. */
745 if (fcport->last_rscn_gen != fcport->rscn_gen) {
746 qla_rscn_replay(fcport);
747 qlt_schedule_sess_for_deletion(fcport);
749 } else if (fcport->last_login_gen != fcport->login_gen) {
750 ql_dbg(ql_dbg_disc, vha, 0x20e0,
751 "%s %8phC login gen changed\n",
752 __func__, fcport->port_name);
753 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* Number of name-list entries returned (ea->data[0] = bytes xfered). */
757 n = ea->data[0] / sizeof(struct get_name_list_extended);
759 ql_dbg(ql_dbg_disc, vha, 0x20e1,
760 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
761 __func__, __LINE__, fcport->port_name, n,
762 fcport->d_id.b.domain, fcport->d_id.b.area,
763 fcport->d_id.b.al_pa, fcport->loop_id);
765 for (i = 0; i < n; i++) {
767 wwn = wwn_to_u64(e->port_name);
768 id.b.domain = e->port_id[2];
769 id.b.area = e->port_id[1];
770 id.b.al_pa = e->port_id[0];
/* Skip entries that are not this port or are switch-reserved. */
773 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
776 if (IS_SW_RESV_ADDR(id))
/* Mask out the reserve bit of the nport handle. */
781 loop_id = le16_to_cpu(e->nport_handle);
782 loop_id = (loop_id & 0x7fff);
783 nvme_cls = e->current_login_state >> 4;
784 current_login_state = e->current_login_state & 0xf;
/* Upper nibble carries the NVMe login state; prefer it in PRLI phase. */
786 if (PRLI_PHASE(nvme_cls)) {
787 current_login_state = nvme_cls;
788 fcport->fc4_type &= ~FS_FC4TYPE_FCP;
789 fcport->fc4_type |= FS_FC4TYPE_NVME;
790 } else if (PRLI_PHASE(current_login_state)) {
791 fcport->fc4_type |= FS_FC4TYPE_FCP;
792 fcport->fc4_type &= ~FS_FC4TYPE_NVME;
795 ql_dbg(ql_dbg_disc, vha, 0x20e2,
796 "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
797 __func__, fcport->port_name,
798 e->current_login_state, fcport->fw_login_state,
799 fcport->fc4_type, id.b24, fcport->d_id.b24,
800 loop_id, fcport->loop_id);
802 switch (fcport->disc_state) {
803 case DSC_DELETE_PEND:
/* Port ID or loop ID changed under us: tear the session down. */
807 if ((id.b24 != fcport->d_id.b24 &&
809 fcport->loop_id != FC_NO_LOOP_ID) ||
810 (fcport->loop_id != FC_NO_LOOP_ID &&
811 fcport->loop_id != loop_id)) {
812 ql_dbg(ql_dbg_disc, vha, 0x20e3,
813 "%s %d %8phC post del sess\n",
814 __func__, __LINE__, fcport->port_name);
815 if (fcport->n2n_flag)
816 fcport->d_id.b24 = 0;
817 qlt_schedule_sess_for_deletion(fcport);
823 fcport->loop_id = loop_id;
824 if (fcport->n2n_flag)
825 fcport->d_id.b24 = id.b24;
827 wwn = wwn_to_u64(fcport->port_name);
828 qlt_find_sess_invalidate_other(vha, wwn,
829 id, loop_id, &conflict_fcport);
831 if (conflict_fcport) {
833 * Another share fcport share the same loop_id &
834 * nport id. Conflict fcport needs to finish
835 * cleanup before this fcport can proceed to login.
837 conflict_fcport->conflict = fcport;
838 fcport->login_pause = 1;
/* Topology-specific continuation based on FW login state. */
841 switch (vha->hw->current_topology) {
843 switch (current_login_state) {
844 case DSC_LS_PRLI_COMP:
846 vha, 0x20e4, "%s %d %8phC post gpdb\n",
847 __func__, __LINE__, fcport->port_name);
/* BIT_4 of PRLI service parameter word 3 = target function. */
849 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
850 fcport->port_type = FCT_INITIATOR;
852 fcport->port_type = FCT_TARGET;
853 data[0] = data[1] = 0;
854 qla2x00_post_async_adisc_work(vha, fcport,
857 case DSC_LS_PLOGI_COMP:
858 if (vha->hw->flags.edif_enabled) {
859 /* check to see if App support Secure */
860 qla24xx_post_gpdb_work(vha, fcport, 0);
864 case DSC_LS_PORT_UNAVAIL:
866 if (fcport->loop_id == FC_NO_LOOP_ID) {
867 qla2x00_find_new_loop_id(vha, fcport);
868 fcport->fw_login_state =
871 ql_dbg(ql_dbg_disc, vha, 0x20e5,
872 "%s %d %8phC\n", __func__, __LINE__,
874 qla24xx_fcport_handle_login(vha, fcport);
879 fcport->fw_login_state = current_login_state;
881 switch (current_login_state) {
882 case DSC_LS_PRLI_PEND:
884 * In the middle of PRLI. Let it finish.
885 * Allow relogin code to recheck state again
886 * with GNL. Push disc_state back to DELETED
887 * so GNL can go out again
889 qla2x00_set_fcport_disc_state(fcport,
891 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
893 case DSC_LS_PRLI_COMP:
894 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
895 fcport->port_type = FCT_INITIATOR;
897 fcport->port_type = FCT_TARGET;
899 data[0] = data[1] = 0;
900 qla2x00_post_async_adisc_work(vha, fcport,
903 case DSC_LS_PLOGI_COMP:
904 if (vha->hw->flags.edif_enabled &&
906 /* check to see if App support secure or not */
907 qla24xx_post_gpdb_work(vha, fcport, 0);
/* N2N role split: the bigger-WWPN side drives the login. */
910 if (fcport_is_bigger(fcport)) {
911 /* local adapter is smaller */
912 if (fcport->loop_id != FC_NO_LOOP_ID)
913 qla2x00_clear_loop_id(fcport);
915 fcport->loop_id = loop_id;
916 qla24xx_fcport_handle_login(vha,
922 if (fcport_is_smaller(fcport)) {
923 /* local adapter is bigger */
924 if (fcport->loop_id != FC_NO_LOOP_ID)
925 qla2x00_clear_loop_id(fcport);
927 fcport->loop_id = loop_id;
928 qla24xx_fcport_handle_login(vha,
934 } /* switch (ha->current_topology) */
/* Port not found in the name list: topology-specific fallback. */
938 switch (vha->hw->current_topology) {
941 for (i = 0; i < n; i++) {
/*
 * NOTE(review): port_id byte order here ([0]/[1]/[2]) is the
 * reverse of the loop above ([2]/[1]/[0]) - confirm against the
 * firmware spec / upstream whether this is intentional.
 */
943 id.b.domain = e->port_id[0];
944 id.b.area = e->port_id[1];
945 id.b.al_pa = e->port_id[2];
947 loop_id = le16_to_cpu(e->nport_handle);
949 if (fcport->d_id.b24 == id.b24) {
951 qla2x00_find_fcport_by_wwpn(vha,
953 if (conflict_fcport) {
954 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
956 "%s %d %8phC post del sess\n",
958 conflict_fcport->port_name);
959 qlt_schedule_sess_for_deletion
964 * FW already picked this loop id for
967 if (fcport->loop_id == loop_id)
968 fcport->loop_id = FC_NO_LOOP_ID;
970 qla24xx_fcport_handle_login(vha, fcport);
973 qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
/* N2N peer not logging in: escalate link reset, then chip reset. */
974 if (time_after_eq(jiffies, fcport->dm_login_expire)) {
975 if (fcport->n2n_link_reset_cnt < 2) {
976 fcport->n2n_link_reset_cnt++;
978 * remote port is not sending PLOGI.
979 * Reset link to kick start his state
982 set_bit(N2N_LINK_RESET,
985 if (fcport->n2n_chip_reset < 1) {
986 ql_log(ql_log_info, vha, 0x705d,
987 "Chip reset to bring laser down");
988 set_bit(ISP_ABORT_NEEDED,
990 fcport->n2n_chip_reset++;
992 ql_log(ql_log_info, vha, 0x705d,
993 "Remote port %8ph is not coming back\n",
995 fcport->scan_state = 0;
998 qla2xxx_wake_dpc(vha);
1001 * report port suppose to do PLOGI. Give him
1002 * more time. FW will catch it.
1004 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1008 qla24xx_fcport_handle_login(vha, fcport);
/*
 * qla24xx_async_gnl_sp_done() - completion callback for the GNL mailbox
 * IOCB.  Parses the returned name list, marks every reported loop ID
 * busy, dispatches the done-event for each fcport queued on gnl.fcports,
 * creates fcports for sessions the FW knows but we don't, and re-posts
 * GNL for ports queued while this one was in flight.
 * NOTE(review): interior lines are elided in this listing.
 */
1016 static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
1018 struct scsi_qla_host *vha = sp->vha;
1019 unsigned long flags;
1020 struct fc_port *fcport = NULL, *tf;
1021 u16 i, n = 0, loop_id;
1022 struct event_arg ea;
1023 struct get_name_list_extended *e;
1028 ql_dbg(ql_dbg_disc, vha, 0x20e7,
1029 "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
1030 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
1031 sp->u.iocb_cmd.u.mbx.in_mb[2]);
1034 sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
1035 memset(&ea, 0, sizeof(ea));
/* mb[1] holds the number of bytes transferred by the firmware. */
1039 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
1040 sizeof(struct get_name_list_extended)) {
1041 n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
1042 sizeof(struct get_name_list_extended);
1043 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
/* Mark every loop ID the FW reports as in use. */
1046 for (i = 0; i < n; i++) {
1048 loop_id = le16_to_cpu(e->nport_handle);
1049 /* mask out reserve bit */
1050 loop_id = (loop_id & 0x7fff);
1051 set_bit(loop_id, vha->hw->loop_id_map);
1052 wwn = wwn_to_u64(e->port_name);
1054 ql_dbg(ql_dbg_disc, vha, 0x20e8,
1055 "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
1056 __func__, &wwn, e->port_id[2], e->port_id[1],
1057 e->port_id[0], e->current_login_state, e->last_login_state,
1058 (loop_id & 0x7fff));
1061 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* Splice the waiters off gnl.fcports so new arrivals re-trigger GNL. */
1065 if (!list_empty(&vha->gnl.fcports))
1066 list_splice_init(&vha->gnl.fcports, &h);
1067 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1069 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
1070 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1071 list_del_init(&fcport->gnl_entry);
1072 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1073 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1076 qla24xx_handle_gnl_done_event(vha, &ea);
1079 /* create new fcport if fw has knowledge of new sessions */
1080 for (i = 0; i < n; i++) {
1085 wwn = wwn_to_u64(e->port_name);
1088 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
1089 if (!memcmp((u8 *)&wwn, fcport->port_name,
1096 id.b.domain = e->port_id[2];
1097 id.b.area = e->port_id[1];
1098 id.b.al_pa = e->port_id[0];
1101 if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
1102 ql_dbg(ql_dbg_disc, vha, 0x2065,
1103 "%s %d %8phC %06x post new sess\n",
1104 __func__, __LINE__, (u8 *)&wwn, id.b24);
1105 wwnn = wwn_to_u64(e->node_name);
1106 qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
1107 (u8 *)&wwnn, NULL, 0);
/* Re-issue GNL for ports that queued up while this one ran. */
1111 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1113 if (!list_empty(&vha->gnl.fcports)) {
1115 list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
1117 list_del_init(&fcport->gnl_entry);
1118 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1119 if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
1123 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/* ref: INIT */
1126 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla24xx_async_gnl() - issue the Get-Name-List mailbox command (async).
 * @fcport is queued on vha->gnl.fcports; if a GNL is already in flight
 * (vha->gnl.sent) the completion handler will service this port too.
 * NOTE(review): interior lines are elided in this listing.
 */
1129 int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
1132 int rval = QLA_FUNCTION_FAILED;
1133 unsigned long flags;
1136 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
1139 ql_dbg(ql_dbg_disc, vha, 0x20d9,
1140 "Async-gnlist WWPN %8phC \n", fcport->port_name);
1142 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1143 fcport->flags |= FCF_ASYNC_SENT;
1144 qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
/* Snapshot generations so the done-event can detect staleness. */
1145 fcport->last_rscn_gen = fcport->rscn_gen;
1146 fcport->last_login_gen = fcport->login_gen;
1148 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
/* A GNL is already outstanding; its completion covers this port. */
1149 if (vha->gnl.sent) {
1150 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1154 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1157 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1161 sp->type = SRB_MB_IOCB;
1162 sp->name = "gnlist";
1163 sp->gen1 = fcport->rscn_gen;
1164 sp->gen2 = fcport->login_gen;
1165 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1166 qla24xx_async_gnl_sp_done);
/* Build the GNL mailbox: DMA address/size of the gnl buffer, vp idx. */
1168 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1169 mb[0] = MBC_PORT_NODE_NAME_LIST;
1170 mb[1] = BIT_2 | BIT_3;
1171 mb[2] = MSW(vha->gnl.ldma);
1172 mb[3] = LSW(vha->gnl.ldma);
1173 mb[6] = MSW(MSD(vha->gnl.ldma));
1174 mb[7] = LSW(MSD(vha->gnl.ldma));
1175 mb[8] = vha->gnl.size;
1176 mb[9] = vha->vp_idx;
1178 ql_dbg(ql_dbg_disc, vha, 0x20da,
1179 "Async-%s - OUT WWPN %8phC hndl %x\n",
1180 sp->name, fcport->port_name, sp->handle);
1182 rval = qla2x00_start_sp(sp);
1183 if (rval != QLA_SUCCESS)
/* Error path: drop the INIT reference and clear async flags. */
1190 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1192 fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
/*
 * qla24xx_post_gnl_work() - queue a QLA_EVT_GNL work item for @fcport.
 * Returns QLA_FUNCTION_FAILED when the work item cannot be allocated.
 */
1196 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1198 struct qla_work_evt *e;
1200 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
1202 return QLA_FUNCTION_FAILED;
1204 e->u.fcport.fcport = fcport;
1205 fcport->flags |= FCF_ASYNC_ACTIVE;
1206 return qla2x00_post_work(vha, e);
/*
 * qla24xx_async_gpdb_sp_done() - completion callback for the
 * Get-Port-Database mailbox IOCB.  Forwards an event_arg to
 * qla24xx_handle_gpdb_event() and frees the DMA'd port_database buffer.
 * NOTE(review): interior lines are elided in this listing.
 */
1209 static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
1211 struct scsi_qla_host *vha = sp->vha;
1212 struct qla_hw_data *ha = vha->hw;
1213 fc_port_t *fcport = sp->fcport;
1214 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
1215 struct event_arg ea;
1217 ql_dbg(ql_dbg_disc, vha, 0x20db,
1218 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
1219 sp->name, res, fcport->port_name, mb[1], mb[2]);
1221 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1223 if (res == QLA_FUNCTION_TIMEOUT)
1226 memset(&ea, 0, sizeof(ea));
1230 qla24xx_handle_gpdb_event(vha, &ea);
/* Return the port-database buffer allocated in qla24xx_async_gpdb(). */
1233 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1234 sp->u.iocb_cmd.u.mbx.in_dma);
/* ref: INIT */
1236 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla24xx_post_prli_work() - queue a QLA_EVT_PRLI work item for @fcport.
 * Pure target mode never initiates PRLI, so that case fails immediately.
 */
1239 int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1241 struct qla_work_evt *e;
1243 if (vha->host->active_mode == MODE_TARGET)
1244 return QLA_FUNCTION_FAILED;
1246 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
1248 return QLA_FUNCTION_FAILED;
1250 e->u.fcport.fcport = fcport;
1252 return qla2x00_post_work(vha, e);
/*
 * qla2x00_async_prli_sp_done() - completion callback for SRB_PRLI_CMD.
 * Builds an event_arg from the logio data (overriding data[0] on timer
 * expiry or error) and forwards it to qla24xx_handle_prli_done_event().
 */
1255 static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
1257 struct scsi_qla_host *vha = sp->vha;
1258 struct srb_iocb *lio = &sp->u.iocb_cmd;
1259 struct event_arg ea;
1261 ql_dbg(ql_dbg_disc, vha, 0x2129,
1262 "%s %8phC res %x\n", __func__,
1263 sp->fcport->port_name, res);
1265 sp->fcport->flags &= ~FCF_ASYNC_SENT;
1267 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
1268 memset(&ea, 0, sizeof(ea));
1269 ea.fcport = sp->fcport;
1270 ea.data[0] = lio->u.logio.data[0];
1271 ea.data[1] = lio->u.logio.data[1];
1272 ea.iop[0] = lio->u.logio.iop[0];
1273 ea.iop[1] = lio->u.logio.iop[1];
/* Timer expiry is reported distinctly from a firmware error. */
1275 if (res == QLA_OS_TIMER_EXPIRED)
1276 ea.data[0] = QLA_OS_TIMER_EXPIRED;
1278 ea.data[0] = MBS_COMMAND_ERROR;
1280 qla24xx_handle_prli_done_event(vha, &ea);
/* ref: INIT */
1283 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla24xx_async_prli() - issue an asynchronous PRLI for @fcport.
 * Skips offline hosts, and in dual mode defers while a firmware-side
 * PLOGI/PRLI is still pending.  Sets SRB_LOGIN_NVME_PRLI for NVMe
 * targets.  NOTE(review): interior lines are elided in this listing.
 */
1287 qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
1290 struct srb_iocb *lio;
1291 int rval = QLA_FUNCTION_FAILED;
1293 if (!vha->flags.online) {
1294 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1295 __func__, __LINE__, fcport->port_name);
1299 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
1300 fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
1301 qla_dual_mode_enabled(vha)) {
1302 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1303 __func__, __LINE__, fcport->port_name);
1307 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1311 fcport->flags |= FCF_ASYNC_SENT;
1312 fcport->logout_completed = 0;
1314 sp->type = SRB_PRLI_CMD;
1316 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1317 qla2x00_async_prli_sp_done);
1319 lio = &sp->u.iocb_cmd;
1320 lio->u.logio.flags = 0;
1322 if (NVME_TARGET(vha->hw, fcport))
1323 lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
1325 ql_dbg(ql_dbg_disc, vha, 0x211b,
1326 "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
1327 fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
1328 fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
1329 NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");
1331 rval = qla2x00_start_sp(sp);
1332 if (rval != QLA_SUCCESS) {
1333 fcport->flags |= FCF_LOGIN_NEEDED;
1334 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* Error path: drop INIT reference and clear the sent flag. */
1342 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1343 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gpdb_work() - queue a QLA_EVT_GPDB work item for @fcport
 * with port-database option @opt.  Returns QLA_FUNCTION_FAILED when the
 * work item cannot be allocated.
 */
1347 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1349 struct qla_work_evt *e;
1351 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
1353 return QLA_FUNCTION_FAILED;
1355 e->u.fcport.fcport = fcport;
1356 e->u.fcport.opt = opt;
1357 fcport->flags |= FCF_ASYNC_ACTIVE;
1358 return qla2x00_post_work(vha, e);
/*
 * qla24xx_async_gpdb() - issue the Get-Port-Database mailbox command
 * (async) for @fcport.  Allocates a port_database_24xx DMA buffer from
 * the s_dma_pool (freed in the completion callback on success, or on the
 * error path here).  On failure to queue, retries via the work queue.
 * NOTE(review): interior lines are elided in this listing.
 */
1361 int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1364 struct srb_iocb *mbx;
1365 int rval = QLA_FUNCTION_FAILED;
1368 struct port_database_24xx *pd;
1369 struct qla_hw_data *ha = vha->hw;
1371 if (IS_SESSION_DELETED(fcport)) {
1372 ql_log(ql_log_warn, vha, 0xffff,
1373 "%s: %8phC is being delete - not sending command.\n",
1374 __func__, fcport->port_name);
1375 fcport->flags &= ~FCF_ASYNC_ACTIVE;
1379 if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
1380 ql_log(ql_log_warn, vha, 0xffff,
1381 "%s: %8phC online %d flags %x - not sending command.\n",
1382 __func__, fcport->port_name, vha->flags.online, fcport->flags);
1386 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1390 qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);
1392 fcport->flags |= FCF_ASYNC_SENT;
1393 sp->type = SRB_MB_IOCB;
1395 sp->gen1 = fcport->rscn_gen;
1396 sp->gen2 = fcport->login_gen;
1397 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1398 qla24xx_async_gpdb_sp_done);
1400 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1402 ql_log(ql_log_warn, vha, 0xd043,
1403 "Failed to allocate port database structure.\n");
/* Build the GPDB mailbox: loop ID, DMA address of pd, vp index. */
1407 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1408 mb[0] = MBC_GET_PORT_DATABASE;
1409 mb[1] = fcport->loop_id;
1410 mb[2] = MSW(pd_dma);
1411 mb[3] = LSW(pd_dma);
1412 mb[6] = MSW(MSD(pd_dma));
1413 mb[7] = LSW(MSD(pd_dma));
1414 mb[9] = vha->vp_idx;
/* Stash buffer + DMA handle so the done callback can free them. */
1417 mbx = &sp->u.iocb_cmd;
1418 mbx->u.mbx.in = (void *)pd;
1419 mbx->u.mbx.in_dma = pd_dma;
1421 ql_dbg(ql_dbg_disc, vha, 0x20dc,
1422 "Async-%s %8phC hndl %x opt %x\n",
1423 sp->name, fcport->port_name, sp->handle, opt);
1425 rval = qla2x00_start_sp(sp);
1426 if (rval != QLA_SUCCESS)
/* Error path: free the pd buffer, drop INIT ref, retry via work. */
1432 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1434 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1435 fcport->flags &= ~FCF_ASYNC_SENT;
1437 fcport->flags &= ~FCF_ASYNC_ACTIVE;
1438 qla24xx_post_gpdb_work(vha, fcport, opt);
/*
 * Finalize a successful GPDB.  Under tgt.sess_lock: bump the login
 * generation, clear the deleted state, and on a first-time login
 * success account the port in vha->fcport_count and schedule rport
 * registration; an already-logged-in port is treated as a revalidate.
 */
1443 void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1445 unsigned long flags;
1447 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1448 ea->fcport->login_gen++;
1449 ea->fcport->deleted = 0;
1450 ea->fcport->logout_on_delete = 1;
/* Skip well-known/reserved addresses when counting logged-in ports. */
1452 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
1453 vha->fcport_count++;
1454 ea->fcport->login_succ = 1;
/* Lock is dropped across the upd_fcport scheduling call. */
1456 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1457 qla24xx_sched_upd_fcport(ea->fcport);
1458 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1459 } else if (ea->fcport->login_succ) {
1461 * We have an existing session. A late RSCN delivery
1462 * must have triggered the session to be re-validate.
1463 * Session is still valid.
1465 ql_dbg(ql_dbg_disc, vha, 0x20d6,
1466 "%s %d %8phC session revalidate success\n",
1467 __func__, __LINE__, ea->fcport->port_name);
1468 qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
1470 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/*
 * Inspect the firmware-reported secure_login field of the port database
 * and update @fcport's FCF_FCSP_DEVICE flag to match.  With EDIF
 * enabled, a secure port is parked in DSC_LOGIN_AUTH_PEND, its SA state
 * is reset and the authentication application is notified; otherwise an
 * initiator/dual-mode port gets a PRLI posted.
 * NOTE(review): the return statements are elided in this listing —
 * presumably the int result tells the caller whether login handling was
 * deferred to the auth app; confirm against the full source.
 */
1473 static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1474 struct port_database_24xx *pd)
1478 if (pd->secure_login) {
1479 ql_dbg(ql_dbg_disc, vha, 0x104d,
1480 "Secure Login established on %8phC\n",
1482 fcport->flags |= FCF_FCSP_DEVICE;
1484 ql_dbg(ql_dbg_disc, vha, 0x104d,
1485 "non-Secure Login %8phC",
1487 fcport->flags &= ~FCF_FCSP_DEVICE;
1489 if (vha->hw->flags.edif_enabled) {
1490 if (fcport->flags & FCF_FCSP_DEVICE) {
1491 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
1492 /* Start edif prli timer & ring doorbell for app */
/* Reset security-association bookkeeping for a fresh authentication. */
1493 fcport->edif.rx_sa_set = 0;
1494 fcport->edif.tx_sa_set = 0;
1495 fcport->edif.rx_sa_pending = 0;
1496 fcport->edif.tx_sa_pending = 0;
1498 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
/* Tell the user-space auth application that this session needs AUTH. */
1501 if (DBELL_ACTIVE(vha)) {
1502 ql_dbg(ql_dbg_disc, vha, 0x20ef,
1503 "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
1504 __func__, __LINE__, fcport->port_name);
1505 fcport->edif.app_sess_online = 1;
1507 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
1508 fcport->d_id.b24, 0, fcport);
1512 } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
1513 ql_dbg(ql_dbg_disc, vha, 0x2117,
1514 "%s %d %8phC post prli\n",
1515 __func__, __LINE__, fcport->port_name);
1516 qla24xx_post_prli_work(vha, fcport);
/*
 * GPDB completion handler.  Extracts the firmware login state from the
 * returned port database, rejects stale completions (generation counter
 * mismatch or pending delete), then acts on the login state: complete
 * states are parsed/finalized, pending states trigger a relogin, and
 * logout/unavailable states schedule session deletion.
 */
1524 void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1526 fc_port_t *fcport = ea->fcport;
1527 struct port_database_24xx *pd;
1528 struct srb *sp = ea->sp;
1531 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
1533 fcport->flags &= ~FCF_ASYNC_SENT;
1535 ql_dbg(ql_dbg_disc, vha, 0x20d2,
1536 "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
1537 fcport->port_name, fcport->disc_state, pd->current_login_state,
1538 fcport->fc4_type, ea->rc);
/* Session is being torn down; ignore this completion. */
1540 if (fcport->disc_state == DSC_DELETE_PEND) {
1541 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
1542 __func__, __LINE__, fcport->port_name);
/* NVMe login state lives in the upper nibble, FCP in the lower. */
1546 if (NVME_TARGET(vha->hw, fcport))
1547 ls = pd->current_login_state >> 4;
1549 ls = pd->current_login_state & 0xf;
/* Stale completion checks against the generations snapshotted at send. */
1551 if (ea->sp->gen2 != fcport->login_gen) {
1552 /* target side must have changed it. */
1554 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1555 "%s %8phC generation changed\n",
1556 __func__, fcport->port_name);
1558 } else if (ea->sp->gen1 != fcport->rscn_gen) {
1559 qla_rscn_replay(fcport);
1560 qlt_schedule_sess_for_deletion(fcport);
1561 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1562 __func__, __LINE__, fcport->port_name, ls);
1567 case PDS_PRLI_COMPLETE:
1568 __qla24xx_parse_gpdb(vha, fcport, pd);
1570 case PDS_PLOGI_COMPLETE:
1571 if (qla_chk_secure_login(vha, fcport, pd)) {
1572 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1573 __func__, __LINE__, fcport->port_name, ls);
1577 case PDS_PLOGI_PENDING:
1578 case PDS_PRLI_PENDING:
1579 case PDS_PRLI2_PENDING:
1580 /* Set discovery state back to GNL to Relogin attempt */
1581 if (qla_dual_mode_enabled(vha) ||
1582 qla_ini_mode_enabled(vha)) {
1583 qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
1584 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1586 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1587 __func__, __LINE__, fcport->port_name, ls);
1589 case PDS_LOGO_PENDING:
1590 case PDS_PORT_UNAVAILABLE:
1592 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
1593 __func__, __LINE__, fcport->port_name);
1594 qlt_schedule_sess_for_deletion(fcport);
/* Common success path: finish login bookkeeping. */
1597 __qla24xx_handle_gpdb_event(vha, ea);
/*
 * Decide whether this port should initiate login to @fcport.  Pure
 * target mode never initiates.  In dual-mode N2N topology the local and
 * remote WWPNs are compared as a tie-break (the comparison result lines
 * are elided in this listing — presumably the larger WWPN initiates;
 * confirm against the full source).  When login is warranted and
 * retries remain, a loop id is (re)acquired if needed and an async
 * login work item is posted.
 */
1600 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1605 ql_dbg(ql_dbg_disc, vha, 0x307b,
1606 "%s %8phC DS %d LS %d lid %d retries=%d\n",
1607 __func__, fcport->port_name, fcport->disc_state,
1608 fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
1610 if (qla_tgt_mode_enabled(vha))
1613 if (qla_dual_mode_enabled(vha)) {
1614 if (N2N_TOPO(vha->hw)) {
1617 mywwn = wwn_to_u64(vha->port_name);
1618 wwn = wwn_to_u64(fcport->port_name);
/* A completed PLOGI from the peer only blocks us until the NACK deadline. */
1621 else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1622 && time_after_eq(jiffies,
1623 fcport->plogi_nack_done_deadline))
1629 /* initiator mode */
1633 if (login && fcport->login_retry) {
1634 fcport->login_retry--;
1635 if (fcport->loop_id == FC_NO_LOOP_ID) {
1636 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
1637 rc = qla2x00_find_new_loop_id(vha, fcport);
/* No free loop ids left: give up and tear the session down. */
1639 ql_dbg(ql_dbg_disc, vha, 0x20e6,
1640 "%s %d %8phC post del sess - out of loopid\n",
1641 __func__, __LINE__, fcport->port_name);
1642 fcport->scan_state = 0;
1643 qlt_schedule_sess_for_deletion(fcport);
1647 ql_dbg(ql_dbg_disc, vha, 0x20bf,
1648 "%s %d %8phC post login\n",
1649 __func__, __LINE__, fcport->port_name);
1650 qla2x00_post_async_login_work(vha, fcport, NULL);
/*
 * Core per-port login state machine.  Called from DPC/relogin context,
 * it inspects @fcport's discovery state and decides the next step:
 * post GNL/GPDB/PRLI/ADISC work, start a PLOGI (directly or via the
 * N2N tie-break helper), or defer by setting RELOGIN_NEEDED.  Ports
 * that are not FOUND, are being deleted, or already have an async op
 * in flight are skipped early.
 */
1654 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1659 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1660 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
1661 __func__, fcport->port_name, fcport->disc_state,
1662 fcport->fw_login_state, fcport->login_pause, fcport->flags,
1663 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
1664 fcport->login_gen, fcport->loop_id, fcport->scan_state,
/* Nothing to do for ports not found by scan or pending deletion. */
1667 if (fcport->scan_state != QLA_FCPORT_FOUND ||
1668 fcport->disc_state == DSC_DELETE_PEND)
/* Dual mode: let an in-progress remote-initiated PLOGI/PRLI finish. */
1671 if ((fcport->loop_id != FC_NO_LOOP_ID) &&
1672 qla_dual_mode_enabled(vha) &&
1673 ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1674 (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
1677 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
1678 !N2N_TOPO(vha->hw)) {
/* Still inside the PLOGI NACK window: retry later. */
1679 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1680 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1685 /* Target won't initiate port login if fabric is present */
1686 if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
1689 if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
1690 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1694 switch (fcport->disc_state) {
1696 switch (vha->hw->current_topology) {
/* N2N: the bigger WWPN drives the ELS PLOGI. */
1698 if (fcport_is_smaller(fcport)) {
1699 /* this adapter is bigger */
1700 if (fcport->login_retry) {
1701 if (fcport->loop_id == FC_NO_LOOP_ID) {
1702 qla2x00_find_new_loop_id(vha,
1704 fcport->fw_login_state =
1705 DSC_LS_PORT_UNAVAIL;
1707 fcport->login_retry--;
1708 qla_post_els_plogi_work(vha, fcport);
1710 ql_log(ql_log_info, vha, 0x705d,
1711 "Unable to reach remote port %8phC",
1715 qla24xx_post_gnl_work(vha, fcport);
1719 if (fcport->loop_id == FC_NO_LOOP_ID) {
1720 ql_dbg(ql_dbg_disc, vha, 0x20bd,
1721 "%s %d %8phC post gnl\n",
1722 __func__, __LINE__, fcport->port_name);
1723 qla24xx_post_gnl_work(vha, fcport);
1725 qla_chk_n2n_b4_login(vha, fcport);
1732 switch (vha->hw->current_topology) {
/* 0x6 == PRLI complete in firmware: fetch port database. */
1734 if ((fcport->current_login_state & 0xf) == 0x6) {
1735 ql_dbg(ql_dbg_disc, vha, 0x2118,
1736 "%s %d %8phC post GPDB work\n",
1737 __func__, __LINE__, fcport->port_name);
1738 fcport->chip_reset =
1739 vha->hw->base_qpair->chip_reset;
1740 qla24xx_post_gpdb_work(vha, fcport, 0);
1742 ql_dbg(ql_dbg_disc, vha, 0x2118,
1743 "%s %d %8phC post %s PRLI\n",
1744 __func__, __LINE__, fcport->port_name,
1745 NVME_TARGET(vha->hw, fcport) ? "NVME" :
1747 qla24xx_post_prli_work(vha, fcport);
/* Paused by a conflicting session: remember generations, retry later. */
1751 if (fcport->login_pause) {
1752 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1753 "%s %d %8phC exit\n",
1756 fcport->last_rscn_gen = fcport->rscn_gen;
1757 fcport->last_login_gen = fcport->login_gen;
1758 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1761 qla_chk_n2n_b4_login(vha, fcport);
1766 case DSC_LOGIN_FAILED:
1767 if (N2N_TOPO(vha->hw))
1768 qla_chk_n2n_b4_login(vha, fcport);
1770 qlt_schedule_sess_for_deletion(fcport);
1773 case DSC_LOGIN_COMPLETE:
1774 /* recheck login state */
1775 data[0] = data[1] = 0;
1776 qla2x00_post_async_adisc_work(vha, fcport, data);
1779 case DSC_LOGIN_PEND:
/* With EDIF the auth app owns the next step; don't force PRLI here. */
1780 if (vha->hw->flags.edif_enabled)
1783 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
1784 ql_dbg(ql_dbg_disc, vha, 0x2118,
1785 "%s %d %8phC post %s PRLI\n",
1786 __func__, __LINE__, fcport->port_name,
1787 NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC",
1788 qla24xx_post_prli_work(vha, fcport);
1792 case DSC_UPD_FCPORT:
/* Track and warn about slow rport registrations (seconds granularity). */
1793 sec = jiffies_to_msecs(jiffies -
1794 fcport->jiffies_at_registration)/1000;
1795 if (fcport->sec_since_registration < sec && sec &&
1797 fcport->sec_since_registration = sec;
1798 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
1799 "%s %8phC - Slow Rport registration(%d Sec)\n",
1800 __func__, fcport->port_name, sec);
1803 if (fcport->next_disc_state != DSC_DELETE_PEND)
1804 fcport->next_disc_state = DSC_ADISC;
1805 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/*
 * Queue a QLA_EVT_NEW_SESS work item describing a newly discovered
 * remote port (nport id, WWPN/WWNN, pending-login descriptor @pla and
 * FC-4 type).  Returns QLA_FUNCTION_FAILED if no work element is
 * available, otherwise the qla2x00_post_work() result.
 */
1815 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1816 u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
1818 struct qla_work_evt *e;
1820 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1822 return QLA_FUNCTION_FAILED;
1824 e->u.new_sess.id = *id;
1825 e->u.new_sess.pla = pla;
1826 e->u.new_sess.fc4_type = fc4_type;
1827 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
/* node_name is optional — copied only when provided (guard line elided). */
1829 memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
1831 return qla2x00_post_work(vha, e);
/*
 * Process one RSCN event.  Depending on the address format (port, area,
 * domain, or fabric-wide) mark the matching fcport(s) scan_needed so
 * the next fabric scan rediscovers them; online FCP2 (tape) devices are
 * deliberately skipped to avoid disruptive session deletes.  Finally,
 * schedule the delayed fabric scan work if it is not already queued.
 */
1834 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
1837 unsigned long flags;
1839 switch (ea->id.b.rsvd_1) {
1840 case RSCN_PORT_ADDR:
1841 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
/* Keep online FCP2 devices alive; just note the delay. */
1843 if (ql2xfc2target &&
1844 fcport->flags & FCF_FCP2_DEVICE &&
1845 atomic_read(&fcport->state) == FCS_ONLINE) {
1846 ql_dbg(ql_dbg_disc, vha, 0x2115,
1847 "Delaying session delete for FCP2 portid=%06x %8phC ",
1848 fcport->d_id.b24, fcport->port_name);
1852 if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
1854 * On ipsec start by remote port, Target port
1855 * may use RSCN to trigger initiator to
1856 * relogin. If driver is already in the
1857 * process of a relogin, then ignore the RSCN
1858 * and allow the current relogin to continue.
1859 * This reduces thrashing of the connection.
1861 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1863 * If state = online, then set scan_needed=1 to do relogin.
1864 * Otherwise we're already in the middle of a relogin
1866 fcport->scan_needed = 1;
1870 fcport->scan_needed = 1;
/* Area-format RSCN: match on domain+area (upper 16 bits of the port id). */
1875 case RSCN_AREA_ADDR:
1876 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1877 if (fcport->flags & FCF_FCP2_DEVICE &&
1878 atomic_read(&fcport->state) == FCS_ONLINE)
1881 if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
1882 fcport->scan_needed = 1;
/* Domain-format RSCN: match on the domain byte only. */
1888 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1889 if (fcport->flags & FCF_FCP2_DEVICE &&
1890 atomic_read(&fcport->state) == FCS_ONLINE)
1893 if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
1894 fcport->scan_needed = 1;
/* Fabric-wide RSCN: flag every known port. */
1901 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1902 if (fcport->flags & FCF_FCP2_DEVICE &&
1903 atomic_read(&fcport->state) == FCS_ONLINE)
1906 fcport->scan_needed = 1;
/* Kick off the delayed scan once; work_lock serializes scan_flags. */
1912 spin_lock_irqsave(&vha->work_lock, flags);
1913 if (vha->scan.scan_flags == 0) {
1914 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
1915 vha->scan.scan_flags |= SF_QUEUED;
1916 schedule_delayed_work(&vha->scan.scan_work, 5);
1918 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * Relogin event dispatcher.  If the port's RSCN generation changed
 * since the last pass, refresh the name list first (GNL); otherwise
 * drive the port through the login state machine.  No-op while the
 * driver is unloading.
 */
1921 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1922 struct event_arg *ea)
1924 fc_port_t *fcport = ea->fcport;
1926 if (test_bit(UNLOADING, &vha->dpc_flags))
1929 ql_dbg(ql_dbg_disc, vha, 0x2102,
1930 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1931 __func__, fcport->port_name, fcport->disc_state,
1932 fcport->fw_login_state, fcport->login_pause,
1933 fcport->deleted, fcport->conflict,
1934 fcport->last_rscn_gen, fcport->rscn_gen,
1935 fcport->last_login_gen, fcport->login_gen,
/* RSCN arrived since last look — re-fetch the name list before login. */
1938 if (fcport->last_rscn_gen != fcport->rscn_gen) {
1939 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
1940 __func__, __LINE__, fcport->port_name);
1941 qla24xx_post_gnl_work(vha, fcport);
1945 qla24xx_fcport_handle_login(vha, fcport);
/*
 * Post-processing after an ELS PLOGI completes.  In N2N topology with
 * EDIF enabled, the smaller-WWPN side fetches the port database first
 * (to learn whether the peer requires secure login).  Pure target mode
 * never initiates PRLI; otherwise a PRLI is posted.
 */
1948 void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1949 struct event_arg *ea)
1951 if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
1952 vha->hw->flags.edif_enabled) {
1953 /* check to see if App support Secure */
1954 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1958 /* for pure Target Mode, PRLI will not be initiated */
1959 if (vha->host->active_mode == MODE_TARGET)
1962 ql_dbg(ql_dbg_disc, vha, 0x2118,
1963 "%s %d %8phC post PRLI\n",
1964 __func__, __LINE__, ea->fcport->port_name);
1965 qla24xx_post_prli_work(vha, ea->fcport);
1969 * RSCN(s) came in for this fcport, but the RSCN(s) were not able
1970 * to be consumed by the fcport
1972 void qla_rscn_replay(fc_port_t *fcport)
1974 struct event_arg ea;
/* A port already being deleted must not restart discovery. */
1976 switch (fcport->disc_state) {
1977 case DSC_DELETE_PEND:
/* Re-dispatch the deferred RSCN as a port-address event. */
1983 if (fcport->scan_needed) {
1984 memset(&ea, 0, sizeof(ea));
1985 ea.id = fcport->d_id;
1986 ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1987 qla2x00_handle_rscn(fcport->vha, &ea);
/*
 * Timeout handler for TMF and marker srbs.  A timed-out marker is
 * simply completed; a timed-out TMF is aborted, and if the abort itself
 * fails the srb is pulled out of the outstanding-command table under
 * the qpair lock, its firmware resources released, and the waiter is
 * completed with CS_TIMEOUT.
 */
1992 qla2x00_tmf_iocb_timeout(void *data)
1995 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1997 unsigned long flags;
1999 if (sp->type == SRB_MARKER) {
2000 complete(&tmf->u.tmf.comp);
2004 rc = qla24xx_async_abort_cmd(sp, false);
/* Abort failed: manually reap the srb from the outstanding array. */
2006 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2007 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2008 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2009 sp->qpair->req->outstanding_cmds[h] = NULL;
2010 qla_put_fw_resources(sp->qpair, &sp->iores);
2014 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2015 tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
2016 tmf->u.tmf.data = QLA_FUNCTION_FAILED;
2017 complete(&tmf->u.tmf.comp);
/*
 * Completion callback for marker IOCBs: record the result for the
 * synchronous waiter in qla26xx_marker() and wake it.  Failures are
 * logged at taskm debug level.
 */
2021 static void qla_marker_sp_done(srb_t *sp, int res)
2023 struct srb_iocb *tmf = &sp->u.iocb_cmd;
2025 if (res != QLA_SUCCESS)
2026 ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
2027 "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n",
2028 sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags,
2029 sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id);
2031 sp->u.iocb_cmd.u.tmf.data = res;
2032 complete(&tmf->u.tmf.comp);
/*
 * Start an srb, retrying qla2x00_start_sp() while it returns EAGAIN
 * (queue-full).  The retry loop/delay lines are elided in this listing;
 * do not insert comments inside the macro body — they would break the
 * backslash line continuations.
 */
2035 #define START_SP_W_RETRIES(_sp, _rval) \
2039 _rval = qla2x00_start_sp(_sp); \
2040 if (_rval == EAGAIN) \
2049 * qla26xx_marker: send marker IOCB and wait for the completion of it.
2050 * @arg: pointer to argument list.
2051 * It is assumed the caller will provide an fcport pointer and modifier
2054 qla26xx_marker(struct tmf_arg *arg)
2056 struct scsi_qla_host *vha = arg->vha;
2057 struct srb_iocb *tm_iocb;
2059 int rval = QLA_FUNCTION_FAILED;
2060 fc_port_t *fcport = arg->fcport;
/* Port not ready (e.g. being torn down): report as suspended. */
2062 if (TMF_NOT_READY(arg->fcport)) {
2063 ql_dbg(ql_dbg_taskm, vha, 0x8039,
2064 "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
2065 fcport->loop_id, fcport->d_id.b24,
2066 arg->modifier, arg->lun, arg->qpair->id);
2067 return QLA_SUSPENDED;
2071 sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
2075 sp->type = SRB_MARKER;
2076 sp->name = "marker";
2077 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
2078 sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
2080 tm_iocb = &sp->u.iocb_cmd;
2081 init_completion(&tm_iocb->u.tmf.comp);
2082 tm_iocb->u.tmf.modifier = arg->modifier;
2083 tm_iocb->u.tmf.lun = arg->lun;
2084 tm_iocb->u.tmf.loop_id = fcport->loop_id;
2085 tm_iocb->u.tmf.vp_index = vha->vp_idx;
/* Submit with EAGAIN retries (queue-full handling). */
2087 START_SP_W_RETRIES(sp, rval);
2089 ql_dbg(ql_dbg_taskm, vha, 0x8006,
2090 "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
2091 sp->handle, fcport->loop_id, fcport->d_id.b24,
2092 arg->modifier, arg->lun, sp->qpair->id, rval);
2094 if (rval != QLA_SUCCESS) {
2095 ql_log(ql_log_warn, vha, 0x8031,
2096 "Marker IOCB send failure (%x).\n", rval);
/* Block until qla_marker_sp_done() (or the timeout handler) fires. */
2100 wait_for_completion(&tm_iocb->u.tmf.comp);
2101 rval = tm_iocb->u.tmf.data;
2103 if (rval != QLA_SUCCESS) {
2104 ql_log(ql_log_warn, vha, 0x8019,
2105 "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
2106 sp->handle, fcport->loop_id, fcport->d_id.b24,
2107 arg->modifier, arg->lun, sp->qpair->id, rval);
2112 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Completion callback for TMF srbs: stash the result and wake the
 * synchronous waiter in __qla2x00_async_tm_cmd().
 */
2117 static void qla2x00_tmf_sp_done(srb_t *sp, int res)
2119 struct srb_iocb *tmf = &sp->u.iocb_cmd;
2122 tmf->u.tmf.data = res;
2123 complete(&tmf->u.tmf.comp);
/*
 * Send one Task Management IOCB on the qpair described by @arg and wait
 * synchronously for its completion.  On success (and when not unloading
 * or on an ISPFx00), a marker IOCB is issued afterwards to flush
 * outstanding commands for the affected nexus.
 */
2127 __qla2x00_async_tm_cmd(struct tmf_arg *arg)
2129 struct scsi_qla_host *vha = arg->vha;
2130 struct srb_iocb *tm_iocb;
2132 int rval = QLA_FUNCTION_FAILED;
2134 fc_port_t *fcport = arg->fcport;
2136 if (TMF_NOT_READY(arg->fcport)) {
2137 ql_dbg(ql_dbg_taskm, vha, 0x8032,
2138 "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
2139 fcport->loop_id, fcport->d_id.b24,
2140 arg->modifier, arg->lun, arg->qpair->id);
2141 return QLA_SUSPENDED;
2145 sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
2149 qla_vha_mark_busy(vha);
2150 sp->type = SRB_TM_CMD;
2152 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
2153 qla2x00_tmf_sp_done);
2154 sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
2156 tm_iocb = &sp->u.iocb_cmd;
2157 init_completion(&tm_iocb->u.tmf.comp);
2158 tm_iocb->u.tmf.flags = arg->flags;
2159 tm_iocb->u.tmf.lun = arg->lun;
/* Submit with EAGAIN retries (queue-full handling). */
2161 START_SP_W_RETRIES(sp, rval);
2163 ql_dbg(ql_dbg_taskm, vha, 0x802f,
2164 "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
2165 sp->handle, fcport->loop_id, fcport->d_id.b24,
2166 arg->flags, arg->lun, sp->qpair->id, rval);
2168 if (rval != QLA_SUCCESS)
2170 wait_for_completion(&tm_iocb->u.tmf.comp);
2172 rval = tm_iocb->u.tmf.data;
2174 if (rval != QLA_SUCCESS) {
2175 ql_log(ql_log_warn, vha, 0x8030,
2176 "TM IOCB failed (%x).\n", rval);
/* Follow a successful TMF with a marker (not for ISPFx00 or on unload). */
2179 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
2180 rval = qla26xx_marker(arg);
2184 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Release one TMF slot on @fcport, decrementing active_tmf under
 * tgt.sess_lock.  Pairs with qla_get_tmf().
 */
2189 static void qla_put_tmf(fc_port_t *fcport)
2191 struct scsi_qla_host *vha = fcport->vha;
2192 struct qla_hw_data *ha = vha->hw;
2193 unsigned long flags;
2195 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2196 fcport->active_tmf--;
2197 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
/*
 * Acquire a TMF slot on @fcport, limited to MAX_ACTIVE_TMF concurrent
 * task-management commands.  Waiters queue FIFO via an on-stack list
 * node on fcport->tmf_pending; a waiter proceeds only when a slot is
 * free AND it is at the head of the queue.  The wait between releasing
 * and re-taking the lock is elided in this listing (presumably a short
 * sleep — confirm against the full source).  Aborts with a warning if
 * the port becomes not-ready while waiting.
 */
2201 int qla_get_tmf(fc_port_t *fcport)
2203 struct scsi_qla_host *vha = fcport->vha;
2204 struct qla_hw_data *ha = vha->hw;
2205 unsigned long flags;
2207 LIST_HEAD(tmf_elem);
2209 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2210 list_add_tail(&tmf_elem, &fcport->tmf_pending);
2212 while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
2213 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2217 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
/* Port torn down while waiting: give up the slot request. */
2218 if (TMF_NOT_READY(fcport)) {
2219 ql_log(ql_log_warn, vha, 0x802c,
2220 "Unable to acquire TM resource due to disruption.\n");
/* Proceed only when under the limit and first in the FIFO. */
2224 if (fcport->active_tmf < MAX_ACTIVE_TMF &&
2225 list_is_first(&tmf_elem, &fcport->tmf_pending))
2229 list_del(&tmf_elem);
2232 fcport->active_tmf++;
2234 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
/*
 * Entry point for issuing a Task Management command (@flags is the
 * TCF_* control byte, @lun the target LUN).  LUN-scoped TMFs first take
 * a per-port TMF slot via qla_get_tmf().  With multiqueue enabled the
 * TMF is replayed on every qpair (with TCF_NOTMCMD_TO_TARGET set —
 * flush-only on the extra queues), then issued on the base qpair.
 */
2240 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
2243 struct scsi_qla_host *vha = fcport->vha;
2244 struct qla_qpair *qpair;
2246 int i, rval = QLA_SUCCESS;
2248 if (TMF_NOT_READY(fcport))
2249 return QLA_SUSPENDED;
2251 a.vha = fcport->vha;
/* LUN-scoped TMFs are rate-limited per port; others use MK_SYNC_ID. */
2254 if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
2255 a.modifier = MK_SYNC_ID_LUN;
2257 if (qla_get_tmf(fcport))
2258 return QLA_FUNCTION_FAILED;
2260 a.modifier = MK_SYNC_ID;
2263 if (vha->hw->mqenable) {
2264 for (i = 0; i < vha->hw->num_qpairs; i++) {
2265 qpair = vha->hw->queue_pair_map[i];
2269 if (TMF_NOT_READY(fcport)) {
2270 ql_log(ql_log_warn, vha, 0x8026,
2271 "Unable to send TM due to disruption.\n");
2272 rval = QLA_SUSPENDED;
2277 a.flags = flags|TCF_NOTMCMD_TO_TARGET;
2278 rval = __qla2x00_async_tm_cmd(&a);
/* Finally issue the real TMF on the base qpair. */
2287 a.qpair = vha->hw->base_qpair;
2289 rval = __qla2x00_async_tm_cmd(&a);
2292 if (a.modifier == MK_SYNC_ID_LUN)
2293 qla_put_tmf(fcport);
/*
 * Abort an outstanding srb.  Locate @sp's handle in the qpair's
 * outstanding-command table (under the qpair lock); if absent, the
 * command already completed and QLA_ERR_NOT_FOUND is returned.
 * ISPFx00 ioctl commands go through the FX disc abort path; everything
 * else is aborted via qla24xx_async_abort_cmd() (waiting variant).
 */
2299 qla24xx_async_abort_command(srb_t *sp)
2301 unsigned long flags = 0;
2304 fc_port_t *fcport = sp->fcport;
2305 struct qla_qpair *qpair = sp->qpair;
2306 struct scsi_qla_host *vha = fcport->vha;
2307 struct req_que *req = qpair->req;
2309 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2310 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
2311 if (req->outstanding_cmds[handle] == sp)
2314 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2316 if (handle == req->num_outstanding_cmds) {
2317 /* Command not found. */
2318 return QLA_ERR_NOT_FOUND;
2320 if (sp->type == SRB_FXIOCB_DCMD)
2321 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
2322 FXDISC_ABORT_IOCTL);
2324 return qla24xx_async_abort_cmd(sp, true);
/*
 * PRLI completion handler.  Success records NVMe PRLI service
 * parameters (including first-burst size) and proceeds to GPDB.
 * Failure toggles the FCP/NVMe PRLI preference for the next attempt;
 * in N2N topology a link reset is used to restart negotiation (bounded
 * by login_retry_count), otherwise the session is torn down so relogin
 * can retrigger.
 */
2328 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2331 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
2334 switch (ea->data[0]) {
2335 case MBS_COMMAND_COMPLETE:
2336 ql_dbg(ql_dbg_disc, vha, 0x2118,
2337 "%s %d %8phC post gpdb\n",
2338 __func__, __LINE__, ea->fcport->port_name);
2340 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2341 ea->fcport->logout_on_delete = 1;
/* Capture NVMe service parameters; first-burst is in 512-byte units. */
2342 ea->fcport->nvme_prli_service_param = ea->iop[0];
2343 if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
2344 ea->fcport->nvme_first_burst_size =
2345 (ea->iop[1] & 0xffff) * 512;
2347 ea->fcport->nvme_first_burst_size = 0;
2348 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2352 ql_dbg(ql_dbg_disc, vha, 0x2118,
2353 "%s %d %8phC priority %s, fc4type %x prev try %s\n",
2354 __func__, __LINE__, ea->fcport->port_name,
2355 vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
2356 "FCP" : "NVMe", ea->fcport->fc4_type,
2357 (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
/* Alternate between NVMe and FCP PRLI on dual-protocol targets. */
2360 if (NVME_FCP_TARGET(ea->fcport)) {
2361 if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
2362 ea->fcport->do_prli_nvme = 0;
2364 ea->fcport->do_prli_nvme = 1;
2366 ea->fcport->do_prli_nvme = 0;
2369 if (N2N_TOPO(vha->hw)) {
2370 if (ea->fcport->n2n_link_reset_cnt ==
2371 vha->hw->login_retry_count &&
2372 ea->fcport->flags & FCF_FCSP_DEVICE) {
2373 /* remote authentication app just started */
2374 ea->fcport->n2n_link_reset_cnt = 0;
/* Retry budget left: bounce the link to restart N2N negotiation. */
2377 if (ea->fcport->n2n_link_reset_cnt <
2378 vha->hw->login_retry_count) {
2379 ea->fcport->n2n_link_reset_cnt++;
2380 vha->relogin_jif = jiffies + 2 * HZ;
2382 * PRLI failed. Reset link to kick start
2385 set_bit(N2N_LINK_RESET, &vha->dpc_flags);
2386 qla2xxx_wake_dpc(vha);
2388 ql_log(ql_log_warn, vha, 0x2119,
2389 "%s %d %8phC Unable to reconnect\n",
2391 ea->fcport->port_name);
2395 * switch connect. login failed. Take connection down
2396 * and allow relogin to retrigger
2398 ea->fcport->flags &= ~FCF_ASYNC_SENT;
2399 ea->fcport->keep_nport_handle = 0;
2400 ea->fcport->logout_on_delete = 1;
2401 qlt_schedule_sess_for_deletion(ea->fcport);
/*
 * PLOGI completion handler.  After filtering out races (remote-
 * initiated login in progress, session delete, stale login/RSCN
 * generations), it dispatches on the mailbox status: success proceeds
 * to PRLI or GPDB; loop-id-in-use re-runs GNL with a fresh loop id;
 * port-id-in-use resolves the conflicting session (pausing this login
 * or adopting the existing handle); plain errors tear the session down.
 */
2408 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2410 port_id_t cid; /* conflict Nport id */
2412 struct fc_port *conflict_fcport;
2413 unsigned long flags;
2414 struct fc_port *fcport = ea->fcport;
2416 ql_dbg(ql_dbg_disc, vha, 0xffff,
2417 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
2418 __func__, fcport->port_name, fcport->disc_state,
2419 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2420 ea->sp->gen1, fcport->rscn_gen,
2421 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
/* Remote port is mid-login to us — let that side win. */
2423 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
2424 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
2425 ql_dbg(ql_dbg_disc, vha, 0x20ea,
2426 "%s %d %8phC Remote is trying to login\n",
2427 __func__, __LINE__, fcport->port_name);
2431 if ((fcport->disc_state == DSC_DELETE_PEND) ||
2432 (fcport->disc_state == DSC_DELETED)) {
2433 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* Stale completion checks against snapshotted generation counters. */
2437 if (ea->sp->gen2 != fcport->login_gen) {
2438 /* target side must have changed it. */
2439 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2440 "%s %8phC generation changed\n",
2441 __func__, fcport->port_name);
2442 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2444 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2445 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2446 "%s %8phC RSCN generation changed\n",
2447 __func__, fcport->port_name);
2448 qla_rscn_replay(fcport);
2449 qlt_schedule_sess_for_deletion(fcport);
2453 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
2456 switch (ea->data[0]) {
2457 case MBS_COMMAND_COMPLETE:
2459 * Driver must validate login state - If PRLI not complete,
2460 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
/* EDIF: record PLOGI-complete state and go straight to GPDB. */
2463 if (vha->hw->flags.edif_enabled) {
2464 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2465 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2466 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2467 ea->fcport->logout_on_delete = 1;
2468 ea->fcport->send_els_logo = 0;
2469 ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
2470 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2472 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2474 if (NVME_TARGET(vha->hw, fcport)) {
2475 ql_dbg(ql_dbg_disc, vha, 0x2117,
2476 "%s %d %8phC post prli\n",
2477 __func__, __LINE__, fcport->port_name);
2478 qla24xx_post_prli_work(vha, fcport);
2480 ql_dbg(ql_dbg_disc, vha, 0x20ea,
2481 "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
2482 __func__, __LINE__, fcport->port_name,
2483 fcport->loop_id, fcport->d_id.b24);
2485 set_bit(fcport->loop_id, vha->hw->loop_id_map);
2486 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2487 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2488 fcport->logout_on_delete = 1;
2489 fcport->send_els_logo = 0;
2490 fcport->fw_login_state = DSC_LS_PRLI_COMP;
2491 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2493 qla24xx_post_gpdb_work(vha, fcport, 0);
2497 case MBS_COMMAND_ERROR:
2498 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
2499 __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
2501 qlt_schedule_sess_for_deletion(ea->fcport);
2503 case MBS_LOOP_ID_USED:
2504 /* data[1] = IO PARAM 1 = nport ID */
/* Extract the conflicting N_Port id from IOP1 (D/A/AL_PA bytes). */
2505 cid.b.domain = (ea->iop[1] >> 16) & 0xff;
2506 cid.b.area = (ea->iop[1] >> 8) & 0xff;
2507 cid.b.al_pa = ea->iop[1] & 0xff;
2510 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2511 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2512 __func__, __LINE__, ea->fcport->port_name,
2513 ea->fcport->loop_id, cid.b24);
/* Reserve the taken loop id and retry name-list with a fresh one. */
2515 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2516 ea->fcport->loop_id = FC_NO_LOOP_ID;
2517 qla24xx_post_gnl_work(vha, ea->fcport);
2519 case MBS_PORT_ID_USED:
2520 lid = ea->iop[1] & 0xffff;
2521 qlt_find_sess_invalidate_other(vha,
2522 wwn_to_u64(ea->fcport->port_name),
2523 ea->fcport->d_id, lid, &conflict_fcport);
2525 if (conflict_fcport) {
2527 * Another fcport share the same loop_id/nport id.
2528 * Conflict fcport needs to finish cleanup before this
2529 * fcport can proceed to login.
2531 conflict_fcport->conflict = ea->fcport;
2532 ea->fcport->login_pause = 1;
2534 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2535 "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n",
2536 __func__, __LINE__, ea->fcport->port_name,
2537 ea->fcport->d_id.b24, lid);
2539 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2540 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
2541 __func__, __LINE__, ea->fcport->port_name,
2542 ea->fcport->d_id.b24, lid);
/* No live conflict session: adopt the existing handle, then delete. */
2544 qla2x00_clear_loop_id(ea->fcport);
2545 set_bit(lid, vha->hw->loop_id_map);
2546 ea->fcport->loop_id = lid;
2547 ea->fcport->keep_nport_handle = 0;
2548 ea->fcport->logout_on_delete = 1;
2549 qlt_schedule_sess_for_deletion(ea->fcport);
2556 /****************************************************************************/
2557 /* QLogic ISP2x00 Hardware Support Functions. */
2558 /****************************************************************************/
/*
 * ISP83xx Inter-Driver Communication (IDC) bring-up.  Under the IDC
 * lock: set init/reset timeouts, register this function's presence,
 * decide reset ownership, publish/validate the IDC major version
 * (clearing lock recovery as owner, or withdrawing on a mismatch),
 * record this function's supported minor version, and — as reset owner
 * — seed the device state from the port config before running the IDC
 * state handler.
 */
2561 qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
2563 int rval = QLA_SUCCESS;
2564 struct qla_hw_data *ha = vha->hw;
2565 uint32_t idc_major_ver, idc_minor_ver;
2568 qla83xx_idc_lock(vha, 0);
2570 /* SV: TODO: Assign initialization timeout from
2571 * flash-info / other param
2573 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
2574 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
2576 /* Set our fcoe function presence */
2577 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
2578 ql_dbg(ql_dbg_p3p, vha, 0xb077,
2579 "Error while setting DRV-Presence.\n");
2580 rval = QLA_FUNCTION_FAILED;
2584 /* Decide the reset ownership */
2585 qla83xx_reset_ownership(vha);
2588 * On first protocol driver load:
2589 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
2591 * Others: Check compatibility with current IDC Major version.
2593 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
2594 if (ha->flags.nic_core_reset_owner) {
2595 /* Set IDC Major version */
2596 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
2597 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
2599 /* Clearing IDC-Lock-Recovery register */
2600 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
2601 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
2603 * Clear further IDC participation if we are not compatible with
2604 * the current IDC Major Version.
2606 ql_log(ql_log_warn, vha, 0xb07d,
2607 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
2608 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
2609 __qla83xx_clear_drv_presence(vha);
2610 rval = QLA_FUNCTION_FAILED;
2613 /* Each function sets its supported Minor version. */
/* Two bits per function (portnum) in the minor-version register. */
2614 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
2615 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
2616 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
2618 if (ha->flags.nic_core_reset_owner) {
2619 memset(config, 0, sizeof(config));
2620 if (!qla81xx_get_port_config(vha, config))
2621 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
2625 rval = qla83xx_idc_state_handler(vha);
2628 qla83xx_idc_unlock(vha, 0);
/*
 * qla2x00_initialize_adapter() - Top-level adapter bring-up sequence.
 * @vha: SCSI host adapter.
 *
 * Clears per-host/per-hw state flags, then walks the init chain visible
 * below: PCI config -> chip reset -> flash info -> NVRAM config ->
 * firmware verify/load (chip_diag + setup_chip) -> ring init, plus the
 * ISP84xx/8031/8044-specific steps.  Returns a QLA_* status.
 *
 * NOTE(review): extraction dropped interior lines (the `int rval`
 * declaration, several `if (rval)` guards after pci_config /
 * get_flash_info, goto/return paths, closing braces), and "®" below is
 * mojibake for "&reg".  Recover exact text from the upstream file.
 */
2634 * qla2x00_initialize_adapter
2638 * ha = adapter block pointer.
2644 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
2647 struct qla_hw_data *ha = vha->hw;
2648 struct req_que *req = ha->req_q_map[0];
2649 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2651 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2652 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2654 /* Clear adapter flags. */
2655 vha->flags.online = 0;
2656 ha->flags.chip_reset_done = 0;
2657 vha->flags.reset_active = 0;
2658 ha->flags.pci_channel_io_perm_failure = 0;
2659 ha->flags.eeh_busy = 0;
2660 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2661 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2662 atomic_set(&vha->loop_state, LOOP_DOWN);
2663 vha->device_flags = DFLG_NO_CABLE;
2665 vha->flags.management_server_logged_in = 0;
2666 vha->marker_needed = 0;
2667 ha->isp_abort_cnt = 0;
2668 ha->beacon_blink_led = 0;
/* Queue 0 always exists; mark it in the req/rsp qid bitmaps. */
2670 set_bit(0, ha->req_qid_map);
2671 set_bit(0, ha->rsp_qid_map);
2673 ql_dbg(ql_dbg_init, vha, 0x0040,
2674 "Configuring PCI space...\n");
2675 rval = ha->isp_ops->pci_config(vha);
2677 ql_log(ql_log_warn, vha, 0x0044,
2678 "Unable to configure PCI space.\n");
2682 ha->isp_ops->reset_chip(vha);
2684 /* Check for secure flash support */
2685 if (IS_QLA28XX(ha)) {
2686 if (rd_reg_word(®->mailbox12) & BIT_0)
2687 ha->flags.secure_adapter = 1;
2688 ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
2689 (ha->flags.secure_adapter) ? "Yes" : "No");
2693 rval = qla2xxx_get_flash_info(vha);
2695 ql_log(ql_log_fatal, vha, 0x004f,
2696 "Unable to validate FLASH data.\n");
2700 if (IS_QLA8044(ha)) {
2701 qla8044_read_reset_template(vha);
2703 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
2704 * If DONRESET_BIT0 is set, drivers should not set dev_state
2705 * to NEED_RESET. But if NEED_RESET is set, drivers should
2706 * should honor the reset. */
2707 if (ql2xdontresethba == 1)
2708 qla8044_set_idc_dontreset(vha);
2711 ha->isp_ops->get_flash_version(vha, req->ring);
2712 ql_dbg(ql_dbg_init, vha, 0x0061,
2713 "Configure NVRAM parameters...\n");
2715 /* Let priority default to FCP, can be overridden by nvram_config */
2716 ha->fc4_type_priority = FC4_PRIORITY_FCP;
2718 ha->isp_ops->nvram_config(vha);
/* Sanitize: only FCP or NVMe are valid priorities after nvram_config. */
2720 if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
2721 ha->fc4_type_priority != FC4_PRIORITY_NVME)
2722 ha->fc4_type_priority = FC4_PRIORITY_FCP;
2724 ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
2725 ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
2727 if (ha->flags.disable_serdes) {
2728 /* Mask HBA via NVRAM settings? */
2729 ql_log(ql_log_info, vha, 0x0077,
2730 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
2731 return QLA_FUNCTION_FAILED;
2734 ql_dbg(ql_dbg_init, vha, 0x0078,
2735 "Verifying loaded RISC code...\n");
2737 /* If smartsan enabled then require fdmi and rdp enabled */
/* Only load firmware if no usable RISC image is already resident. */
2743 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2744 rval = ha->isp_ops->chip_diag(vha);
2747 rval = qla2x00_setup_chip(vha);
2752 if (IS_QLA84XX(ha)) {
2753 ha->cs84xx = qla84xx_get_chip(vha);
2755 ql_log(ql_log_warn, vha, 0x00d0,
2756 "Unable to configure ISP84XX.\n");
2757 return QLA_FUNCTION_FAILED;
2761 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2762 rval = qla2x00_init_rings(vha);
2764 /* No point in continuing if firmware initialization failed. */
2765 if (rval != QLA_SUCCESS)
2768 ha->flags.chip_reset_done = 1;
2770 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
2771 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
2772 rval = qla84xx_init_chip(vha);
2773 if (rval != QLA_SUCCESS) {
2774 ql_log(ql_log_warn, vha, 0x00d4,
2775 "Unable to initialize ISP84XX.\n");
2776 qla84xx_put_chip(vha);
2780 /* Load the NIC Core f/w if we are the first protocol driver. */
2781 if (IS_QLA8031(ha)) {
2782 rval = qla83xx_nic_core_fw_load(vha);
2784 ql_log(ql_log_warn, vha, 0x0124,
2785 "Error in initializing NIC Core f/w.\n");
2788 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2789 qla24xx_read_fcp_prio_cfg(vha);
2791 if (IS_P3P_TYPE(ha))
2792 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2794 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
/*
 * qla2100_pci_config() - ISP21xx PCI setup: enable bus mastering and MWI,
 * turn on parity/SERR reporting in PCI_COMMAND, disable the expansion ROM,
 * and latch ctrl_status into ha->pci_attr under the hardware lock.
 *
 * NOTE(review): "®" below is mojibake for "&reg", and the `uint16_t w`
 * declaration plus return were dropped by extraction.
 */
2800 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2803 * Returns 0 on success.
2806 qla2100_pci_config(scsi_qla_host_t *vha)
2809 unsigned long flags;
2810 struct qla_hw_data *ha = vha->hw;
2811 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2813 pci_set_master(ha->pdev);
2814 pci_try_set_mwi(ha->pdev);
2816 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2817 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2818 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2820 pci_disable_rom(ha->pdev);
2822 /* Get PCI bus information. */
2823 spin_lock_irqsave(&ha->hardware_lock, flags);
2824 ha->pci_attr = rd_reg_word(®->ctrl_status);
2825 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * qla2300_pci_config() - ISP23xx PCI setup.  Like the 2100 path, plus:
 * keeps INTx enabled on 2322/6322, and on true QLA2300 parts pauses the
 * RISC to read the frame-buffer revision (FPM_2300 => clear MWI as a
 * hardware workaround), sets the latency timer, then latches ctrl_status
 * into ha->pci_attr.
 *
 * NOTE(review): "®" is mojibake for "&reg"; `uint16_t w`, `cnt`, udelay
 * calls inside the poll loops, and the return were dropped by extraction.
 */
2831 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2834 * Returns 0 on success.
2837 qla2300_pci_config(scsi_qla_host_t *vha)
2840 unsigned long flags = 0;
2842 struct qla_hw_data *ha = vha->hw;
2843 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2845 pci_set_master(ha->pdev);
2846 pci_try_set_mwi(ha->pdev);
2848 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2849 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2851 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2852 w &= ~PCI_COMMAND_INTX_DISABLE;
2853 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2856 * If this is a 2300 card and not 2312, reset the
2857 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
2858 * the 2310 also reports itself as a 2300 so we need to get the
2859 * fb revision level -- a 6 indicates it really is a 2300 and
2862 if (IS_QLA2300(ha)) {
2863 spin_lock_irqsave(&ha->hardware_lock, flags);
/* Pause RISC and poll HCCR until the pause takes effect. */
2866 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC);
2867 for (cnt = 0; cnt < 30000; cnt++) {
2868 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0)
2874 /* Select FPM registers. */
2875 wrt_reg_word(®->ctrl_status, 0x20);
2876 rd_reg_word(®->ctrl_status);
2878 /* Get the fb rev level */
2879 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
2881 if (ha->fb_rev == FPM_2300)
2882 pci_clear_mwi(ha->pdev);
2884 /* Deselect FPM registers. */
2885 wrt_reg_word(®->ctrl_status, 0x0);
2886 rd_reg_word(®->ctrl_status);
2888 /* Release RISC module. */
2889 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
2890 for (cnt = 0; cnt < 30000; cnt++) {
2891 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0)
2897 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2900 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2902 pci_disable_rom(ha->pdev);
2904 /* Get PCI bus information. */
2905 spin_lock_irqsave(&ha->hardware_lock, flags);
2906 ha->pci_attr = rd_reg_word(®->ctrl_status);
2907 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * qla24xx_pci_config() - ISP24xx PCI setup: bus master + MWI, parity/SERR
 * on, INTx enabled, latency timer 0x80, PCI-X MMRBC 2048 when on a PCI-X
 * bus, PCIe MRRS when on PCIe, ROM disabled, chip revision cached, and
 * ctrl_status latched into ha->pci_attr.
 *
 * NOTE(review): the comments say "(2048)" but the PCIe call below passes
 * 4096 -- stale comment, confirm intended MRRS against upstream.
 * "®" is mojibake for "&reg"; `uint16_t w` and the return were dropped.
 */
2913 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2916 * Returns 0 on success.
2919 qla24xx_pci_config(scsi_qla_host_t *vha)
2922 unsigned long flags = 0;
2923 struct qla_hw_data *ha = vha->hw;
2924 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2926 pci_set_master(ha->pdev);
2927 pci_try_set_mwi(ha->pdev);
2929 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2930 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2931 w &= ~PCI_COMMAND_INTX_DISABLE;
2932 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2934 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2936 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
2937 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
2938 pcix_set_mmrbc(ha->pdev, 2048);
2940 /* PCIe -- adjust Maximum Read Request Size (2048). */
2941 if (pci_is_pcie(ha->pdev))
2942 pcie_set_readrq(ha->pdev, 4096);
2944 pci_disable_rom(ha->pdev);
2946 ha->chip_revision = ha->pdev->revision;
2948 /* Get PCI bus information. */
2949 spin_lock_irqsave(&ha->hardware_lock, flags);
2950 ha->pci_attr = rd_reg_dword(®->ctrl_status);
2951 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * qla25xx_pci_config() - ISP25xx PCI setup: same command-register and PCIe
 * MRRS handling as the 24xx path, but no PCI-X/latency-timer handling and
 * no ctrl_status read.
 *
 * NOTE(review): comment says "(2048)" but 4096 is passed -- stale comment.
 * `uint16_t w` and the return were dropped by extraction.
 */
2957 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
2960 * Returns 0 on success.
2963 qla25xx_pci_config(scsi_qla_host_t *vha)
2966 struct qla_hw_data *ha = vha->hw;
2968 pci_set_master(ha->pdev);
2969 pci_try_set_mwi(ha->pdev);
2971 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2972 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2973 w &= ~PCI_COMMAND_INTX_DISABLE;
2974 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2976 /* PCIe -- adjust Maximum Read Request Size (2048). */
2977 if (pci_is_pcie(ha->pdev))
2978 pcie_set_readrq(ha->pdev, 4096);
2980 pci_disable_rom(ha->pdev);
2982 ha->chip_revision = ha->pdev->revision;
/*
 * qla2x00_isp_firmware() - Decide whether resident RISC firmware can be
 * reused.  Returns QLA_SUCCESS when risc-code load is disabled or when the
 * resident image's checksum verifies AND the adapter is not still running
 * ROM code (checked via qla2x00_get_adapter_id); otherwise returns failure
 * so the caller loads firmware.
 *
 * NOTE(review): `int rval` declaration and the final return were dropped
 * by extraction.
 */
2988 * qla2x00_isp_firmware() - Choose firmware image.
2991 * Returns 0 on success.
2994 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2997 uint16_t loop_id, topo, sw_cap;
2998 uint8_t domain, area, al_pa;
2999 struct qla_hw_data *ha = vha->hw;
3001 /* Assume loading risc code */
3002 rval = QLA_FUNCTION_FAILED;
3004 if (ha->flags.disable_risc_code_load) {
3005 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
3007 /* Verify checksum of loaded RISC code. */
3008 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
3009 if (rval == QLA_SUCCESS) {
3010 /* And, verify we are not in ROM code. */
3011 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
3012 &area, &domain, &topo, &sw_cap);
3017 ql_dbg(ql_dbg_init, vha, 0x007a,
3018 "**** Load RISC code ****.\n");
/*
 * qla2x00_reset_chip() - Full ISP2x00-family chip reset under the hardware
 * lock: disable interrupts, drop PCI bus mastering, pause the RISC
 * (non-2100), soft-reset the FPM and frame-buffer FIFOs, reset/release the
 * RISC processor, issue CSR_ISP_SOFT_RESET, wait for the chip to come back
 * (polling CSR on 2100/2200/2300, which cannot tolerate PCI reads mid-
 * reset), restore bus mastering, and disable RISC pause on FPM parity
 * errors.  Returns a QLA_* status.
 *
 * NOTE(review): "®" is mojibake for "&reg"; `uint16_t cmd`, `uint32_t cnt`,
 * udelay/barrier lines inside the poll loops, several closing braces, and
 * the return were dropped by extraction.
 */
3024 * qla2x00_reset_chip() - Reset ISP chip.
3027 * Returns 0 on success.
3030 qla2x00_reset_chip(scsi_qla_host_t *vha)
3032 unsigned long flags = 0;
3033 struct qla_hw_data *ha = vha->hw;
3034 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3037 int rval = QLA_FUNCTION_FAILED;
3039 if (unlikely(pci_channel_offline(ha->pdev)))
3042 ha->isp_ops->disable_intrs(ha);
3044 spin_lock_irqsave(&ha->hardware_lock, flags);
3046 /* Turn off master enable */
3048 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
3049 cmd &= ~PCI_COMMAND_MASTER;
3050 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
3052 if (!IS_QLA2100(ha)) {
/* Pause RISC; 2200/2300 need an explicit poll for the pause bit. */
3054 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC);
3055 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
3056 for (cnt = 0; cnt < 30000; cnt++) {
3057 if ((rd_reg_word(®->hccr) &
3058 HCCR_RISC_PAUSE) != 0)
3063 rd_reg_word(®->hccr); /* PCI Posting. */
3067 /* Select FPM registers. */
3068 wrt_reg_word(®->ctrl_status, 0x20);
3069 rd_reg_word(®->ctrl_status); /* PCI Posting. */
3071 /* FPM Soft Reset. */
3072 wrt_reg_word(®->fpm_diag_config, 0x100);
3073 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */
3075 /* Toggle Fpm Reset. */
3076 if (!IS_QLA2200(ha)) {
3077 wrt_reg_word(®->fpm_diag_config, 0x0);
3078 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */
3081 /* Select frame buffer registers. */
3082 wrt_reg_word(®->ctrl_status, 0x10);
3083 rd_reg_word(®->ctrl_status); /* PCI Posting. */
3085 /* Reset frame buffer FIFOs. */
3086 if (IS_QLA2200(ha)) {
3087 WRT_FB_CMD_REG(ha, reg, 0xa000);
3088 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
3090 WRT_FB_CMD_REG(ha, reg, 0x00fc);
3092 /* Read back fb_cmd until zero or 3 seconds max */
3093 for (cnt = 0; cnt < 3000; cnt++) {
3094 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
3100 /* Select RISC module registers. */
3101 wrt_reg_word(®->ctrl_status, 0);
3102 rd_reg_word(®->ctrl_status); /* PCI Posting. */
3104 /* Reset RISC processor. */
3105 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
3106 rd_reg_word(®->hccr); /* PCI Posting. */
3108 /* Release RISC processor. */
3109 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
3110 rd_reg_word(®->hccr); /* PCI Posting. */
3113 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT);
3114 wrt_reg_word(®->hccr, HCCR_CLR_HOST_INT);
3116 /* Reset ISP chip. */
3117 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET);
3119 /* Wait for RISC to recover from reset. */
3120 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3122 * It is necessary to for a delay here since the card doesn't
3123 * respond to PCI reads during a reset. On some architectures
3124 * this will result in an MCA.
3127 for (cnt = 30000; cnt; cnt--) {
3128 if ((rd_reg_word(®->ctrl_status) &
3129 CSR_ISP_SOFT_RESET) == 0)
3136 /* Reset RISC processor. */
3137 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
3139 wrt_reg_word(®->semaphore, 0);
3141 /* Release RISC processor. */
3142 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
3143 rd_reg_word(®->hccr); /* PCI Posting. */
3145 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3146 for (cnt = 0; cnt < 30000; cnt++) {
3147 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
3155 /* Turn on master enable */
3156 cmd |= PCI_COMMAND_MASTER;
3157 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
3159 /* Disable RISC pause on FPM parity error. */
3160 if (!IS_QLA2100(ha)) {
3161 wrt_reg_word(®->hccr, HCCR_DISABLE_PARITY_PAUSE);
3162 rd_reg_word(®->hccr); /* PCI Posting. */
3165 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * qla81xx_reset_mpi() - Reset the MPI firmware via the Write MPI Register
 * mailbox command (register 0x1010 <- 1).  No-op (early return, line lost
 * to extraction) on non-81xx hardware.
 */
3171 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
3174 * Returns 0 on success.
3177 qla81xx_reset_mpi(scsi_qla_host_t *vha)
3179 uint16_t mb[4] = {0x1010, 0, 1, 0};
3181 if (!IS_QLA81XX(vha->hw))
3184 return qla81xx_write_mpi_register(vha, mb);
/*
 * qla_chk_risc_recovery() - After RISC reset on ISP27xx/28xx, read back
 * mailbox0 (and, on failure, mailboxes 1..31) and log the full mailbox
 * state when the reset did not complete.  Returns QLA_SUCCESS or
 * QLA_FUNCTION_FAILED.
 *
 * NOTE(review): "®" is mojibake for "&reg"; the mb[] declaration, loop
 * index `i`, the mb[0] validity test, mbptr increments, closing braces and
 * the final return were dropped by extraction.
 */
3188 qla_chk_risc_recovery(scsi_qla_host_t *vha)
3190 struct qla_hw_data *ha = vha->hw;
3191 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3192 __le16 __iomem *mbptr = ®->mailbox0;
3195 int rc = QLA_SUCCESS;
3197 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3200 /* this check is only valid after RISC reset */
3201 mb[0] = rd_reg_word(mbptr);
3204 rc = QLA_FUNCTION_FAILED;
3206 for (i = 1; i < 32; i++) {
3207 mb[i] = rd_reg_word(mbptr);
3211 ql_log(ql_log_warn, vha, 0x1015,
3212 "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
3213 mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
3214 ql_log(ql_log_warn, vha, 0x1015,
3215 "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
3216 mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
3218 ql_log(ql_log_warn, vha, 0x1015,
3219 "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
3220 mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
3222 ql_log(ql_log_warn, vha, 0x1015,
3223 "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
3224 mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
/*
 * qla24xx_reset_risc() - Full ISP24xx RISC reset under the hardware lock:
 * shut down DMA and wait for CSRX_DMA_ACTIVE to drop, issue the ISP soft
 * reset, wait for firmware to finish NVRAM accesses (mailbox0 -> 0) and
 * for the soft-reset bit to clear, optionally reset the MPI firmware when
 * MPI_RESET_NEEDED is set (retrying the whole ISP abort up to 5 times via
 * the static abts_cnt before taking the board offline), then toggle
 * RISC_RESET and poll mailbox0 until the RISC is ready.  Progress is
 * recorded in ha->fw_dump_cap_flags; interrupts are re-enabled for
 * non-polling hardware.  Returns QLA_SUCCESS or QLA_FUNCTION_TIMEOUT.
 *
 * NOTE(review): "®" is mojibake for "&reg"; declarations (cnt, wd, print),
 * udelay/barrier lines, several braces/labels and the return were dropped
 * by extraction.
 */
3231 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
3234 * Returns 0 on success.
3237 qla24xx_reset_risc(scsi_qla_host_t *vha)
3239 unsigned long flags = 0;
3240 struct qla_hw_data *ha = vha->hw;
3241 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3244 static int abts_cnt; /* ISP abort retry counts */
3245 int rval = QLA_SUCCESS;
3248 spin_lock_irqsave(&ha->hardware_lock, flags);
/* Shut down DMA engines and wait for in-flight DMA to drain. */
3251 wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
3252 for (cnt = 0; cnt < 30000; cnt++) {
3253 if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
3259 if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE))
3260 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
3262 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
3263 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
3264 rd_reg_dword(®->hccr),
3265 rd_reg_dword(®->ctrl_status),
3266 (rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE));
3268 wrt_reg_dword(®->ctrl_status,
3269 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
/* Dummy config-space read to flush the soft-reset write. */
3270 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
3274 /* Wait for firmware to complete NVRAM accesses. */
3275 rd_reg_word(®->mailbox0);
3276 for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 &&
3277 rval == QLA_SUCCESS; cnt--) {
3282 rval = QLA_FUNCTION_TIMEOUT;
3285 if (rval == QLA_SUCCESS)
3286 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
3288 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
3289 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
3290 rd_reg_dword(®->hccr),
3291 rd_reg_word(®->mailbox0));
3293 /* Wait for soft-reset to complete. */
3294 rd_reg_dword(®->ctrl_status);
3295 for (cnt = 0; cnt < 60; cnt++) {
3297 if ((rd_reg_dword(®->ctrl_status) &
3298 CSRX_ISP_SOFT_RESET) == 0)
3303 if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET))
3304 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
3306 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
3307 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
3308 rd_reg_dword(®->hccr),
3309 rd_reg_dword(®->ctrl_status));
3311 /* If required, do an MPI FW reset now */
3312 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
3313 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
3314 if (++abts_cnt < 5) {
3315 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3316 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
3319 * We exhausted the ISP abort retries. We have to
3320 * set the board offline.
3323 vha->flags.online = 0;
3328 wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET);
3329 rd_reg_dword(®->hccr);
3331 wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE);
3332 rd_reg_dword(®->hccr);
3334 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET);
3336 rd_reg_dword(®->hccr);
/* Poll mailbox0 until the RISC reports ready (0). */
3338 wd = rd_reg_word(®->mailbox0);
3339 for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
3343 if (print && qla_chk_risc_recovery(vha))
3346 wd = rd_reg_word(®->mailbox0);
3348 rval = QLA_FUNCTION_TIMEOUT;
3350 ql_log(ql_log_warn, vha, 0x015e,
3351 "RISC reset timeout\n");
3355 if (rval == QLA_SUCCESS)
3356 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
3358 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
3359 "Host Risc 0x%x, mailbox0 0x%x\n",
3360 rd_reg_dword(®->hccr),
3361 rd_reg_word(®->mailbox0));
3363 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3365 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
3366 "Driver in %s mode\n",
3367 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
3369 if (IS_NOPOLLING_TYPE(ha))
3370 ha->isp_ops->enable_intrs(ha);
/*
 * qla25xx_read_risc_sema_reg() - Read the RISC semaphore register through
 * the iobase window: select RISC_REGISTER_BASE_OFFSET, then read at
 * RISC_REGISTER_WINDOW_OFFSET into *data.  ("®" is mojibake for "&reg".)
 */
3376 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
3378 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3380 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
3381 *data = rd_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
/*
 * qla25xx_write_risc_sema_reg() - Write `data` to the RISC semaphore
 * register through the iobase window (counterpart of the read helper
 * above).  ("®" is mojibake for "&reg".)
 */
3385 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
3387 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3389 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
3390 wrt_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
/*
 * qla25xx_manipulate_risc_semaphore() - Workaround for specific OEM boards
 * (PCI subsystem device 0x0175 / 0x0240): pause the RISC, then try to
 * acquire the RISC semaphore in 100 ms steps; if that fails, force-clear
 * the semaphore, and if force-clear also times out, force-set it.
 *
 * NOTE(review): loop bodies (msleep and `while (n--)` headers), the wd32
 * declaration, timeout #defines, gotos and braces were dropped by
 * extraction; "®" earlier in the file is mojibake for "&reg".
 */
3394 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
3397 uint delta_msec = 100;
3398 uint elapsed_msec = 0;
3402 if (vha->hw->pdev->subsystem_device != 0x0175 &&
3403 vha->hw->pdev->subsystem_device != 0x0240)
3406 wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
3410 timeout_msec = TIMEOUT_SEMAPHORE;
3411 n = timeout_msec / delta_msec;
3413 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
3414 qla25xx_read_risc_sema_reg(vha, &wd32);
3415 if (wd32 & RISC_SEMAPHORE)
3418 elapsed_msec += delta_msec;
3419 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
3423 if (!(wd32 & RISC_SEMAPHORE))
3426 if (!(wd32 & RISC_SEMAPHORE_FORCE))
3429 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
3430 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
3431 n = timeout_msec / delta_msec;
3433 qla25xx_read_risc_sema_reg(vha, &wd32);
3434 if (!(wd32 & RISC_SEMAPHORE_FORCE))
3437 elapsed_msec += delta_msec;
3438 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
3442 if (wd32 & RISC_SEMAPHORE_FORCE)
3443 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
3448 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
/*
 * qla24xx_reset_chip() - ISP24xx chip reset entry point: bail out when the
 * PCI channel is permanently offline, otherwise disable interrupts, run
 * the RISC-semaphore board workaround, and perform the RISC reset.
 * Returns the qla24xx_reset_risc() status (early-return line lost to
 * extraction).
 */
3455 * qla24xx_reset_chip() - Reset ISP24xx chip.
3458 * Returns 0 on success.
3461 qla24xx_reset_chip(scsi_qla_host_t *vha)
3463 struct qla_hw_data *ha = vha->hw;
3464 int rval = QLA_FUNCTION_FAILED;
3466 if (pci_channel_offline(ha->pdev) &&
3467 ha->flags.pci_channel_io_perm_failure) {
3471 ha->isp_ops->disable_intrs(ha);
3473 qla25xx_manipulate_risc_semaphore(vha);
3475 /* Perform RISC reset. */
3476 rval = qla24xx_reset_risc(vha);
/*
 * qla2x00_chip_diag() - ISP2x00 self-test: soft-reset the chip (with the
 * documented delay, since the card cannot take PCI reads mid-reset),
 * reset/release the RISC, wait out the QLA2312 parity-error workaround,
 * validate the product ID from mailboxes 1-4, size the firmware transfer
 * (128 bytes on 2200A ROM parts), and run the mailbox wrap test.
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 *
 * NOTE(review): "®" is mojibake for "&reg"; declarations (rval, cnt, data,
 * mb[5]), barrier/udelay lines, the `chip_diag_failed:` label, `rval = 0`
 * and the return were dropped by extraction.
 */
3482 * qla2x00_chip_diag() - Test chip for proper operation.
3485 * Returns 0 on success.
3488 qla2x00_chip_diag(scsi_qla_host_t *vha)
3491 struct qla_hw_data *ha = vha->hw;
3492 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3493 unsigned long flags = 0;
3497 struct req_que *req = ha->req_q_map[0];
3499 /* Assume a failed state */
3500 rval = QLA_FUNCTION_FAILED;
3502 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
3503 ®->flash_address);
3505 spin_lock_irqsave(&ha->hardware_lock, flags);
3507 /* Reset ISP chip. */
3508 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET);
3511 * We need to have a delay here since the card will not respond while
3512 * in reset causing an MCA on some architectures.
3515 data = qla2x00_debounce_register(®->ctrl_status);
3516 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
3518 data = rd_reg_word(®->ctrl_status);
3523 goto chip_diag_failed;
3525 ql_dbg(ql_dbg_init, vha, 0x007c,
3526 "Reset register cleared by chip reset.\n");
3528 /* Reset RISC processor. */
3529 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
3530 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
3532 /* Workaround for QLA2312 PCI parity error */
3533 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3534 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
3535 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
3537 data = RD_MAILBOX_REG(ha, reg, 0);
3544 goto chip_diag_failed;
3546 /* Check product ID of chip */
3547 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
3549 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
3550 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
3551 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
3552 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
3553 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
3554 mb[3] != PROD_ID_3) {
3555 ql_log(ql_log_warn, vha, 0x0062,
3556 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
3557 mb[1], mb[2], mb[3]);
3559 goto chip_diag_failed;
3561 ha->product_id[0] = mb[1];
3562 ha->product_id[1] = mb[2];
3563 ha->product_id[2] = mb[3];
3564 ha->product_id[3] = mb[4];
3566 /* Adjust fw RISC transfer size */
3567 if (req->length > 1024)
3568 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
3570 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
3573 if (IS_QLA2200(ha) &&
3574 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
3575 /* Limit firmware transfer size with a 2200A */
3576 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
3578 ha->device_type |= DT_ISP2200A;
3579 ha->fw_transfer_size = 128;
3582 /* Wrap Incoming Mailboxes Test. */
3583 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3585 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
3586 rval = qla2x00_mbx_reg_test(vha);
3588 ql_log(ql_log_warn, vha, 0x0080,
3589 "Failed mailbox send register test.\n");
3591 /* Flag a successful rval */
3593 spin_lock_irqsave(&ha->hardware_lock, flags);
3597 ql_log(ql_log_info, vha, 0x0081,
3598 "Chip diagnostics **** FAILED ****.\n");
3600 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * qla24xx_chip_diag() - ISP24xx self-test: skip on P3P parts, size the
 * firmware transfer to the full request ring, and run the mailbox register
 * wrap test.  Returns the mailbox-test status (the `int rval` declaration,
 * the `rval = 0` success flag and the return were dropped by extraction).
 */
3606 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
3609 * Returns 0 on success.
3612 qla24xx_chip_diag(scsi_qla_host_t *vha)
3615 struct qla_hw_data *ha = vha->hw;
3616 struct req_que *req = ha->req_q_map[0];
3618 if (IS_P3P_TYPE(ha))
3621 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
3623 rval = qla2x00_mbx_reg_test(vha);
3625 ql_log(ql_log_warn, vha, 0x0082,
3626 "Failed mailbox send register test.\n");
3628 /* Flag a successful rval */
/*
 * qla2x00_init_fce_trace() - Allocate a coherent DMA buffer for the Fibre
 * Channel Event trace and enable FCE in firmware.  Skips hardware that is
 * not FWI2-capable or not in the 25xx/81xx/83xx/27xx/28xx families, and
 * skips re-allocation when ha->fce already exists.  On enable failure the
 * buffer is freed again; on success fce_enabled/fce_dma are recorded.
 *
 * NOTE(review): declarations (rval, tc, tc_dma), GFP flags, `ha->fce`
 * assignment and braces were dropped by extraction.
 */
3636 qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3641 struct qla_hw_data *ha = vha->hw;
3643 if (!IS_FWI2_CAPABLE(ha))
3646 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3647 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3651 ql_dbg(ql_dbg_init, vha, 0x00bd,
3652 "%s: FCE Mem is already allocated.\n",
3657 /* Allocate memory for Fibre Channel Event Buffer. */
3658 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3661 ql_log(ql_log_warn, vha, 0x00be,
3662 "Unable to allocate (%d KB) for FCE.\n",
3667 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3668 ha->fce_mb, &ha->fce_bufs);
3670 ql_log(ql_log_warn, vha, 0x00bf,
3671 "Unable to initialize FCE (%d).\n", rval);
3672 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
3676 ql_dbg(ql_dbg_init, vha, 0x00c0,
3677 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
3679 ha->flags.fce_enabled = 1;
3680 ha->fce_dma = tc_dma;
/*
 * qla2x00_init_eft_trace() - Allocate a coherent DMA buffer for the
 * Extended Firmware Trace and enable EFT in firmware; mirrors the FCE
 * helper above.  Frees the buffer if the enable mailbox fails; records
 * eft_dma on success.
 *
 * NOTE(review): declarations (rval, tc, tc_dma), the `ha->eft` guard and
 * assignment, and braces were dropped by extraction.
 */
3685 qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3690 struct qla_hw_data *ha = vha->hw;
3692 if (!IS_FWI2_CAPABLE(ha))
3696 ql_dbg(ql_dbg_init, vha, 0x00bd,
3697 "%s: EFT Mem is already allocated.\n",
3702 /* Allocate memory for Extended Trace Buffer. */
3703 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3706 ql_log(ql_log_warn, vha, 0x00c1,
3707 "Unable to allocate (%d KB) for EFT.\n",
3712 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3714 ql_log(ql_log_warn, vha, 0x00c2,
3715 "Unable to initialize EFT (%d).\n", rval);
3716 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
3720 ql_dbg(ql_dbg_init, vha, 0x00c3,
3721 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3723 ha->eft_dma = tc_dma;
/*
 * qla2x00_alloc_offload_mem() - Convenience wrapper that sets up both the
 * FCE and EFT trace buffers.
 */
3728 qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
3730 qla2x00_init_fce_trace(vha);
3731 qla2x00_init_eft_trace(vha);
/*
 * qla2x00_alloc_fw_dump() - Compute the firmware-dump size for this chip
 * family and (re)allocate the vmalloc'd dump buffer.
 *
 * Size accounting visible below: a per-family fixed region, external
 * memory, queue-0 request/response rings, extra per-queue space for
 * multiqueue (all queues minus Q0, plus the ATIO ring when target mode is
 * active), FCE/EFT chains, 27xx/28xx template-driven sizes (plus a spare
 * MPI dump), and exchange-offload/exlogin chains.  If the existing buffer
 * is too small, a new one is allocated under optrom_mutex, any prior dump
 * contents are preserved via memcpy, and the dump header (signature
 * "QLGC", version, region sizes, EFT address) is initialized.
 *
 * NOTE(review): extraction dropped many guards (e.g. the IS_QLA2100/P3P
 * early returns, `if (!fw_dump)` checks, else branches, vfree of the old
 * buffer) and closing braces; recover from the upstream file before use.
 */
3735 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3737 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3738 eft_size, fce_size, mq_size;
3739 struct qla_hw_data *ha = vha->hw;
3740 struct req_que *req = ha->req_q_map[0];
3741 struct rsp_que *rsp = ha->rsp_q_map[0];
3742 struct qla2xxx_fw_dump *fw_dump;
3745 ql_dbg(ql_dbg_init, vha, 0x00bd,
3746 "Firmware dump already allocated.\n");
3751 ha->fw_dump_cap_flags = 0;
3752 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3753 req_q_size = rsp_q_size = 0;
3755 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3756 fixed_size = sizeof(struct qla2100_fw_dump);
3757 } else if (IS_QLA23XX(ha)) {
3758 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
3759 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3761 } else if (IS_FWI2_CAPABLE(ha)) {
3763 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3764 else if (IS_QLA81XX(ha))
3765 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3766 else if (IS_QLA25XX(ha))
3767 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3769 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3771 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3774 if (!IS_QLA83XX(ha))
3775 mq_size = sizeof(struct qla2xxx_mq_chain);
3777 * Allocate maximum buffer size for all queues - Q0.
3778 * Resizing must be done at end-of-dump processing.
3780 mq_size += (ha->max_req_queues - 1) *
3781 (req->length * sizeof(request_t));
3782 mq_size += (ha->max_rsp_queues - 1) *
3783 (rsp->length * sizeof(response_t));
3785 if (ha->tgt.atio_ring)
3786 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* FCE/EFT buffers are set up here so their sizes can be included. */
3788 qla2x00_init_fce_trace(vha);
3790 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3791 qla2x00_init_eft_trace(vha);
3793 eft_size = EFT_SIZE;
3796 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3797 struct fwdt *fwdt = ha->fwdt;
3800 for (j = 0; j < 2; j++, fwdt++) {
3801 if (!fwdt->template) {
3802 ql_dbg(ql_dbg_init, vha, 0x00ba,
3803 "-> fwdt%u no template\n", j);
3806 ql_dbg(ql_dbg_init, vha, 0x00fa,
3807 "-> fwdt%u calculating fwdump size...\n", j);
3808 fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
3809 vha, fwdt->template);
3810 ql_dbg(ql_dbg_init, vha, 0x00fa,
3811 "-> fwdt%u calculated fwdump size = %#lx bytes\n",
3812 j, fwdt->dump_size);
3813 dump_size += fwdt->dump_size;
3815 /* Add space for spare MPI fw dump. */
3816 dump_size += ha->fwdt[1].dump_size;
3818 req_q_size = req->length * sizeof(request_t);
3819 rsp_q_size = rsp->length * sizeof(response_t);
3820 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
3821 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
3823 ha->chain_offset = dump_size;
3824 dump_size += mq_size + fce_size;
3825 if (ha->exchoffld_buf)
3826 dump_size += sizeof(struct qla2xxx_offld_chain) +
3828 if (ha->exlogin_buf)
3829 dump_size += sizeof(struct qla2xxx_offld_chain) +
/* (Re)allocate only when no buffer exists or the new size is larger. */
3833 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
3835 ql_dbg(ql_dbg_init, vha, 0x00c5,
3836 "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
3837 __func__, dump_size, ha->fw_dump_len,
3838 ha->fw_dump_alloc_len);
3840 fw_dump = vmalloc(dump_size);
3842 ql_log(ql_log_warn, vha, 0x00c4,
3843 "Unable to allocate (%d KB) for firmware dump.\n",
3846 mutex_lock(&ha->optrom_mutex);
3847 if (ha->fw_dumped) {
3848 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
3850 ha->fw_dump = fw_dump;
3851 ha->fw_dump_alloc_len = dump_size;
3852 ql_dbg(ql_dbg_init, vha, 0x00c5,
3853 "Re-Allocated (%d KB) and save firmware dump.\n",
3857 ha->fw_dump = fw_dump;
3859 ha->fw_dump_len = ha->fw_dump_alloc_len =
3861 ql_dbg(ql_dbg_init, vha, 0x00c5,
3862 "Allocated (%d KB) for firmware dump.\n",
3865 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3866 ha->mpi_fw_dump = (char *)fw_dump +
3867 ha->fwdt[1].dump_size;
3868 mutex_unlock(&ha->optrom_mutex);
/* Initialize the dump header for non-template chips. */
3872 ha->fw_dump->signature[0] = 'Q';
3873 ha->fw_dump->signature[1] = 'L';
3874 ha->fw_dump->signature[2] = 'G';
3875 ha->fw_dump->signature[3] = 'C';
3876 ha->fw_dump->version = htonl(1);
3878 ha->fw_dump->fixed_size = htonl(fixed_size);
3879 ha->fw_dump->mem_size = htonl(mem_size);
3880 ha->fw_dump->req_q_size = htonl(req_q_size);
3881 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3883 ha->fw_dump->eft_size = htonl(eft_size);
3884 ha->fw_dump->eft_addr_l =
3885 htonl(LSD(ha->eft_dma));
3886 ha->fw_dump->eft_addr_h =
3887 htonl(MSD(ha->eft_dma));
3889 ha->fw_dump->header_size =
3891 (struct qla2xxx_fw_dump, isp));
3893 mutex_unlock(&ha->optrom_mutex);
/*
 * qla81xx_mpi_sync() - ISP81xx only: synchronize the MPS setting between
 * PCI config space (offset 0x54) and firmware RAM word 0x7a15, bracketed
 * by a semaphore at RAM word 0x7c00 (write 1 to acquire, 0 to release).
 * If the MPS_MASK bits already agree, nothing is rewritten.
 *
 * NOTE(review): declarations (rval, dc, dw), the masking arithmetic that
 * merges dc into dw, goto labels and the return were dropped by
 * extraction.
 */
3899 qla81xx_mpi_sync(scsi_qla_host_t *vha)
3901 #define MPS_MASK 0xe0
3906 if (!IS_QLA81XX(vha->hw))
3909 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
3910 if (rval != QLA_SUCCESS) {
3911 ql_log(ql_log_warn, vha, 0x0105,
3912 "Unable to acquire semaphore.\n");
3916 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
3917 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
3918 if (rval != QLA_SUCCESS) {
3919 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
3924 if (dc == (dw & MPS_MASK))
3929 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
3930 if (rval != QLA_SUCCESS) {
3931 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
3935 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
3936 if (rval != QLA_SUCCESS) {
3937 ql_log(ql_log_warn, vha, 0x006d,
3938 "Unable to release semaphore.\n");
/*
 * qla2x00_alloc_outstanding_cmds() - Allocate req->outstanding_cmds sized
 * to min(cur_fw_xcb_count, cur_fw_iocb_count) on FWI2 hardware (or the
 * DEFAULT_OUTSTANDING_COMMANDS fallback otherwise).  If the full-size
 * kcalloc fails, retries with MIN_OUTSTANDING_COMMANDS; gives up with
 * QLA_FUNCTION_FAILED and num_outstanding_cmds = 0 only when even that
 * fails.  Idempotent: an existing array is never reallocated.
 *
 * NOTE(review): the kcalloc element-size/GFP arguments, else branches,
 * braces and the success return were dropped by extraction.
 */
3946 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3948 /* Don't try to reallocate the array */
3949 if (req->outstanding_cmds)
3952 if (!IS_FWI2_CAPABLE(ha))
3953 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3955 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3956 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
3958 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
3961 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3965 if (!req->outstanding_cmds) {
3967 * Try to allocate a minimal size just so we can get through
3970 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
3971 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3975 if (!req->outstanding_cmds) {
3976 ql_log(ql_log_fatal, NULL, 0x0126,
3977 "Failed to allocate memory for "
3978 "outstanding_cmds for req_que %p.\n", req);
3979 req->num_outstanding_cmds = 0;
3980 return QLA_FUNCTION_FAILED;
/*
 * PRINT_FIELD() - If flag `_flag` is set in SFP field a0->_field, append
 * `_str` to the local `str` buffer via snprintf, tracking `ptr`/`leftover`
 * (used by qla2xxx_print_sfp_info below).  NOTE(review): this macro's
 * continuation lines were partially lost in extraction -- no comments are
 * inserted between the surviving `\`-continued lines.
 */
3987 #define PRINT_FIELD(_field, _flag, _str) { \
3988 if (a0->_field & _flag) {\
3994 len = snprintf(ptr, leftover, "%s", _str); \
/*
 * Decode the cached SFF-8472/8247 page in vha->hw->sfp_data and emit the
 * transceiver's vendor, part, media, link-length and distance fields to
 * the ql_dbg init log.  Debug/diagnostic output only; no state is changed.
 */
4001 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
4004 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
4005 u8 str[STR_LEN], *ptr, p;
4008 memset(str, 0, STR_LEN);
/*
 * NOTE(review): vendor_name/vendor_pn are passed as the snprintf FORMAT
 * argument.  EEPROM bytes containing '%' would be interpreted as
 * conversions (CWE-134).  Should presumably be
 * snprintf(str, ..., "%s", a0->vendor_name) -- confirm against upstream.
 */
4009 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
4010 ql_dbg(ql_dbg_init, vha, 0x015a,
4011 "SFP MFG Name: %s\n", str);
4013 memset(str, 0, STR_LEN);
4014 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
4015 ql_dbg(ql_dbg_init, vha, 0x015c,
4016 "SFP Part Name: %s\n", str);
/* Media type bits (compliance code byte 9). */
4019 memset(str, 0, STR_LEN);
4023 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
4024 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
4025 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
4026 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
4027 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
4028 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
4029 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
4030 ql_dbg(ql_dbg_init, vha, 0x0160,
4031 "SFP Media: %s\n", str);
/* Link-length class bits (compliance code byte 7). */
4034 memset(str, 0, STR_LEN);
4038 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
4039 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
4040 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
4041 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
4042 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
4043 ql_dbg(ql_dbg_init, vha, 0x0196,
4044 "SFP Link Length: %s\n", str);
/* Transmitter technology bits (bytes 7 and 8). */
4046 memset(str, 0, STR_LEN);
4050 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
4051 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
4052 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
4053 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
4054 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
4055 ql_dbg(ql_dbg_init, vha, 0x016e,
4056 "SFP FC Link Tech: %s\n", str);
/* Supported distances, each field in its own unit (km, 100m, 10m steps). */
4059 ql_dbg(ql_dbg_init, vha, 0x016f,
4060 "SFP Distant: %d km\n", a0->length_km);
4061 if (a0->length_100m)
4062 ql_dbg(ql_dbg_init, vha, 0x0170,
4063 "SFP Distant: %d m\n", a0->length_100m*100);
4064 if (a0->length_50um_10m)
4065 ql_dbg(ql_dbg_init, vha, 0x0189,
4066 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
4067 if (a0->length_62um_10m)
4068 ql_dbg(ql_dbg_init, vha, 0x018a,
4069 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
4070 if (a0->length_om4_10m)
4071 ql_dbg(ql_dbg_init, vha, 0x0194,
4072 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
4073 if (a0->length_om3_10m)
4074 ql_dbg(ql_dbg_init, vha, 0x0195,
4075 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
4080 * qla24xx_detect_sfp()
4082 * @vha: adapter state pointer.
4085 * 0 -- Configure firmware to use short-range settings -- normal
4086 * buffer-to-buffer credits.
4088 * 1 -- Configure firmware to use long-range settings -- extra
4089 * buffer-to-buffer credits should be allocated with
4090 * ha->lr_distance containing distance settings from NVRAM or SFP
/*
 * Decide short- vs long-range buffer-to-buffer credit settings.
 * Seeds ha->flags.lr_detected / ha->lr_distance from NVRAM, then (when
 * BPM is enabled) overrides them from the SFP's SFF page.  Returns
 * ha->flags.lr_detected (1 = long-range settings needed).
 */
4094 qla24xx_detect_sfp(scsi_qla_host_t *vha)
4097 struct sff_8247_a0 *a;
4098 struct qla_hw_data *ha = vha->hw;
4099 struct nvram_81xx *nv = ha->nvram;
/* Index into lengths[] used when no LR distance is known. */
4100 #define LR_DISTANCE_UNKNOWN 2
4101 static const char * const types[] = { "Short", "Long" };
4102 static const char * const lengths[] = { "(10km)", "(5km)", "" };
4105 /* Seed with NVRAM settings. */
4107 ha->flags.lr_detected = 0;
4108 if (IS_BPM_RANGE_CAPABLE(ha) &&
4109 (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
4111 ha->flags.lr_detected = 1;
/* Distance code stored in the upper NVRAM enhanced_features bits. */
4113 (nv->enhanced_features >> LR_DIST_NV_POS)
/* Without BPM the NVRAM-seeded values stand -- skip the SFP read. */
4117 if (!IS_BPM_ENABLED(vha))
4119 /* Determine SR/LR capabilities of SFP/Transceiver. */
4120 rc = qla2x00_read_sfp_dev(vha, NULL, 0);
4125 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
4126 qla2xxx_print_sfp_info(vha);
/* SFP data read OK: discard NVRAM seed and trust the transceiver. */
4128 ha->flags.lr_detected = 0;
4130 if (ll & FC_LL_VL || ll & FC_LL_L) {
4131 /* Long range, track length. */
4132 ha->flags.lr_detected = 1;
/* >5 km (or >5000 m in 100 m units) classifies as the 10 km setting. */
4134 if (a->length_km > 5 || a->length_100m > 50)
4135 ha->lr_distance = LR_DISTANCE_10K;
4137 ha->lr_distance = LR_DISTANCE_5K;
4141 ql_dbg(ql_dbg_async, vha, 0x507b,
4142 "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
4143 types[ha->flags.lr_detected],
4144 ha->flags.lr_detected ? lengths[ha->lr_distance] :
4145 lengths[LR_DISTANCE_UNKNOWN],
4146 used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
4147 return ha->flags.lr_detected;
/*
 * Initialize per-queue-pair firmware resource accounting: total/limit/used
 * counters for IOCBs and exchanges, derived from the original firmware
 * counts scaled by QLA_IOCB_PCT_LIMIT percent.  The same limits are applied
 * to the base qpair and to every allocated entry in queue_pair_map[].
 */
4150 void qla_init_iocb_limit(scsi_qla_host_t *vha)
4154 struct qla_hw_data *ha = vha->hw;
/* +1 accounts for the base qpair in addition to ha->num_qpairs. */
4156 num_qps = ha->num_qpairs + 1;
4157 limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
4159 ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
4160 ha->base_qpair->fwres.iocbs_limit = limit;
/* Per-qpair share of the global IOCB limit. */
4161 ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
4162 ha->base_qpair->fwres.iocbs_used = 0;
4164 ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
4165 ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
4166 QLA_IOCB_PCT_LIMIT) / 100;
4167 ha->base_qpair->fwres.exch_used = 0;
/* Mirror the same accounting into every allocated extra queue pair. */
4169 for (i = 0; i < ha->max_qpairs; i++) {
4170 if (ha->queue_pair_map[i]) {
4171 ha->queue_pair_map[i]->fwres.iocbs_total =
4172 ha->orig_fw_iocb_count;
4173 ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
4174 ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
4176 ha->queue_pair_map[i]->fwres.iocbs_used = 0;
4177 ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
4178 ha->queue_pair_map[i]->fwres.exch_limit =
4179 (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
4180 ha->queue_pair_map[i]->fwres.exch_used = 0;
4186 * qla2x00_setup_chip() - Load and start RISC firmware.
4189 * Returns 0 on success.
4192 qla2x00_setup_chip(scsi_qla_host_t *vha)
4195 uint32_t srisc_address = 0;
4196 struct qla_hw_data *ha = vha->hw;
4197 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4198 unsigned long flags;
4199 uint16_t fw_major_version;
/* P3P (ISP82xx) parts load firmware via their own path, then stop it. */
4202 if (IS_P3P_TYPE(ha)) {
4203 rval = ha->isp_ops->load_risc(vha, &srisc_address);
4204 if (rval == QLA_SUCCESS) {
4205 qla2x00_stop_firmware(vha);
4206 goto enable_82xx_npiv;
4211 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
4212 /* Disable SRAM, Instruction RAM and GP RAM parity. */
4213 spin_lock_irqsave(&ha->hardware_lock, flags);
/* NOTE(review): "®" here looks like mojibake for "&reg" -- confirm vs upstream. */
4214 wrt_reg_word(®->hccr, (HCCR_ENABLE_PARITY + 0x0));
4215 rd_reg_word(®->hccr);
4216 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Sync MPS settings on ISP81xx before loading firmware (no-op elsewhere). */
4219 qla81xx_mpi_sync(vha);
4222 /* Load firmware sequences */
4223 rval = ha->isp_ops->load_risc(vha, &srisc_address);
4224 if (rval == QLA_SUCCESS) {
4225 ql_dbg(ql_dbg_init, vha, 0x00c9,
4226 "Verifying Checksum of loaded RISC code.\n");
4228 rval = qla2x00_verify_checksum(vha, srisc_address);
4229 if (rval == QLA_SUCCESS) {
4230 /* Start firmware execution. */
4231 ql_dbg(ql_dbg_init, vha, 0x00ca,
4232 "Starting firmware.\n");
4235 ha->flags.exlogins_enabled = 1;
/* Enable exchange offload when the module option requests it. */
4237 if (qla_is_exch_offld_enabled(vha))
4238 ha->flags.exchoffld_enabled = 1;
4240 rval = qla2x00_execute_fw(vha, srisc_address);
4241 /* Retrieve firmware information. */
4242 if (rval == QLA_SUCCESS) {
4243 /* Enable BPM support? */
/* One-shot: if an LR SFP is found, reset and re-run fw load with LR creds. */
4244 if (!done_once++ && qla24xx_detect_sfp(vha)) {
4245 ql_dbg(ql_dbg_init, vha, 0x00ca,
4246 "Re-starting firmware -- BPM.\n");
4247 /* Best-effort - re-init. */
4248 ha->isp_ops->reset_chip(vha);
4249 ha->isp_ops->chip_diag(vha);
4250 goto execute_fw_with_lr;
4253 if (IS_ZIO_THRESHOLD_CAPABLE(ha))
4254 qla27xx_set_zio_threshold(vha,
4255 ha->last_zio_threshold);
4257 rval = qla2x00_set_exlogins_buffer(vha);
4258 if (rval != QLA_SUCCESS)
4261 rval = qla2x00_set_exchoffld_buffer(vha);
4262 if (rval != QLA_SUCCESS)
/* Snapshot pre-query major version to detect first-time setup below. */
4266 fw_major_version = ha->fw_major_version;
4267 if (IS_P3P_TYPE(ha))
4268 qla82xx_check_md_needed(vha);
4270 rval = qla2x00_get_fw_version(vha);
4271 if (rval != QLA_SUCCESS)
4273 ha->flags.npiv_supported = 0;
/* Firmware attribute BIT_2 advertises NPIV capability. */
4274 if (IS_QLA2XXX_MIDTYPE(ha) &&
4275 (ha->fw_attributes & BIT_2)) {
4276 ha->flags.npiv_supported = 1;
/* Clamp vport count to a MIN_MULTI_ID_FABRIC-aligned value. */
4277 if ((!ha->max_npiv_vports) ||
4278 ((ha->max_npiv_vports + 1) %
4279 MIN_MULTI_ID_FABRIC))
4280 ha->max_npiv_vports =
4281 MIN_MULTI_ID_FABRIC - 1;
4283 qla2x00_get_resource_cnts(vha);
4284 qla_init_iocb_limit(vha);
4287 * Allocate the array of outstanding commands
4288 * now that we know the firmware resources.
4290 rval = qla2x00_alloc_outstanding_cmds(ha,
4292 if (rval != QLA_SUCCESS)
/* fw_major_version == 0 means first initialization: allocate extras once. */
4295 if (!fw_major_version && !(IS_P3P_TYPE(ha)))
4296 qla2x00_alloc_offload_mem(vha);
4298 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
4299 qla2x00_alloc_fw_dump(vha);
4305 ql_log(ql_log_fatal, vha, 0x00cd,
4306 "ISP Firmware failed checksum.\n");
4310 /* Enable PUREX PASSTHRU */
4311 if (ql2xrdpenable || ha->flags.scm_supported_f ||
4312 ha->flags.edif_enabled)
4313 qla25xx_set_els_cmds_supported(vha);
/* Re-enable the parity checks disabled before the firmware load above. */
4317 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
4318 /* Enable proper parity. */
4319 spin_lock_irqsave(&ha->hardware_lock, flags);
4322 wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x1);
4324 /* SRAM, Instruction RAM and GP RAM parity */
4325 wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x7);
4326 rd_reg_word(®->hccr);
4327 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Flash Access Control (FAC): 27xx/28xx always support it; 81xx+ query fw. */
4330 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
4331 ha->flags.fac_supported = 1;
4332 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
4335 rval = qla81xx_fac_get_sector_size(vha, &size);
4336 if (rval == QLA_SUCCESS) {
4337 ha->flags.fac_supported = 1;
/* Sector size is reported in words; <<2 converts to bytes. */
4338 ha->fdt_block_size = size << 2;
4340 ql_log(ql_log_warn, vha, 0x00ce,
4341 "Unsupported FAC firmware (%d.%02d.%02d).\n",
4342 ha->fw_major_version, ha->fw_minor_version,
4343 ha->fw_subminor_version);
/* On 83xx, missing FAC support is tolerated rather than fatal. */
4345 if (IS_QLA83XX(ha)) {
4346 ha->flags.fac_supported = 0;
4353 ql_log(ql_log_fatal, vha, 0x00cf,
4354 "Setup chip ****FAILED****.\n");
4361 * qla2x00_init_response_q_entries() - Initializes response queue entries.
4362 * @rsp: response queue
4364 * Beginning of request ring has initialization control block already built
4365 * by nvram config routine.
4367 * Returns 0 on success.
/* Reset the response ring to its start and mark every entry as processed. */
4370 qla2x00_init_response_q_entries(struct rsp_que *rsp)
4375 rsp->ring_ptr = rsp->ring;
4376 rsp->ring_index = 0;
4377 rsp->status_srb = NULL;
4378 pkt = rsp->ring_ptr;
/* Stamp each entry so stale entries are not mistaken for new responses. */
4379 for (cnt = 0; cnt < rsp->length; cnt++) {
4380 pkt->signature = RESPONSE_PROCESSED;
4386 * qla2x00_update_fw_options() - Read and process firmware options.
4389 * Returns 0 on success.
/*
 * Read the current firmware options, fold in driver policy and the NVRAM
 * serial-link (swing/emphasis/sensitivity) settings for 2G-era ISPs, and
 * write the result back with qla2x00_set_fw_options().
 */
4392 qla2x00_update_fw_options(scsi_qla_host_t *vha)
4394 uint16_t swing, emphasis, tx_sens, rx_sens;
4395 struct qla_hw_data *ha = vha->hw;
4397 memset(ha->fw_options, 0, sizeof(ha->fw_options));
4398 qla2x00_get_fw_options(vha, ha->fw_options);
/* 2100/2200 have no serial-link options; skip straight to the common bits. */
4400 if (IS_QLA2100(ha) || IS_QLA2200(ha))
4403 /* Serial Link options. */
4404 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
4405 "Serial link options.\n");
4406 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
4407 ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
4409 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
/* BIT_2 in byte 3 means NVRAM carries explicit swing/emphasis values. */
4410 if (ha->fw_seriallink_options[3] & BIT_2) {
4411 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
/* 1G settings: packed into fw_options[10]. */
4414 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
4415 emphasis = (ha->fw_seriallink_options[2] &
4416 (BIT_4 | BIT_3)) >> 3;
4417 tx_sens = ha->fw_seriallink_options[0] &
4418 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4419 rx_sens = (ha->fw_seriallink_options[0] &
4420 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4421 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
4422 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4425 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
4426 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4427 ha->fw_options[10] |= BIT_5 |
4428 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4429 (tx_sens & (BIT_1 | BIT_0));
/* 2G settings: same layout, packed into fw_options[11]. */
4432 swing = (ha->fw_seriallink_options[2] &
4433 (BIT_7 | BIT_6 | BIT_5)) >> 5;
4434 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
4435 tx_sens = ha->fw_seriallink_options[1] &
4436 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4437 rx_sens = (ha->fw_seriallink_options[1] &
4438 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4439 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
4440 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4443 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
4444 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4445 ha->fw_options[11] |= BIT_5 |
4446 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4447 (tx_sens & (BIT_1 | BIT_0));
4451 /* Return command IOCBs without waiting for an ABTS to complete. */
4452 ha->fw_options[3] |= BIT_13;
4455 if (ha->flags.enable_led_scheme)
4456 ha->fw_options[2] |= BIT_12;
4458 /* Detect ISP6312. */
4460 ha->fw_options[2] |= BIT_13;
4462 /* Set Retry FLOGI in case of P2P connection */
4463 if (ha->operating_mode == P2P) {
4464 ha->fw_options[2] |= BIT_3;
4465 ql_dbg(ql_dbg_disc, vha, 0x2100,
4466 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
4467 __func__, ha->fw_options[2]);
4470 /* Update firmware options. */
4471 qla2x00_set_fw_options(vha, ha->fw_options);
/*
 * FWI2 (24xx+) variant of update_fw_options: set driver policy bits in
 * ha->fw_options[1..3] (ABTS handling, FLOGI retry, ATIO routing, exchange
 * tracking, EDIF/N2N security, PUREX, transceiver events), push them to
 * firmware, then update serdes parameters from NVRAM when enabled.
 */
4475 qla24xx_update_fw_options(scsi_qla_host_t *vha)
4478 struct qla_hw_data *ha = vha->hw;
/* ISP82xx manages its own fw options -- nothing to do here. */
4480 if (IS_P3P_TYPE(ha))
4483 /* Hold status IOCBs until ABTS response received. */
4485 ha->fw_options[3] |= BIT_12;
4487 /* Set Retry FLOGI in case of P2P connection */
4488 if (ha->operating_mode == P2P) {
4489 ha->fw_options[2] |= BIT_3;
4490 ql_dbg(ql_dbg_disc, vha, 0x2101,
4491 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
4492 __func__, ha->fw_options[2]);
4495 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
/* Only in target/dual mode on 83xx/27xx/28xx, and never with EDIF. */
4496 if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
4497 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
4498 if (qla_tgt_mode_enabled(vha) ||
4499 qla_dual_mode_enabled(vha))
4500 ha->fw_options[2] |= BIT_11;
4502 ha->fw_options[2] &= ~BIT_11;
4505 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4508 * Tell FW to track each exchange to prevent
4509 * driver from using stale exchange.
4511 if (qla_tgt_mode_enabled(vha) ||
4512 qla_dual_mode_enabled(vha))
4513 ha->fw_options[2] |= BIT_4;
4515 ha->fw_options[2] &= ~(BIT_4);
4517 /* Reserve 1/2 of emergency exchanges for ELS.*/
4518 if (qla2xuseresexchforels)
4519 ha->fw_options[2] |= BIT_8;
4521 ha->fw_options[2] &= ~BIT_8;
4524 * N2N: set Secure=1 for PLOGI ACC and
4525 * fw shall not send PRLI after PLOGI Acc
4527 if (ha->flags.edif_enabled &&
4528 DBELL_ACTIVE(vha)) {
4529 ha->fw_options[3] |= BIT_15;
4530 ha->flags.n2n_fw_acc_sec = 1;
4532 ha->fw_options[3] &= ~BIT_15;
4533 ha->flags.n2n_fw_acc_sec = 0;
/* Enable PUREX IOCB delivery for RDP / SCM / EDIF consumers. */
4537 if (ql2xrdpenable || ha->flags.scm_supported_f ||
4538 ha->flags.edif_enabled)
4539 ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
4541 /* Enable Async 8130/8131 events -- transceiver insertion/removal */
4542 if (IS_BPM_RANGE_CAPABLE(ha))
4543 ha->fw_options[3] |= BIT_10;
4545 ql_dbg(ql_dbg_init, vha, 0x00e8,
4546 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
4547 __func__, ha->fw_options[1], ha->fw_options[2],
4548 ha->fw_options[3], vha->host->active_mode);
/* Only issue the mailbox command when at least one option word is set. */
4550 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
4551 qla2x00_set_fw_options(vha, ha->fw_options);
4553 /* Update Serial Link options. */
/* Word 0 BIT_0 clear means NVRAM serdes settings are disabled. */
4554 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
4557 rval = qla2x00_set_serdes_params(vha,
4558 le16_to_cpu(ha->fw_seriallink_options24[1]),
4559 le16_to_cpu(ha->fw_seriallink_options24[2]),
4560 le16_to_cpu(ha->fw_seriallink_options24[3]));
4561 if (rval != QLA_SUCCESS) {
4562 ql_log(ql_log_warn, vha, 0x0104,
4563 "Unable to update Serial Link options (%x).\n", rval);
/*
 * Program request/response ring geometry and DMA addresses into the
 * legacy (pre-FWI2) init control block and zero the queue index registers.
 */
4568 qla2x00_config_rings(struct scsi_qla_host *vha)
4570 struct qla_hw_data *ha = vha->hw;
4571 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4572 struct req_que *req = ha->req_q_map[0];
4573 struct rsp_que *rsp = ha->rsp_q_map[0];
4575 /* Setup ring parameters in initialization control block. */
4576 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
4577 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
4578 ha->init_cb->request_q_length = cpu_to_le16(req->length);
4579 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
4580 put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
4581 put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
/* Reset all four in/out index registers to the ring start. */
4583 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
4584 wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
4585 wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
4586 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
4587 rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
/*
 * FWI2 (24xx+) ring configuration: program request/response/ATIO queue
 * geometry and DMA addresses into the init_cb_24xx, set multiqueue/MSI-X
 * and shadow-register option bits, zero the queue index registers, and
 * apply any user-forced link speed.
 */
4591 qla24xx_config_rings(struct scsi_qla_host *vha)
4593 struct qla_hw_data *ha = vha->hw;
4594 device_reg_t *reg = ISP_QUE_REG(ha, 0);
4595 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
4596 struct qla_msix_entry *msix;
4597 struct init_cb_24xx *icb;
4599 struct req_que *req = ha->req_q_map[0];
4600 struct rsp_que *rsp = ha->rsp_q_map[0];
4602 /* Setup ring parameters in initialization control block. */
4603 icb = (struct init_cb_24xx *)ha->init_cb;
4604 icb->request_q_outpointer = cpu_to_le16(0);
4605 icb->response_q_inpointer = cpu_to_le16(0);
4606 icb->request_q_length = cpu_to_le16(req->length);
4607 icb->response_q_length = cpu_to_le16(rsp->length);
4608 put_unaligned_le64(req->dma, &icb->request_q_address);
4609 put_unaligned_le64(rsp->dma, &icb->response_q_address);
4611 /* Setup ATIO queue dma pointers for target mode */
4612 icb->atio_q_inpointer = cpu_to_le16(0);
4613 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
4614 put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
/* BIT_30|BIT_29: enable shadow registers for in/out pointers. */
4616 if (IS_SHADOW_REG_CAPABLE(ha))
4617 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
/* Multiqueue-capable path: QoS, RID and base-queue MSI-X vector. */
4619 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4621 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
4622 icb->rid = cpu_to_le16(rid);
4623 if (ha->flags.msix_enabled) {
4624 msix = &ha->msix_entries[1];
4625 ql_dbg(ql_dbg_init, vha, 0x0019,
4626 "Registering vector 0x%x for base que.\n",
4628 icb->msix = cpu_to_le16(msix->entry);
4630 /* Use alternate PCI bus number */
4632 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
4633 /* Use alternate PCI devfn */
4635 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
4637 /* Use Disable MSIX Handshake mode for capable adapters */
4638 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
4639 (ha->flags.msix_enabled)) {
4640 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
4641 ha->flags.disable_msix_handshake = 1;
4642 ql_dbg(ql_dbg_init, vha, 0x00fe,
4643 "MSIX Handshake Disable Mode turned on.\n");
4645 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
4647 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
/* Zero the 25mq-layout queue index registers. */
/* NOTE(review): "®" below looks like mojibake for "&reg" -- confirm vs upstream. */
4649 wrt_reg_dword(®->isp25mq.req_q_in, 0);
4650 wrt_reg_dword(®->isp25mq.req_q_out, 0);
4651 wrt_reg_dword(®->isp25mq.rsp_q_in, 0);
4652 wrt_reg_dword(®->isp25mq.rsp_q_out, 0);
/* Non-multiqueue path: zero the isp24-layout index registers instead. */
4654 wrt_reg_dword(®->isp24.req_q_in, 0);
4655 wrt_reg_dword(®->isp24.req_q_out, 0);
4656 wrt_reg_dword(®->isp24.rsp_q_in, 0);
4657 wrt_reg_dword(®->isp24.rsp_q_out, 0);
/* Let target mode finish its ring setup. */
4660 qlt_24xx_config_rings(vha);
4662 /* If the user has configured the speed, set it here */
4663 if (ha->set_data_rate) {
4664 ql_dbg(ql_dbg_init, vha, 0x00fd,
4665 "Speed set by user : %s Gbps \n",
4666 qla2x00_get_link_speed_str(ha, ha->set_data_rate));
4667 icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
/* Read back to flush posted writes. */
4671 rd_reg_word(&ioreg->hccr);
4675 * qla2x00_init_rings() - Initializes firmware.
4678 * Beginning of request ring has initialization control block already built
4679 * by nvram config routine.
4681 * Returns 0 on success.
4684 qla2x00_init_rings(scsi_qla_host_t *vha)
4687 unsigned long flags = 0;
4689 struct qla_hw_data *ha = vha->hw;
4690 struct req_que *req;
4691 struct rsp_que *rsp;
4692 struct mid_init_cb_24xx *mid_init_cb =
4693 (struct mid_init_cb_24xx *) ha->init_cb;
4695 spin_lock_irqsave(&ha->hardware_lock, flags);
4697 /* Clear outstanding commands array. */
4698 for (que = 0; que < ha->max_req_queues; que++) {
4699 req = ha->req_q_map[que];
4700 if (!req || !test_bit(que, ha->req_qid_map))
/* Shadow out-pointer lives just past the last ring entry. */
4702 req->out_ptr = (uint16_t *)(req->ring + req->length);
/* Slot 0 is reserved; clear handles 1..num_outstanding_cmds-1. */
4704 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
4705 req->outstanding_cmds[cnt] = NULL;
4707 req->current_outstanding_cmd = 1;
4709 /* Initialize firmware. */
4710 req->ring_ptr = req->ring;
4711 req->ring_index = 0;
4712 req->cnt = req->length;
4715 for (que = 0; que < ha->max_rsp_queues; que++) {
4716 rsp = ha->rsp_q_map[que];
4717 if (!rsp || !test_bit(que, ha->rsp_qid_map))
/* Shadow in-pointer lives just past the last ring entry. */
4719 rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
4721 /* Initialize response queue entries */
4723 qlafx00_init_response_q_entries(rsp);
4725 qla2x00_init_response_q_entries(rsp);
/* Reset the target-mode ATIO ring as well. */
4728 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4729 ha->tgt.atio_ring_index = 0;
4730 /* Initialize ATIO queue entries */
4731 qlt_init_atio_q_entries(vha);
4733 ha->isp_ops->config_rings(vha);
4735 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* QLAFX00 uses its own init-firmware mailbox path. */
4737 if (IS_QLAFX00(ha)) {
4738 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
4742 /* Update any ISP specific firmware options before initialization. */
4743 ha->isp_ops->update_fw_options(vha);
4745 ql_dbg(ql_dbg_init, vha, 0x00d1,
4746 "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
4747 le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
4748 le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
4749 le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));
4751 if (ha->flags.npiv_supported) {
/* Loop topology on non-CNA parts limits the advertised vport count. */
4752 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
4753 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
4754 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
4757 if (IS_FWI2_CAPABLE(ha)) {
4758 mid_init_cb->options = cpu_to_le16(BIT_1);
4759 mid_init_cb->init_cb.execution_throttle =
4760 cpu_to_le16(ha->cur_fw_xcb_count);
/* Latch D-Port diagnostic support from firmware_options_1. */
4761 ha->flags.dport_enabled =
4762 (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4764 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
4765 (ha->flags.dport_enabled) ? "enabled" : "disabled");
4766 /* FA-WWPN Status */
4767 ha->flags.fawwpn_enabled =
4768 (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4770 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
4771 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
4772 /* Init_cb will be reused for other command(s). Save a backup copy of port_name */
4773 memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
4776 /* ELS pass through payload is limited by frame size. */
4777 if (ha->flags.edif_enabled)
4778 mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
4780 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
4783 ql_log(ql_log_fatal, vha, 0x00d2,
4784 "Init Firmware **** FAILED ****.\n");
4786 ql_dbg(ql_dbg_init, vha, 0x00d3,
4787 "Init Firmware -- success.\n");
/* Clear the user-staged exchange-offload overrides once fw is initialized. */
4789 vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
4796 * qla2x00_fw_ready() - Waits for firmware ready.
4799 * Returns 0 on success.
4802 qla2x00_fw_ready(scsi_qla_host_t *vha)
4805 unsigned long wtime, mtime, cs84xx_time;
4806 uint16_t min_wait; /* Minimum wait time if loop is down */
4807 uint16_t wait_time; /* Wait time if loop is coming ready */
4809 struct qla_hw_data *ha = vha->hw;
/* QLAFX00 has its own readiness poll. */
4811 if (IS_QLAFX00(vha->hw))
4812 return qlafx00_fw_ready(vha);
4814 /* Time to wait for loop down */
4815 if (IS_P3P_TYPE(ha))
4821 * Firmware should take at most one RATOV to login, plus 5 seconds for
4822 * our own processing.
4824 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
4825 wait_time = min_wait;
4828 /* Min wait time if loop down */
4829 mtime = jiffies + (min_wait * HZ);
4831 /* wait time before firmware ready */
4832 wtime = jiffies + (wait_time * HZ);
4834 /* Wait for ISP to finish LIP */
4835 if (!vha->flags.init_done)
4836 ql_log(ql_log_info, vha, 0x801e,
4837 "Waiting for LIP to complete.\n");
/* Poison the state array so stale values are detectable in the logs. */
4840 memset(state, -1, sizeof(state));
4841 rval = qla2x00_get_firmware_state(vha, state);
4842 if (rval == QLA_SUCCESS) {
/* Any state better than loss-of-sync means a cable is present. */
4843 if (state[0] < FSTATE_LOSS_OF_SYNC) {
4844 vha->device_flags &= ~DFLG_NO_CABLE;
/* ISP84xx may park waiting for a verify IOCB before reaching READY. */
4846 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
4847 ql_dbg(ql_dbg_taskm, vha, 0x801f,
4848 "fw_state=%x 84xx=%x.\n", state[0],
4850 if ((state[2] & FSTATE_LOGGED_IN) &&
4851 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
4852 ql_dbg(ql_dbg_taskm, vha, 0x8028,
4853 "Sending verify iocb.\n");
4855 cs84xx_time = jiffies;
4856 rval = qla84xx_init_chip(vha);
4857 if (rval != QLA_SUCCESS) {
4860 "Init chip failed.\n");
4864 /* Add time taken to initialize. */
/* Extend both deadlines by the 84xx init duration so it isn't charged. */
4865 cs84xx_time = jiffies - cs84xx_time;
4866 wtime += cs84xx_time;
4867 mtime += cs84xx_time;
4868 ql_dbg(ql_dbg_taskm, vha, 0x8008,
4869 "Increasing wait time by %ld. "
4870 "New time %ld.\n", cs84xx_time,
4873 } else if (state[0] == FSTATE_READY) {
4874 ql_dbg(ql_dbg_taskm, vha, 0x8037,
4875 "F/W Ready - OK.\n");
/* Refresh retry count / login timeout / R_A_TOV from firmware. */
4877 qla2x00_get_retry_cnt(vha, &ha->retry_count,
4878 &ha->login_timeout, &ha->r_a_tov);
4884 rval = QLA_FUNCTION_FAILED;
4886 if (atomic_read(&vha->loop_down_timer) &&
4887 state[0] != FSTATE_READY) {
4888 /* Loop down. Timeout on min_wait for states
4889 * other than Wait for Login.
4891 if (time_after_eq(jiffies, mtime)) {
4892 ql_log(ql_log_info, vha, 0x8038,
4893 "Cable is unplugged...\n");
4895 vha->device_flags |= DFLG_NO_CABLE;
4900 /* Mailbox cmd failed. Timeout on min_wait. */
4901 if (time_after_eq(jiffies, mtime) ||
4902 ha->flags.isp82xx_fw_hung)
/* Overall deadline (wtime) exhausted -- give up. */
4906 if (time_after_eq(jiffies, wtime))
4909 /* Delay for a while */
4913 ql_dbg(ql_dbg_taskm, vha, 0x803a,
4914 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
4915 state[1], state[2], state[3], state[4], state[5], jiffies);
4917 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
4918 ql_log(ql_log_warn, vha, 0x803b,
4919 "Firmware ready **** FAILED ****.\n");
4926 * qla2x00_configure_hba
4927 * Setup adapter context.
4930 * ha = adapter state pointer.
4939 qla2x00_configure_hba(scsi_qla_host_t *vha)
4948 char connect_type[22];
4949 struct qla_hw_data *ha = vha->hw;
4950 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
4952 unsigned long flags;
4954 /* Get host addresses. */
4955 rval = qla2x00_get_adapter_id(vha,
4956 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
4957 if (rval != QLA_SUCCESS) {
/* Transitional failures (loop transition, loop-down, loop_id 0x7) retry. */
4958 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
4959 IS_CNA_CAPABLE(ha) ||
4960 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
4961 ql_dbg(ql_dbg_disc, vha, 0x2008,
4962 "Loop is in a transition state.\n");
4964 ql_log(ql_log_warn, vha, 0x2009,
4965 "Unable to get host loop ID.\n");
/* loop_id 0x1b on the base port: try a link (re)initialize first. */
4966 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
4967 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
4968 ql_log(ql_log_warn, vha, 0x1151,
4969 "Doing link init.\n");
4970 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
/* Otherwise escalate to a full ISP abort via the DPC thread. */
4973 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4979 ql_log(ql_log_info, vha, 0x200a,
4980 "Cannot get topology - retrying.\n");
4981 return (QLA_FUNCTION_FAILED);
4984 vha->loop_id = loop_id;
/* Defaults; overridden per topology below. */
4987 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
4988 ha->operating_mode = LOOP;
4992 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
4994 ha->current_topology = ISP_CFG_NL;
4995 strcpy(connect_type, "(Loop)");
4999 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
5000 ha->switch_cap = sw_cap;
5001 ha->current_topology = ISP_CFG_FL;
5002 strcpy(connect_type, "(FL_Port)");
5006 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
5008 ha->operating_mode = P2P;
5009 ha->current_topology = ISP_CFG_N;
5010 strcpy(connect_type, "(N_Port-to-N_Port)");
5014 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
5015 ha->switch_cap = sw_cap;
5016 ha->operating_mode = P2P;
5017 ha->current_topology = ISP_CFG_F;
5018 strcpy(connect_type, "(F_Port)");
/* Unknown topology code: fall back to NL defaults. */
5022 ql_dbg(ql_dbg_disc, vha, 0x200f,
5023 "HBA in unknown topology %x, using NL.\n", topo);
5025 ha->current_topology = ISP_CFG_NL;
5026 strcpy(connect_type, "(Loop)");
5030 /* Save Host port and loop ID. */
5031 /* byte order - Big Endian */
5032 id.b.domain = domain;
5036 spin_lock_irqsave(&ha->hardware_lock, flags);
5037 if (vha->hw->flags.edif_enabled) {
5039 qla_update_host_map(vha, id);
/* topo==2 (N2N) with n2n_bigger set defers the host-map update. */
5040 } else if (!(topo == 2 && ha->flags.n2n_bigger))
5041 qla_update_host_map(vha, id);
5042 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5044 if (!vha->flags.init_done)
5045 ql_log(ql_log_info, vha, 0x2010,
5046 "Topology - %s, Host Loop address 0x%x.\n",
5047 connect_type, vha->loop_id);
/*
 * Populate ha->model_number / ha->model_desc from the NVRAM model bytes,
 * falling back to the qla2x00_model_name[] subsystem-ID table (older
 * parts only) or the supplied default, then let VPD tag 0x82 override the
 * description on FWI2-capable adapters.
 */
5053 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
/* Used both as the "is model blank?" comparand and as a length bound. */
5058 uint64_t zero[2] = { 0 };
5059 struct qla_hw_data *ha = vha->hw;
/* The name lookup table applies only to chip families older than 24xx. */
5060 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
5061 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
5063 if (len > sizeof(zero))
/* Non-blank NVRAM model string: copy it and trim trailing spaces/NULs. */
5065 if (memcmp(model, &zero, len) != 0) {
5066 memcpy(ha->model_number, model, len);
5067 st = en = ha->model_number;
5070 if (*en != 0x20 && *en != 0x00)
/* Low byte of the PCI subsystem device ID indexes the name table. */
5075 index = (ha->pdev->subsystem_device & 0xff);
5077 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5078 index < QLA_MODEL_NAMES)
5079 strscpy(ha->model_desc,
5080 qla2x00_model_name[index * 2 + 1],
5081 sizeof(ha->model_desc));
/* Blank NVRAM model: take both number and description from the table. */
5083 index = (ha->pdev->subsystem_device & 0xff);
5085 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5086 index < QLA_MODEL_NAMES) {
5087 strscpy(ha->model_number,
5088 qla2x00_model_name[index * 2],
5089 sizeof(ha->model_number));
5090 strscpy(ha->model_desc,
5091 qla2x00_model_name[index * 2 + 1],
5092 sizeof(ha->model_desc));
5094 strscpy(ha->model_number, def,
5095 sizeof(ha->model_number));
/* VPD field 0x82 (product identification) overrides the description. */
5098 if (IS_FWI2_CAPABLE(ha))
5099 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
5100 sizeof(ha->model_desc));
5103 /* On sparc systems, obtain port and node WWN from firmware
/*
 * On OpenFirmware systems (e.g. SPARC), override the NVRAM port/node WWNs
 * with the "port-wwn"/"node-wwn" properties of the PCI device's OF node.
 * Each property is applied only when present and at least WWN_SIZE bytes.
 */
5106 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
5109 struct qla_hw_data *ha = vha->hw;
5110 struct pci_dev *pdev = ha->pdev;
5111 struct device_node *dp = pci_device_to_OF_node(pdev);
5115 val = of_get_property(dp, "port-wwn", &len);
5116 if (val && len >= WWN_SIZE)
5117 memcpy(nv->port_name, val, WWN_SIZE);
5119 val = of_get_property(dp, "node-wwn", &len);
5120 if (val && len >= WWN_SIZE)
5121 memcpy(nv->node_name, val, WWN_SIZE);
5126 * NVRAM configuration for ISP 2xxx
5129 * ha = adapter block pointer.
5132 * initialization control block in response_ring
5133 * host adapters parameters in host adapter block
5139 qla2x00_nvram_config(scsi_qla_host_t *vha)
5144 uint8_t *dptr1, *dptr2;
5145 struct qla_hw_data *ha = vha->hw;
5146 init_cb_t *icb = ha->init_cb;
5147 nvram_t *nv = ha->nvram;
5148 uint8_t *ptr = ha->nvram;
5149 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
5153 /* Determine NVRAM starting address. */
5154 ha->nvram_size = sizeof(*nv);
5156 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
5157 if ((rd_reg_word(®->ctrl_status) >> 14) == 1)
5158 ha->nvram_base = 0x80;
5160 /* Get NVRAM data and calculate checksum. */
5161 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
5162 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
5165 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
5166 "Contents of NVRAM.\n");
5167 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
5168 nv, ha->nvram_size);
5170 /* Bad NVRAM data, set defaults parameters. */
5171 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
5172 nv->nvram_version < 1) {
5173 /* Reset NVRAM data. */
5174 ql_log(ql_log_warn, vha, 0x0064,
5175 "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
5176 chksum, nv->id, nv->nvram_version);
5177 ql_log(ql_log_warn, vha, 0x0065,
5179 "functioning (yet invalid -- WWPN) defaults.\n");
5182 * Set default initialization control block.
5184 memset(nv, 0, ha->nvram_size);
5185 nv->parameter_block_version = ICB_VERSION;
5187 if (IS_QLA23XX(ha)) {
5188 nv->firmware_options[0] = BIT_2 | BIT_1;
5189 nv->firmware_options[1] = BIT_7 | BIT_5;
5190 nv->add_firmware_options[0] = BIT_5;
5191 nv->add_firmware_options[1] = BIT_5 | BIT_4;
5192 nv->frame_payload_size = cpu_to_le16(2048);
5193 nv->special_options[1] = BIT_7;
5194 } else if (IS_QLA2200(ha)) {
5195 nv->firmware_options[0] = BIT_2 | BIT_1;
5196 nv->firmware_options[1] = BIT_7 | BIT_5;
5197 nv->add_firmware_options[0] = BIT_5;
5198 nv->add_firmware_options[1] = BIT_5 | BIT_4;
5199 nv->frame_payload_size = cpu_to_le16(1024);
5200 } else if (IS_QLA2100(ha)) {
5201 nv->firmware_options[0] = BIT_3 | BIT_1;
5202 nv->firmware_options[1] = BIT_5;
5203 nv->frame_payload_size = cpu_to_le16(1024);
5206 nv->max_iocb_allocation = cpu_to_le16(256);
5207 nv->execution_throttle = cpu_to_le16(16);
5208 nv->retry_count = 8;
5209 nv->retry_delay = 1;
5211 nv->port_name[0] = 33;
5212 nv->port_name[3] = 224;
5213 nv->port_name[4] = 139;
5215 qla2xxx_nvram_wwn_from_ofw(vha, nv);
5217 nv->login_timeout = 4;
5220 * Set default host adapter parameters
5222 nv->host_p[1] = BIT_2;
5223 nv->reset_delay = 5;
5224 nv->port_down_retry_count = 8;
5225 nv->max_luns_per_target = cpu_to_le16(8);
5226 nv->link_down_timeout = 60;
5231 /* Reset Initialization control block */
5232 memset(icb, 0, ha->init_cb_size);
5235 * Setup driver NVRAM options.
5237 nv->firmware_options[0] |= (BIT_6 | BIT_1);
5238 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
5239 nv->firmware_options[1] |= (BIT_5 | BIT_0);
5240 nv->firmware_options[1] &= ~BIT_4;
5242 if (IS_QLA23XX(ha)) {
5243 nv->firmware_options[0] |= BIT_2;
5244 nv->firmware_options[0] &= ~BIT_3;
5245 nv->special_options[0] &= ~BIT_6;
5246 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
5248 if (IS_QLA2300(ha)) {
5249 if (ha->fb_rev == FPM_2310) {
5250 strcpy(ha->model_number, "QLA2310");
5252 strcpy(ha->model_number, "QLA2300");
5255 qla2x00_set_model_info(vha, nv->model_number,
5256 sizeof(nv->model_number), "QLA23xx");
5258 } else if (IS_QLA2200(ha)) {
5259 nv->firmware_options[0] |= BIT_2;
5261 * 'Point-to-point preferred, else loop' is not a safe
5262 * connection mode setting.
5264 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
5266 /* Force 'loop preferred, else point-to-point'. */
5267 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
5268 nv->add_firmware_options[0] |= BIT_5;
5270 strcpy(ha->model_number, "QLA22xx");
5271 } else /*if (IS_QLA2100(ha))*/ {
5272 strcpy(ha->model_number, "QLA2100");
5276 * Copy over NVRAM RISC parameter block to initialization control block.
5278 dptr1 = (uint8_t *)icb;
5279 dptr2 = (uint8_t *)&nv->parameter_block_version;
5280 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
5282 *dptr1++ = *dptr2++;
5284 /* Copy 2nd half. */
5285 dptr1 = (uint8_t *)icb->add_firmware_options;
5286 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
5288 *dptr1++ = *dptr2++;
5289 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
5290 /* Use alternate WWN? */
5291 if (nv->host_p[1] & BIT_7) {
5292 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5293 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5296 /* Prepare nodename */
5297 if ((icb->firmware_options[1] & BIT_6) == 0) {
5299 * Firmware will apply the following mask if the nodename was
5302 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5303 icb->node_name[0] &= 0xF0;
5307 * Set host adapter parameters.
5311 * BIT_7 in the host-parameters section allows for modification to
5312 * internal driver logging.
5314 if (nv->host_p[0] & BIT_7)
5315 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
5316 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
5317 /* Always load RISC code on non ISP2[12]00 chips. */
5318 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
5319 ha->flags.disable_risc_code_load = 0;
5320 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
5321 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
5322 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
5323 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
5324 ha->flags.disable_serdes = 0;
5326 ha->operating_mode =
5327 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
5329 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
5330 sizeof(ha->fw_seriallink_options));
5332 /* save HBA serial number */
5333 ha->serial0 = icb->port_name[5];
5334 ha->serial1 = icb->port_name[6];
5335 ha->serial2 = icb->port_name[7];
5336 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5337 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5339 icb->execution_throttle = cpu_to_le16(0xFFFF);
5341 ha->retry_count = nv->retry_count;
5343 /* Set minimum login_timeout to 4 seconds. */
5344 if (nv->login_timeout != ql2xlogintimeout)
5345 nv->login_timeout = ql2xlogintimeout;
5346 if (nv->login_timeout < 4)
5347 nv->login_timeout = 4;
5348 ha->login_timeout = nv->login_timeout;
5350 /* Set minimum RATOV to 100 tenths of a second. */
5353 ha->loop_reset_delay = nv->reset_delay;
5355 /* Link Down Timeout = 0:
5357 * When Port Down timer expires we will start returning
5358 * I/O's to OS with "DID_NO_CONNECT".
5360 * Link Down Timeout != 0:
5362 * The driver waits for the link to come up after link down
5363 * before returning I/Os to OS with "DID_NO_CONNECT".
5365 if (nv->link_down_timeout == 0) {
5366 ha->loop_down_abort_time =
5367 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5369 ha->link_down_timeout = nv->link_down_timeout;
5370 ha->loop_down_abort_time =
5371 (LOOP_DOWN_TIME - ha->link_down_timeout);
5375 * Need enough time to try and get the port back.
5377 ha->port_down_retry_count = nv->port_down_retry_count;
5378 if (qlport_down_retry)
5379 ha->port_down_retry_count = qlport_down_retry;
5380 /* Set login_retry_count */
5381 ha->login_retry_count = nv->retry_count;
5382 if (ha->port_down_retry_count == nv->port_down_retry_count &&
5383 ha->port_down_retry_count > 3)
5384 ha->login_retry_count = ha->port_down_retry_count;
5385 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5386 ha->login_retry_count = ha->port_down_retry_count;
5387 if (ql2xloginretrycount)
5388 ha->login_retry_count = ql2xloginretrycount;
5390 icb->lun_enables = cpu_to_le16(0);
5391 icb->command_resource_count = 0;
5392 icb->immediate_notify_resource_count = 0;
5393 icb->timeout = cpu_to_le16(0);
5395 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5397 icb->firmware_options[0] &= ~BIT_3;
5398 icb->add_firmware_options[0] &=
5399 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
5400 icb->add_firmware_options[0] |= BIT_2;
5401 icb->response_accumulation_timer = 3;
5402 icb->interrupt_delay_timer = 5;
5404 vha->flags.process_response_queue = 1;
5407 if (!vha->flags.init_done) {
5408 ha->zio_mode = icb->add_firmware_options[0] &
5409 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5410 ha->zio_timer = icb->interrupt_delay_timer ?
5411 icb->interrupt_delay_timer : 2;
5413 icb->add_firmware_options[0] &=
5414 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
5415 vha->flags.process_response_queue = 0;
5416 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5417 ha->zio_mode = QLA_ZIO_MODE_6;
5419 ql_log(ql_log_info, vha, 0x0068,
5420 "ZIO mode %d enabled; timer delay (%d us).\n",
5421 ha->zio_mode, ha->zio_timer * 100);
5423 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
5424 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
5425 vha->flags.process_response_queue = 1;
5430 ql_log(ql_log_warn, vha, 0x0069,
5431 "NVRAM configuration failed.\n");
5436 void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
5440 old_state = atomic_read(&fcport->state);
5441 atomic_set(&fcport->state, state);
5443 /* Don't print state transitions during initial allocation of fcport */
5444 if (old_state && old_state != state) {
5445 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
5446 "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
5447 fcport->port_name, port_state_str[old_state],
5448 port_state_str[state], fcport->d_id.b.domain,
5449 fcport->d_id.b.area, fcport->d_id.b.al_pa);
/**
 * qla2x00_alloc_fcport() - Allocate a generic fcport.
 * @vha: host adapter the port will belong to
 * @flags: allocation flags
 *
 * Returns a pointer to the allocated fcport, or NULL, if none available.
 */
5461 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
5465 fcport = kzalloc(sizeof(fc_port_t), flags);
5469 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
5470 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
5472 if (!fcport->ct_desc.ct_sns) {
5473 ql_log(ql_log_warn, vha, 0xd049,
5474 "Failed to allocate ct_sns request.\n");
5479 /* Setup fcport template structure. */
5481 fcport->port_type = FCT_UNKNOWN;
5482 fcport->loop_id = FC_NO_LOOP_ID;
5483 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
5484 fcport->supported_classes = FC_COS_UNSPECIFIED;
5485 fcport->fp_speed = PORT_SPEED_UNKNOWN;
5487 fcport->disc_state = DSC_DELETED;
5488 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
5489 fcport->deleted = QLA_SESS_DELETED;
5490 fcport->login_retry = vha->hw->login_retry_count;
5491 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5492 fcport->logout_on_delete = 1;
5493 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
5494 fcport->tgt_short_link_down_cnt = 0;
5495 fcport->dev_loss_tmo = 0;
5497 if (!fcport->ct_desc.ct_sns) {
5498 ql_log(ql_log_warn, vha, 0xd049,
5499 "Failed to allocate ct_sns request.\n");
5504 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
5505 INIT_WORK(&fcport->free_work, qlt_free_session_done);
5506 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
5507 INIT_LIST_HEAD(&fcport->gnl_entry);
5508 INIT_LIST_HEAD(&fcport->list);
5509 INIT_LIST_HEAD(&fcport->tmf_pending);
5511 INIT_LIST_HEAD(&fcport->sess_cmd_list);
5512 spin_lock_init(&fcport->sess_cmd_lock);
5514 spin_lock_init(&fcport->edif.sa_list_lock);
5515 INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
5516 INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
5518 spin_lock_init(&fcport->edif.indx_list_lock);
5519 INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
/*
 * Tear down an fcport allocated by qla2x00_alloc_fcport(): return the
 * CT/SNS DMA buffer, unlink the port from the host's fcport list,
 * release its loop ID and drop its edif bookkeeping.
 */
qla2x00_free_fcport(fc_port_t *fcport)
	/* Free the DMA-coherent CT/SNS buffer and clear the stale pointer. */
	if (fcport->ct_desc.ct_sns) {
		dma_free_coherent(&fcport->vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
		    fcport->ct_desc.ct_sns_dma);
		fcport->ct_desc.ct_sns = NULL;
	/* Flush edif SA control entries before the port goes away. */
	qla_edif_flush_sa_ctl_lists(fcport);
	list_del(&fcport->list);
	/* Give the loop ID back to the allocator. */
	qla2x00_clear_loop_id(fcport);
	qla_edif_list_del(fcport);
/*
 * Read the firmware's PLOGI ELS (port-login) template and cache it in
 * ha->plogi_els_payld, marking ha->flags.plogi_template_valid on
 * success.  ha->init_cb is reused as a scratch DMA buffer for the
 * mailbox query.
 */
static void qla_get_login_template(scsi_qla_host_t *vha)
	struct qla_hw_data *ha = vha->hw;

	memset(ha->init_cb, 0, ha->init_cb_size);
	/* Clamp the copy to the common service parameter block size. */
	sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size);
	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00d1,
		    "PLOGI ELS param read fail.\n");
	q = (__be32 *)&ha->plogi_els_payld.fl_csp;

	bp = (uint32_t *)ha->init_cb;
	/* Store the cached template big-endian, word by word. */
	cpu_to_be32_array(q, bp, sz / 4);
	ha->flags.plogi_template_valid = 1;
5568 * qla2x00_configure_loop
5569 * Updates Fibre Channel Device Database with what is actually on loop.
5572 * ha = adapter block pointer.
5577 * 2 = database was full and device was not configured.
qla2x00_configure_loop(scsi_qla_host_t *vha)
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");

	/* Snapshot dpc flags; save_flags is used at exit to re-post work
	 * that a concurrent resync may have requested while we ran. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);
	qla_get_login_template(vha);

	/* Determine what we need to do */
	if ((ha->current_topology == ISP_CFG_FL ||
	    ha->current_topology == ISP_CFG_F) &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		/* Fabric topology: local-loop work becomes an RSCN scan. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Not online (or mid ISP abort): do both kinds of update. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
			rval = qla2x00_configure_local_loop(vha);

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
			rval = qla2x00_configure_fabric(vha);

	if (rval == QLA_SUCCESS) {
		/* A loop-down or resync request during the scan voids it. */
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			ha->flags.fw_init_done = 1;

			/*
			 * use link up to wake up app to get ready for
			 */
			if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
				qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
				    ha->link_data_rate);

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,

		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally. local port wwpn %8phN id %06x)\n",
		    __func__, vha->port_name, vha->d_id.b24);

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
/*
 * Discovery for N2N (direct-attach, no switch) topology: drive the login
 * state machine for the flagged peer port if one is already on the list;
 * otherwise bump the scan-retry counter and schedule another resync.
 */
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);

	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	/* Known N2N peer: hand it to the login state machine. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->n2n_flag) {
			qla24xx_fcport_handle_login(vha, fcport);

	/* No peer yet -- count the attempt under the work lock. */
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

	return QLA_FUNCTION_FAILED;
5731 qla_reinitialize_link(scsi_qla_host_t *vha)
5735 atomic_set(&vha->loop_state, LOOP_DOWN);
5736 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
5737 rval = qla2x00_full_login_lip(vha);
5738 if (rval == QLA_SUCCESS) {
5739 ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
5741 ql_dbg(ql_dbg_disc, vha, 0xd051,
5742 "Link reinitialization failed (%d)\n", rval);
5747 * qla2x00_configure_local_loop
5748 * Updates Fibre Channel Device Database with local loop devices.
5751 * ha = adapter block pointer.
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
	fc_port_t *fcport, *new_fcport;

	struct gid_list_info *gid;

	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Inititae N2N login. */
	return qla2x00_configure_n2n_loop(vha);

	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	if (rval != QLA_SUCCESS)

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    ha->gid_list, entries * sizeof(*ha->gid_list));

	/* Scan retry accounting is protected by the work lock. */
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		u8 loop_map_entries = 0;

		rc = qla2x00_get_fcal_position_map(vha, NULL,
		if (rc == QLA_SUCCESS && loop_map_entries > 1) {
			/*
			 * There are devices that are still not logged
			 * in. Reinitialize to give them a chance.
			 */
			qla_reinitialize_link(vha);
			return QLA_FUNCTION_FAILED;
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

	vha->scan.scan_retry = 0;

	/* Mark every known port as "not yet seen" before re-scanning. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	for (index = 0; index < entries; index++) {
		domain = gid->domain;
		/* 2100/2200 firmware uses an 8-bit loop ID field. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = gid->loop_id_2100;
			loop_id = le16_to_cpu(gid->loop_id);
		gid = (void *)gid + ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)

		/* Bypass if not same domain and area of adapter. */
		if (area && domain && ((area != vha->d_id.b.area) ||
		    (domain != vha->d_id.b.domain)) &&
		    (ha->current_topology == ISP_CFG_NL))

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,

			/* Known WWPN: refresh its addressing/state in place. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			fcport->scan_state = QLA_FCPORT_FOUND;
			if (fcport->login_retry == 0) {
				fcport->login_retry = vha->hw->login_retry_count;
				ql_dbg(ql_dbg_disc, vha, 0x2135,
				    "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
				    fcport->port_name, fcport->loop_id,
				    fcport->login_retry);

		/* New device, add to fcports list. */
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		/* Allocate a new replacement fcport. */
		fcport = new_fcport;

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd031,
			    "Failed to allocate memory for fcport.\n");
			rval = QLA_MEMORY_ALLOC_FAILED;
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		new_fcport->flags &= ~FCF_FABRIC_DEVICE;

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

	/* Second pass: act on scan results (lost vs. found ports). */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",

					qlt_schedule_sess_for_deletion(fcport);

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);

	qla2x00_free_fcport(new_fcport);

	ql_dbg(ql_dbg_disc, vha, 0x2098,
	    "Configure local loop error exit: rval=%x.\n", rval);
/*
 * Program the firmware's iIDMA (per-port data-rate) setting for a single
 * online fcport.  Bails out silently when the chip isn't iIDMA-capable,
 * the port isn't online, the reported speed is unknown/above the link
 * rate, or GPSC isn't supported.
 */
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))

	if (atomic_read(&fcport->state) != FCS_ONLINE)

	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
	    fcport->fp_speed > ha->link_data_rate ||
	    !ha->flags.gpsc_supported)

	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2004,
		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
		ql_dbg(ql_dbg_disc, vha, 0x2005,
		    "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
		    fcport->fp_speed, fcport->port_name);
/* Worker body for QLA_EVT_IIDMA: apply iIDMA speed then FCP priority. */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
/* Queue a QLA_EVT_IIDMA work item for @fcport on the host's work list. */
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
6023 /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Already registered/online -- nothing to do. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/* Register with the FC transport; roles are set afterwards. */
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");

	/* Back-pointer from the transport rport to our fcport. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	fcport->dev_loss_tmo = rport->dev_loss_tmo;

	rport->supported_classes = fcport->supported_classes;

	/* Translate our port-type bits into FC transport role bits. */
	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	fc_remote_port_rolechg(rport, rport_ids.roles);

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
	    __func__, fcport->port_name, vha->host_no,
	    rport->scsi_target_id, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
6076 * qla2x00_update_fcport
6077 * Updates device on list.
6080 * ha = adapter block pointer.
6081 * fcport = port structure pointer.
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
	/* Well-known/reserved addresses are never registered. */
	if (IS_SW_RESV_ADDR(fcport->d_id))

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->deleted = 0;
	/* On NL topology skip explicit logout at delete time. */
	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	/* Count a short link-down episode and re-arm the timestamp. */
	if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
		fcport->tgt_short_link_down_cnt++;
		fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;

	switch (vha->hw->current_topology) {
		fcport->keep_nport_handle = 1;

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* Register with the transport and/or the target core per mode. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
		    !vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
		    !vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);

	if (NVME_TARGET(vha->hw, fcport))
		qla_nvme_register_remote(vha, fcport);

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			qla24xx_post_gfpnid_work(vha, fcport);
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			qla24xx_post_gpsc_work(vha, fcport);

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
/*
 * Work-queue handler (fcport->reg_work): run the registration path for
 * an fcport, then re-check the RSCN generation -- if RSCNs arrived while
 * we were registering, act on the queued next_disc_state.
 */
void qla_register_fcport_fn(struct work_struct *work)
	fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
	/* Snapshot generation before registering to detect racing RSCNs. */
	u32 rscn_gen = fcport->rscn_gen;

	if (IS_SW_RESV_ADDR(fcport->d_id))

	qla2x00_update_fcport(fcport->vha, fcport);

	ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
	    "%s rscn gen %d/%d next DS %d\n", __func__,
	    rscn_gen, fcport->rscn_gen, fcport->next_disc_state);

	if (rscn_gen != fcport->rscn_gen) {
		/* RSCN(s) came in while registration */
		switch (fcport->next_disc_state) {
		case DSC_DELETE_PEND:
			qlt_schedule_sess_for_deletion(fcport);
			data[0] = data[1] = 0;
			qla2x00_post_async_adisc_work(fcport->vha, fcport,
6206 * qla2x00_configure_fabric
6207 * Setup SNS devices with loop ID's.
6210 * ha = adapter block pointer.
qla2x00_configure_fabric(scsi_qla_host_t *vha)
	uint16_t mb[MAILBOX_REGISTER_COUNT];

	struct qla_hw_data *ha = vha->hw;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* No switch: direct-attach; report success with no fabric. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	/* Target/dual mode needs RSCNs delivered to the driver. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",

	qla2x00_mgmt_svr_login(vha);

	/* Ensure we are logged into the SNS. */
	loop_id = NPH_SNS_LID(ha);
	rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
	    0xfc, mb, BIT_1|BIT_0);
	if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x20a1,
		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
		    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

	if (ql2xfdmienable &&
	    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
		qla2x00_fdmi_register(vha);

	/* One-shot registration of FC-4 types/features with the name server. */
	if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
		if (qla2x00_rft_id(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x20a2,
			    "Register FC-4 TYPE failed.\n");
			if (test_bit(LOOP_RESYNC_NEEDED,
		if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
			ql_dbg(ql_dbg_disc, vha, 0x209a,
			    "Register FC-4 Features failed.\n");
			if (test_bit(LOOP_RESYNC_NEEDED,
		if (vha->flags.nvme_enabled) {
			if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
				ql_dbg(ql_dbg_disc, vha, 0x2049,
				    "Register NVME FC Type Features failed.\n");
		if (qla2x00_rnn_id(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2104,
			    "Register Node Name failed.\n");
			if (test_bit(LOOP_RESYNC_NEEDED,
		} else if (qla2x00_rsnn_nn(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x209b,
			    "Register Symbolic Node Name failed.\n");
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))

	/* Mark the time right before querying FW for connected ports.
	 * This process is long, asynchronous and by the time it's done,
	 * collected information might not be accurate anymore. E.g.
	 * disconnected port might have re-connected and a brand new
	 * session has been created. In this case session's generation
	 * will be newer than discovery_gen. */
	qlt_do_generation_tick(vha, &discovery_gen);

	if (USE_ASYNC_SCAN(ha)) {
		rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->scan_state = QLA_FCPORT_SCAN;

		rval = qla2x00_find_all_fabric_devs(vha);

	if (rval != QLA_SUCCESS)

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	ql_dbg(ql_dbg_disc, vha, 0x2068,
	    "Configure fabric error exit rval=%d.\n", rval);
6348 * qla2x00_find_all_fabric_devs
6351 * ha = adapter block pointer.
6352 * dev = database device entry pointer.
6361 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
6365 fc_port_t *fcport, *new_fcport;
6370 int first_dev, last_dev;
6371 port_id_t wrap = {}, nxt_d_id;
6372 struct qla_hw_data *ha = vha->hw;
6373 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6374 unsigned long flags;
6378 /* Try GID_PT to get device list, else GAN. */
6380 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
6385 ql_dbg(ql_dbg_disc, vha, 0x209c,
6386 "GID_PT allocations failed, fallback on GA_NXT.\n");
6388 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
6389 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
6391 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6393 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
6395 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6397 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
6399 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6401 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
6403 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6407 /* If other queries succeeded probe for FC-4 type */
6409 qla2x00_gff_id(vha, swl);
6410 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6416 /* Allocate temporary fcport for any new fcports discovered. */
6417 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6418 if (new_fcport == NULL) {
6419 ql_log(ql_log_warn, vha, 0x209d,
6420 "Failed to allocate memory for fcport.\n");
6421 return (QLA_MEMORY_ALLOC_FAILED);
6423 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
6424 /* Set start port ID scan at adapter ID. */
6428 /* Starting free loop ID. */
6429 loop_id = ha->min_external_loopid;
6430 for (; loop_id <= ha->max_loop_id; loop_id++) {
6431 if (qla2x00_is_reserved_id(vha, loop_id))
6434 if (ha->current_topology == ISP_CFG_FL &&
6435 (atomic_read(&vha->loop_down_timer) ||
6436 LOOP_TRANSITION(vha))) {
6437 atomic_set(&vha->loop_down_timer, 0);
6438 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6439 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
6445 wrap.b24 = new_fcport->d_id.b24;
6447 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
6448 memcpy(new_fcport->node_name,
6449 swl[swl_idx].node_name, WWN_SIZE);
6450 memcpy(new_fcport->port_name,
6451 swl[swl_idx].port_name, WWN_SIZE);
6452 memcpy(new_fcport->fabric_port_name,
6453 swl[swl_idx].fabric_port_name, WWN_SIZE);
6454 new_fcport->fp_speed = swl[swl_idx].fp_speed;
6455 new_fcport->fc4_type = swl[swl_idx].fc4_type;
6457 new_fcport->nvme_flag = 0;
6458 if (vha->flags.nvme_enabled &&
6459 swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
6460 ql_log(ql_log_info, vha, 0x2131,
6461 "FOUND: NVME port %8phC as FC Type 28h\n",
6462 new_fcport->port_name);
6465 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
6471 /* Send GA_NXT to the switch */
6472 rval = qla2x00_ga_nxt(vha, new_fcport);
6473 if (rval != QLA_SUCCESS) {
6474 ql_log(ql_log_warn, vha, 0x209e,
6475 "SNS scan failed -- assuming "
6476 "zero-entry result.\n");
6482 /* If wrap on switch device list, exit. */
6484 wrap.b24 = new_fcport->d_id.b24;
6486 } else if (new_fcport->d_id.b24 == wrap.b24) {
6487 ql_dbg(ql_dbg_disc, vha, 0x209f,
6488 "Device wrap (%02x%02x%02x).\n",
6489 new_fcport->d_id.b.domain,
6490 new_fcport->d_id.b.area,
6491 new_fcport->d_id.b.al_pa);
6495 /* Bypass if same physical adapter. */
6496 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
6499 /* Bypass virtual ports of the same host. */
6500 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
6503 /* Bypass if same domain and area of adapter. */
6504 if (((new_fcport->d_id.b24 & 0xffff00) ==
6505 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
6509 /* Bypass reserved domain fields. */
6510 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
6513 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
6514 if (ql2xgffidenable &&
6515 (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
6516 new_fcport->fc4_type != 0))
6519 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6521 /* Locate matching device in database. */
6523 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6524 if (memcmp(new_fcport->port_name, fcport->port_name,
6528 fcport->scan_state = QLA_FCPORT_FOUND;
6532 /* Update port state. */
6533 memcpy(fcport->fabric_port_name,
6534 new_fcport->fabric_port_name, WWN_SIZE);
6535 fcport->fp_speed = new_fcport->fp_speed;
6538 * If address the same and state FCS_ONLINE
6539 * (or in target mode), nothing changed.
6541 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
6542 (atomic_read(&fcport->state) == FCS_ONLINE ||
6543 (vha->host->active_mode == MODE_TARGET))) {
6547 if (fcport->login_retry == 0)
6548 fcport->login_retry =
6549 vha->hw->login_retry_count;
6551 * If device was not a fabric device before.
6553 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
6554 fcport->d_id.b24 = new_fcport->d_id.b24;
6555 qla2x00_clear_loop_id(fcport);
6556 fcport->flags |= (FCF_FABRIC_DEVICE |
6562 * Port ID changed or device was marked to be updated;
6563 * Log it out if still logged in and mark it for
6566 if (qla_tgt_mode_enabled(base_vha)) {
6567 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
6568 "port changed FC ID, %8phC"
6569 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
6571 fcport->d_id.b.domain,
6572 fcport->d_id.b.area,
6573 fcport->d_id.b.al_pa,
6575 new_fcport->d_id.b.domain,
6576 new_fcport->d_id.b.area,
6577 new_fcport->d_id.b.al_pa);
6578 fcport->d_id.b24 = new_fcport->d_id.b24;
6582 fcport->d_id.b24 = new_fcport->d_id.b24;
6583 fcport->flags |= FCF_LOGIN_NEEDED;
6587 if (found && NVME_TARGET(vha->hw, fcport)) {
6588 if (fcport->disc_state == DSC_DELETE_PEND) {
6589 qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
6590 vha->fcport_count--;
6591 fcport->login_succ = 0;
6596 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6599 /* If device was not in our fcports list, then add it. */
6600 new_fcport->scan_state = QLA_FCPORT_FOUND;
6601 list_add_tail(&new_fcport->list, &vha->vp_fcports);
6603 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6606 /* Allocate a new replacement fcport. */
6607 nxt_d_id.b24 = new_fcport->d_id.b24;
6608 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6609 if (new_fcport == NULL) {
6610 ql_log(ql_log_warn, vha, 0xd032,
6611 "Memory allocation failed for fcport.\n");
6612 return (QLA_MEMORY_ALLOC_FAILED);
6614 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
6615 new_fcport->d_id.b24 = nxt_d_id.b24;
6618 qla2x00_free_fcport(new_fcport);
6621 * Logout all previous fabric dev marked lost, except FCP2 devices.
6623 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6624 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6627 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
6630 if (fcport->scan_state == QLA_FCPORT_SCAN) {
6631 if ((qla_dual_mode_enabled(vha) ||
6632 qla_ini_mode_enabled(vha)) &&
6633 atomic_read(&fcport->state) == FCS_ONLINE) {
6634 qla2x00_mark_device_lost(vha, fcport,
6635 ql2xplogiabsentdevice);
6636 if (fcport->loop_id != FC_NO_LOOP_ID &&
6637 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
6638 fcport->port_type != FCT_INITIATOR &&
6639 fcport->port_type != FCT_BROADCAST) {
6640 ql_dbg(ql_dbg_disc, vha, 0x20f0,
6641 "%s %d %8phC post del sess\n",
6644 qlt_schedule_sess_for_deletion(fcport);
6650 if (fcport->scan_state == QLA_FCPORT_FOUND &&
6651 (fcport->flags & FCF_LOGIN_NEEDED) != 0)
6652 qla24xx_fcport_handle_login(vha, fcport);
6657 /* FW does not set aside Loop id for MGMT Server/FFFFFAh */
/*
 * Reserve a loop ID for the FC management server (port FFFFFAh).
 * The physical port (vp_idx == 0) always claims the fixed
 * NPH_MGMT_SERVER ID; NPIV ports scan downward from
 * NPH_MGMT_SERVER - vp_idx for the first free bit in loop_id_map,
 * holding vport_slock while searching/claiming.
 * NOTE(review): excerpted listing -- the final return path of the
 * original function is not visible here.
 */
6659 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
6661 int loop_id = FC_NO_LOOP_ID;
6662 int lid = NPH_MGMT_SERVER - vha->vp_idx;
6663 unsigned long flags;
6664 struct qla_hw_data *ha = vha->hw;
/* Base port: the management-server ID is fixed, no search needed. */
6666 if (vha->vp_idx == 0) {
6667 set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
6668 return NPH_MGMT_SERVER;
6671 /* pick id from high and work down to low */
6672 spin_lock_irqsave(&ha->vport_slock, flags);
6673 for (; lid > 0; lid--) {
6674 if (!test_bit(lid, vha->hw->loop_id_map)) {
/* claim the first free ID found */
6675 set_bit(lid, vha->hw->loop_id_map);
6680 spin_unlock_irqrestore(&ha->vport_slock, flags);
6686 * qla2x00_fabric_login
6687 * Issue fabric login command.
6690 * ha = adapter block pointer.
6691 * device = pointer to FC device type structure.
6694 * 0 - Login successfully
6696 * 2 - Initiator device
/*
 * Attempts a fabric (switch) login for @fcport via the ISP-specific
 * fabric_login op, then interprets the mailbox status to retry with a
 * different loop ID, classify the port, or mark the device lost.
 * *next_loopid is updated so the caller can continue the ID search.
 * NOTE(review): excerpted listing -- the enclosing retry loop and the
 * function's return statement are not visible in this view.
 */
6700 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
6701 uint16_t *next_loopid)
6705 uint16_t tmp_loopid;
6706 uint16_t mb[MAILBOX_REGISTER_COUNT];
6707 struct qla_hw_data *ha = vha->hw;
6713 ql_dbg(ql_dbg_disc, vha, 0x2000,
6714 "Trying Fabric Login w/loop id 0x%04x for port "
6716 fcport->loop_id, fcport->d_id.b.domain,
6717 fcport->d_id.b.area, fcport->d_id.b.al_pa);
6719 /* Login fcport on switch. */
6720 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
6721 fcport->d_id.b.domain, fcport->d_id.b.area,
6722 fcport->d_id.b.al_pa, mb, BIT_0);
6723 if (rval != QLA_SUCCESS) {
/* mb[0] carries the mailbox completion status from firmware. */
6726 if (mb[0] == MBS_PORT_ID_USED) {
6728 * Device has another loop ID. The firmware team
6729 * recommends the driver perform an implicit login with
6730 * the specified ID again. The ID we just used is save
6731 * here so we return with an ID that can be tried by
6735 tmp_loopid = fcport->loop_id;
6736 fcport->loop_id = mb[1];
6738 ql_dbg(ql_dbg_disc, vha, 0x2001,
6739 "Fabric Login: port in use - next loop "
6740 "id=0x%04x, port id= %02x%02x%02x.\n",
6741 fcport->loop_id, fcport->d_id.b.domain,
6742 fcport->d_id.b.area, fcport->d_id.b.al_pa);
/* Login succeeded: record next ID and classify the remote port. */
6744 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
6749 /* A retry occurred before. */
6750 *next_loopid = tmp_loopid;
6753 * No retry occurred before. Just increment the
6754 * ID value for next login.
6756 *next_loopid = (fcport->loop_id + 1);
6759 if (mb[1] & BIT_0) {
6760 fcport->port_type = FCT_INITIATOR;
6762 fcport->port_type = FCT_TARGET;
/* BIT_1 in mb[1] indicates an FCP-2 (tape-class) device. */
6763 if (mb[1] & BIT_1) {
6764 fcport->flags |= FCF_FCP2_DEVICE;
6769 fcport->supported_classes |= FC_COS_CLASS2;
6771 fcport->supported_classes |= FC_COS_CLASS3;
6773 if (IS_FWI2_CAPABLE(ha)) {
6776 FCF_CONF_COMP_SUPPORTED;
6781 } else if (mb[0] == MBS_LOOP_ID_USED) {
6783 * Loop ID already used, try next loop ID.
6786 rval = qla2x00_find_new_loop_id(vha, fcport);
6787 if (rval != QLA_SUCCESS) {
6788 /* Ran out of loop IDs to use */
6791 } else if (mb[0] == MBS_COMMAND_ERROR) {
6793 * Firmware possibly timed out during login. If NO
6794 * retries are left to do then the device is declared
6797 *next_loopid = fcport->loop_id;
6798 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6799 fcport->d_id.b.domain, fcport->d_id.b.area,
6800 fcport->d_id.b.al_pa);
6801 qla2x00_mark_device_lost(vha, fcport, 1);
6807 * unrecoverable / not handled error
6809 ql_dbg(ql_dbg_disc, vha, 0x2002,
6810 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
6811 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
6812 fcport->d_id.b.area, fcport->d_id.b.al_pa,
6813 fcport->loop_id, jiffies);
/* Give up: log out, release the loop ID and stop retrying. */
6815 *next_loopid = fcport->loop_id;
6816 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6817 fcport->d_id.b.domain, fcport->d_id.b.area,
6818 fcport->d_id.b.al_pa);
6819 qla2x00_clear_loop_id(fcport);
6820 fcport->login_retry = 0;
6831 * qla2x00_local_device_login
6832 * Issue local device login command.
6835 * ha = adapter block pointer.
6836 * loop_id = loop id of device to login to.
6838 * Returns (Where's the #define!!!!):
6839 * 0 - Login successfully
/*
 * Performs a local-loop (non-fabric) login to @fcport and maps the
 * mailbox status into a driver return code.
 * NOTE(review): excerpted listing -- the error-code assignments and the
 * return statement of the original body are not visible here.
 */
6844 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
6847 uint16_t mb[MAILBOX_REGISTER_COUNT];
6849 memset(mb, 0, sizeof(mb));
6850 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
6851 if (rval == QLA_SUCCESS) {
6852 /* Interrogate mailbox registers for any errors */
6853 if (mb[0] == MBS_COMMAND_ERROR)
6855 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6856 /* device not in PCB table */
6864 * qla2x00_loop_resync
6865 * Resync with fibre channel devices.
6868 * ha = adapter block pointer.
/*
 * Waits for firmware readiness, issues a sync marker, then reconfigures
 * the loop/fabric topology (qla2x00_configure_loop or the FX00 variant).
 * Loops while LOOP_RESYNC_NEEDED keeps being re-asserted, bailing out on
 * loop-down, ISP abort, or wait-time expiry.
 */
6874 qla2x00_loop_resync(scsi_qla_host_t *vha)
6876 int rval = QLA_SUCCESS;
6879 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6880 if (vha->flags.online) {
6881 if (!(rval = qla2x00_fw_ready(vha))) {
6882 /* Wait at most MAX_TARGET RSCNs for a stable link. */
6885 if (!IS_QLAFX00(vha->hw)) {
6887 * Issue a marker after FW becomes
6890 qla2x00_marker(vha, vha->hw->base_qpair,
6892 vha->marker_needed = 0;
6895 /* Remap devices on Loop. */
6896 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6898 if (IS_QLAFX00(vha->hw))
6899 qlafx00_configure_devices(vha);
6901 qla2x00_configure_loop(vha);
/* Repeat while resync keeps being requested and no abort/loop-down. */
6904 } while (!atomic_read(&vha->loop_down_timer) &&
6905 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6906 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
6911 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6912 return (QLA_FUNCTION_FAILED);
6915 ql_dbg(ql_dbg_disc, vha, 0x206c,
6916 "%s *** FAILED ***.\n", __func__);
6922 * qla2x00_perform_loop_resync
6923 * Description: This function will set the appropriate flags and call
6924 * qla2x00_loop_resync. If successful loop will be resynced
6925 * Arguments : scsi_qla_host_t pointer
6926 * returm : Success or Failure
/*
 * Guarded by LOOP_RESYNC_ACTIVE (test_and_set_bit) so only one resync
 * runs at a time; primes the dpc flags, runs the resync, and marks the
 * loop dead on failure.
 */
6929 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
6933 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
6934 /*Configure the flags so that resync happens properly*/
6935 atomic_set(&ha->loop_down_timer, 0);
6936 if (!(ha->device_flags & DFLG_NO_CABLE)) {
6937 atomic_set(&ha->loop_state, LOOP_UP);
6938 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
6939 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
6940 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
6942 rval = qla2x00_loop_resync(ha);
/* Resync failed: declare the loop dead. */
6944 atomic_set(&ha->loop_state, LOOP_DEAD);
6946 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
6952 /* Assumes idc_lock always held on entry */
/*
 * Determines whether this function (PCI port) is the IDC reset owner.
 * Reads driver-presence and device-partition-info registers (direct CRB
 * reads on 8044, qla83xx_rd_reg otherwise), finds the lowest other FCoE
 * function, and claims ownership iff no other protocol drivers are
 * present and our port number is the lowest FCoE function.
 */
6954 qla83xx_reset_ownership(scsi_qla_host_t *vha)
6956 struct qla_hw_data *ha = vha->hw;
6957 uint32_t drv_presence, drv_presence_mask;
6958 uint32_t dev_part_info1, dev_part_info2, class_type;
6959 uint32_t class_type_mask = 0x3;
6960 uint16_t fcoe_other_function = 0xffff, i;
6962 if (IS_QLA8044(ha)) {
6963 drv_presence = qla8044_rd_direct(vha,
6964 QLA8044_CRB_DRV_ACTIVE_INDEX);
6965 dev_part_info1 = qla8044_rd_direct(vha,
6966 QLA8044_CRB_DEV_PART_INFO_INDEX);
6967 dev_part_info2 = qla8044_rd_direct(vha,
6968 QLA8044_CRB_DEV_PART_INFO2);
6970 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6971 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
6972 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
/* PARTINFO1 holds class types of functions 0-7, 4 bits each. */
6974 for (i = 0; i < 8; i++) {
6975 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
6976 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6977 (i != ha->portnum)) {
6978 fcoe_other_function = i;
/* Not found in 0-7: PARTINFO2 covers functions 8-15. */
6982 if (fcoe_other_function == 0xffff) {
6983 for (i = 0; i < 8; i++) {
6984 class_type = ((dev_part_info2 >> (i * 4)) &
6986 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6987 ((i + 8) != ha->portnum)) {
6988 fcoe_other_function = i + 8;
6994 * Prepare drv-presence mask based on fcoe functions present.
6995 * However consider only valid physical fcoe function numbers (0-15).
6997 drv_presence_mask = ~((1 << (ha->portnum)) |
6998 ((fcoe_other_function == 0xffff) ?
6999 0 : (1 << (fcoe_other_function))));
7001 /* We are the reset owner iff:
7002 * - No other protocol drivers present.
7003 * - This is the lowest among fcoe functions. */
7004 if (!(drv_presence & drv_presence_mask) &&
7005 (ha->portnum < fcoe_other_function)) {
7006 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
7007 "This host is Reset owner.\n");
7008 ha->flags.nic_core_reset_owner = 1;
/*
 * Sets this function's bit in the QLA83XX_IDC_DRIVER_ACK register
 * (read-modify-write). Double-underscore prefix: caller is expected to
 * hold the idc lock, consistent with the sibling helpers here.
 */
7013 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
7015 int rval = QLA_SUCCESS;
7016 struct qla_hw_data *ha = vha->hw;
7019 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
7020 if (rval == QLA_SUCCESS) {
7021 drv_ack |= (1 << ha->portnum);
7022 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
/*
 * Clears this function's bit in the QLA83XX_IDC_DRIVER_ACK register
 * (read-modify-write); inverse of __qla83xx_set_drv_ack().
 */
7029 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
7031 int rval = QLA_SUCCESS;
7032 struct qla_hw_data *ha = vha->hw;
7035 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
7036 if (rval == QLA_SUCCESS) {
7037 drv_ack &= ~(1 << ha->portnum);
7038 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
7044 /* Assumes idc-lock always held on entry */
/*
 * Writes an audit record to the QLA83XX_IDC_AUDIT register.
 * Register layout as encoded below: bits 0-6 = port number,
 * bit 7 = audit type, bits 8+ = timestamp (seconds) or duration.
 * TIMESTAMP records the reset start time; COMPLETION records elapsed
 * seconds since that timestamp.
 */
7046 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
7048 struct qla_hw_data *ha = vha->hw;
7049 uint32_t idc_audit_reg = 0, duration_secs = 0;
7051 switch (audit_type) {
7052 case IDC_AUDIT_TIMESTAMP:
7053 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
7054 idc_audit_reg = (ha->portnum) |
7055 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
7056 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
7059 case IDC_AUDIT_COMPLETION:
7060 duration_secs = ((jiffies_to_msecs(jiffies) -
7061 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
7062 idc_audit_reg = (ha->portnum) |
7063 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
7064 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
7068 ql_log(ql_log_warn, vha, 0xb078,
7069 "Invalid audit type specified.\n");
7074 /* Assumes idc_lock always held on entry */
/*
 * Initiates a NIC core reset through the IDC state machine.
 * Fails fast if reset is disabled via idc-control. If we are the reset
 * owner and the device is READY, transitions the IDC state to
 * NEED_RESET and records an audit timestamp; otherwise waits (dropping
 * and re-taking the idc lock) for another function to move the state
 * out of READY, then acks via the drv-ack register.
 */
7076 qla83xx_initiating_reset(scsi_qla_host_t *vha)
7078 struct qla_hw_data *ha = vha->hw;
7079 uint32_t idc_control, dev_state;
7081 __qla83xx_get_idc_control(vha, &idc_control);
7082 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
7083 ql_log(ql_log_info, vha, 0xb080,
7084 "NIC Core reset has been disabled. idc-control=0x%x\n",
7086 return QLA_FUNCTION_FAILED;
7089 /* Set NEED-RESET iff in READY state and we are the reset-owner */
7090 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
7091 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
7092 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
7093 QLA8XXX_DEV_NEED_RESET);
7094 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
7095 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
7097 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
7098 qdev_state(dev_state));
7100 /* SV: XXX: Is timeout required here? */
7101 /* Wait for IDC state change READY -> NEED_RESET */
7102 while (dev_state == QLA8XXX_DEV_READY) {
7103 qla83xx_idc_unlock(vha, 0);
7105 qla83xx_idc_lock(vha, 0);
7106 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
7110 /* Send IDC ack by writing to drv-ack register */
7111 __qla83xx_set_drv_ack(vha);
/* Write the IDC control register; caller holds the idc lock. */
7117 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
7119 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
/* Read the IDC control register into *idc_control; caller holds the idc lock. */
7123 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
7125 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
/*
 * Checks whether this function's bit is still set in the IDC
 * driver-presence register, i.e. whether we are still a participant in
 * the inter-driver communication protocol. Returns QLA_TEST_FAILED if
 * our bit has been cleared (removed from IDC participation).
 */
7129 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
7131 uint32_t drv_presence = 0;
7132 struct qla_hw_data *ha = vha->hw;
7134 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
7135 if (drv_presence & (1 << ha->portnum))
7138 return QLA_TEST_FAILED;
/*
 * Top-level NIC core reset for 83xx parts. Under the idc lock:
 * verifies IDC participation, determines reset ownership, initiates the
 * reset, and (on success) runs the IDC state handler until the device
 * reaches READY/FAILED, clearing nic_core_hung and the drv-ack bit.
 * Bails out immediately if the device is already in FAILED state.
 */
7142 qla83xx_nic_core_reset(scsi_qla_host_t *vha)
7144 int rval = QLA_SUCCESS;
7145 struct qla_hw_data *ha = vha->hw;
7147 ql_dbg(ql_dbg_p3p, vha, 0xb058,
7148 "Entered %s().\n", __func__);
7150 if (vha->device_flags & DFLG_DEV_FAILED) {
7151 ql_log(ql_log_warn, vha, 0xb059,
7152 "Device in unrecoverable FAILED state.\n");
7153 return QLA_FUNCTION_FAILED;
7156 qla83xx_idc_lock(vha, 0);
7158 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
7159 ql_log(ql_log_warn, vha, 0xb05a,
7160 "Function=0x%x has been removed from IDC participation.\n",
7162 rval = QLA_FUNCTION_FAILED;
7166 qla83xx_reset_ownership(vha);
7168 rval = qla83xx_initiating_reset(vha);
7171 * Perform reset if we are the reset-owner,
7172 * else wait till IDC state changes to READY/FAILED.
7174 if (rval == QLA_SUCCESS) {
7175 rval = qla83xx_idc_state_handler(vha);
7177 if (rval == QLA_SUCCESS)
7178 ha->flags.nic_core_hung = 0;
7179 __qla83xx_clear_drv_ack(vha);
7183 qla83xx_idc_unlock(vha, 0);
7185 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
/*
 * Captures an MCTP firmware dump into a lazily-allocated DMA-coherent
 * buffer (MCTP_DUMP_SIZE bytes), then -- for port 0 only, when no NIC
 * core reset handler is already active -- restarts the NIC firmware.
 * Returns QLA_FUNCTION_FAILED if the board is not MCTP capable.
 * NOTE(review): excerpted listing -- early-return/goto lines between the
 * checks are not visible here.
 */
7191 qla2xxx_mctp_dump(scsi_qla_host_t *vha)
7193 struct qla_hw_data *ha = vha->hw;
7194 int rval = QLA_FUNCTION_FAILED;
7196 if (!IS_MCTP_CAPABLE(ha)) {
7197 /* This message can be removed from the final version */
7198 ql_log(ql_log_info, vha, 0x506d,
7199 "This board is not MCTP capable\n");
/* Allocate the dump buffer once; reused on subsequent captures. */
7203 if (!ha->mctp_dump) {
7204 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
7205 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
7207 if (!ha->mctp_dump) {
7208 ql_log(ql_log_warn, vha, 0x506e,
7209 "Failed to allocate memory for mctp dump\n");
7214 #define MCTP_DUMP_STR_ADDR 0x00000000
7215 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
7216 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
7217 if (rval != QLA_SUCCESS) {
7218 ql_log(ql_log_warn, vha, 0x506f,
7219 "Failed to capture mctp dump\n");
7221 ql_log(ql_log_info, vha, 0x5070,
7222 "Mctp dump capture for host (%ld/%p).\n",
7223 vha->host_no, ha->mctp_dump);
7224 ha->mctp_dumped = 1;
/* Only the port-0 function restarts NIC firmware, and only once. */
7227 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
7228 ha->flags.nic_core_reset_hdlr_active = 1;
7229 rval = qla83xx_restart_nic_firmware(vha);
7231 /* NIC Core reset failed. */
7232 ql_log(ql_log_warn, vha, 0x5071,
7233 "Failed to restart nic firmware\n");
7235 ql_dbg(ql_dbg_p3p, vha, 0xb084,
7236 "Restarted NIC firmware successfully.\n");
7237 ha->flags.nic_core_reset_hdlr_active = 0;
7245 * qla2x00_quiesce_io
7246 * Description: This function will block the new I/Os
7247 * Its not aborting any I/Os as context
7248 * is not destroyed during quiescence
7249 * Arguments: scsi_qla_host_t
/*
 * Marks the loop down on the base host and every vport (marking all
 * devices lost), arms the loop-down timer, then waits for in-flight
 * commands to drain. Vport iteration uses the vref_count inc/dec
 * pattern so a vport cannot be freed while the lock is dropped.
 */
7253 qla2x00_quiesce_io(scsi_qla_host_t *vha)
7255 struct qla_hw_data *ha = vha->hw;
7256 struct scsi_qla_host *vp, *tvp;
7257 unsigned long flags;
7259 ql_dbg(ql_dbg_dpc, vha, 0x401d,
7260 "Quiescing I/O - ha=%p.\n", ha);
7262 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
7263 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7264 atomic_set(&vha->loop_state, LOOP_DOWN);
7265 qla2x00_mark_all_devices_lost(vha);
7267 spin_lock_irqsave(&ha->vport_slock, flags);
7268 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
/* Pin the vport before dropping the lock to call out. */
7269 atomic_inc(&vp->vref_count);
7270 spin_unlock_irqrestore(&ha->vport_slock, flags);
7272 qla2x00_mark_all_devices_lost(vp);
7274 spin_lock_irqsave(&ha->vport_slock, flags);
7275 atomic_dec(&vp->vref_count);
7277 spin_unlock_irqrestore(&ha->vport_slock, flags);
7279 if (!atomic_read(&vha->loop_down_timer))
7280 atomic_set(&vha->loop_down_timer,
7283 /* Wait for pending cmds to complete */
7284 WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
/*
 * Pre-reset cleanup for an ISP abort: takes the chip offline (except
 * P3P/82xx, which must keep interrupts on so pending commands can
 * complete), resets per-qpair chip_reset generation counters, purges
 * in-flight mailbox commands, marks the loop down on the base host and
 * all vports, clears per-fcport async/login state, and finally fails
 * all outstanding commands with DID_RESET.
 */
7289 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
7291 struct qla_hw_data *ha = vha->hw;
7292 struct scsi_qla_host *vp, *tvp;
7293 unsigned long flags;
7297 /* For ISP82XX, driver waits for completion of the commands.
7298 * online flag should be set.
7300 if (!(IS_P3P_TYPE(ha)))
7301 vha->flags.online = 0;
7302 ha->flags.chip_reset_done = 0;
7303 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
7304 vha->qla_stats.total_isp_aborts++;
7306 ql_log(ql_log_info, vha, 0x00af,
7307 "Performing ISP error recovery - ha=%p.\n", ha);
/* purge_mbox tells the mailbox path to abandon waits during reset. */
7309 ha->flags.purge_mbox = 1;
7310 /* For ISP82XX, reset_chip is just disabling interrupts.
7311 * Driver waits for the completion of the commands.
7312 * the interrupts need to be enabled.
7314 if (!(IS_P3P_TYPE(ha)))
7315 ha->isp_ops->reset_chip(vha);
7317 ha->link_data_rate = PORT_SPEED_UNKNOWN;
7319 ha->flags.rida_fmt2 = 0;
7320 ha->flags.n2n_ae = 0;
7321 ha->flags.lip_ae = 0;
7322 ha->current_topology = 0;
7324 ha->flags.fw_init_done = 0;
/* Propagate the new chip_reset generation to every queue pair. */
7326 ha->base_qpair->chip_reset = ha->chip_reset;
7327 ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
7328 ha->base_qpair->prev_completion_cnt = 0;
7329 for (i = 0; i < ha->max_qpairs; i++) {
7330 if (ha->queue_pair_map[i]) {
7331 ha->queue_pair_map[i]->chip_reset =
7332 ha->base_qpair->chip_reset;
7333 ha->queue_pair_map[i]->cmd_cnt =
7334 ha->queue_pair_map[i]->cmd_completion_cnt = 0;
7335 ha->base_qpair->prev_completion_cnt = 0;
7339 /* purge MBox commands */
7340 if (atomic_read(&ha->num_pend_mbx_stage3)) {
7341 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
7342 complete(&ha->mbx_intr_comp);
/* Wait until all pending mailbox stages have drained. */
7346 while (atomic_read(&ha->num_pend_mbx_stage3) ||
7347 atomic_read(&ha->num_pend_mbx_stage2) ||
7348 atomic_read(&ha->num_pend_mbx_stage1)) {
7354 ha->flags.purge_mbox = 0;
7356 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
7357 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7358 atomic_set(&vha->loop_state, LOOP_DOWN);
7359 qla2x00_mark_all_devices_lost(vha);
7361 spin_lock_irqsave(&ha->vport_slock, flags);
7362 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
/* Pin each vport (vref_count) before dropping the lock. */
7363 atomic_inc(&vp->vref_count);
7364 spin_unlock_irqrestore(&ha->vport_slock, flags);
7366 qla2x00_mark_all_devices_lost(vp);
7368 spin_lock_irqsave(&ha->vport_slock, flags);
7369 atomic_dec(&vp->vref_count);
7371 spin_unlock_irqrestore(&ha->vport_slock, flags);
7373 if (!atomic_read(&vha->loop_down_timer))
7374 atomic_set(&vha->loop_down_timer,
7378 /* Clear all async request states across all VPs. */
7379 list_for_each_entry(fcport, &vha->vp_fcports, list) {
7380 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7381 fcport->scan_state = 0;
7383 spin_lock_irqsave(&ha->vport_slock, flags);
7384 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7385 atomic_inc(&vp->vref_count);
7386 spin_unlock_irqrestore(&ha->vport_slock, flags);
7388 list_for_each_entry(fcport, &vp->vp_fcports, list)
7389 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7391 spin_lock_irqsave(&ha->vport_slock, flags);
7392 atomic_dec(&vp->vref_count);
7394 spin_unlock_irqrestore(&ha->vport_slock, flags);
7396 /* Make sure for ISP 82XX IO DMA is complete */
7397 if (IS_P3P_TYPE(ha)) {
7398 qla82xx_chip_reset_cleanup(vha);
7399 ql_log(ql_log_info, vha, 0x00b4,
7400 "Done chip reset cleanup.\n");
7402 /* Done waiting for pending commands. Reset online flag */
7403 vha->flags.online = 0;
7406 /* Requeue all commands in outstanding command list. */
7407 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
7408 /* memory barrier */
7414 * Resets ISP and aborts all outstanding commands.
7417 * ha = adapter block pointer.
/*
 * Full ISP error-recovery path: cleanup, register-disconnect checks,
 * NVRAM reconfiguration, chip restart, FCE/EFT trace re-enable, retry
 * bookkeeping on failure, and finally vport recovery and (for 8031)
 * FCoE drv-presence restoration.
 * NOTE(review): excerpted listing -- several early-return/goto lines and
 * the final return are not visible here.
 */
7423 qla2x00_abort_isp(scsi_qla_host_t *vha)
7427 struct qla_hw_data *ha = vha->hw;
7428 struct scsi_qla_host *vp, *tvp;
7429 struct req_que *req = ha->req_q_map[0];
7430 unsigned long flags;
7432 if (vha->flags.online) {
7433 qla2x00_abort_isp_cleanup(vha);
7435 vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
7436 vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
7438 if (vha->hw->flags.port_isolated)
/* PCI/register disconnect: abort recovery, nothing to talk to. */
7441 if (qla2x00_isp_reg_stat(ha)) {
7442 ql_log(ql_log_info, vha, 0x803f,
7443 "ISP Abort - ISP reg disconnect, exiting.\n");
7447 if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
7448 ha->flags.chip_reset_done = 1;
7449 vha->flags.online = 1;
7451 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7455 if (IS_QLA8031(ha)) {
7456 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
7457 "Clearing fcoe driver presence.\n");
7458 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
7459 ql_dbg(ql_dbg_p3p, vha, 0xb073,
7460 "Error while clearing DRV-Presence.\n");
7463 if (unlikely(pci_channel_offline(ha->pdev) &&
7464 ha->flags.pci_channel_io_perm_failure)) {
7465 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
/* Skip initiator-side restart work for modes that don't need it. */
7470 switch (vha->qlini_mode) {
7471 case QLA2XXX_INI_MODE_DISABLED:
7472 if (!qla_tgt_mode_enabled(vha))
7475 case QLA2XXX_INI_MODE_DUAL:
7476 if (!qla_dual_mode_enabled(vha) &&
7477 !qla_ini_mode_enabled(vha))
7480 case QLA2XXX_INI_MODE_ENABLED:
7485 ha->isp_ops->get_flash_version(vha, req->ring);
7487 if (qla2x00_isp_reg_stat(ha)) {
7488 ql_log(ql_log_info, vha, 0x803f,
7489 "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
7492 ha->isp_ops->nvram_config(vha);
7494 if (qla2x00_isp_reg_stat(ha)) {
7495 ql_log(ql_log_info, vha, 0x803f,
7496 "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
7499 if (!qla2x00_restart_isp(vha)) {
7500 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7502 if (!atomic_read(&vha->loop_down_timer)) {
7504 * Issue marker command only when we are going
7505 * to start the I/O .
7507 vha->marker_needed = 1;
7510 vha->flags.online = 1;
7512 ha->isp_ops->enable_intrs(ha);
7514 ha->isp_abort_cnt = 0;
7515 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7517 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
7518 qla2x00_get_fw_version(vha);
/* Re-enable the FCE trace buffer after the chip restart. */
7520 ha->flags.fce_enabled = 1;
7522 fce_calc_size(ha->fce_bufs));
7523 rval = qla2x00_enable_fce_trace(vha,
7524 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
7527 ql_log(ql_log_warn, vha, 0x8033,
7528 "Unable to reinitialize FCE "
7530 ha->flags.fce_enabled = 0;
7535 memset(ha->eft, 0, EFT_SIZE);
7536 rval = qla2x00_enable_eft_trace(vha,
7537 ha->eft_dma, EFT_NUM_BUFFERS);
7539 ql_log(ql_log_warn, vha, 0x8034,
7540 "Unable to reinitialize EFT "
7544 } else { /* failed the ISP abort */
7545 vha->flags.online = 1;
7546 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
/* Retry budget exhausted: disable the board for good. */
7547 if (ha->isp_abort_cnt == 0) {
7548 ql_log(ql_log_fatal, vha, 0x8035,
7549 "ISP error recover failed - "
7550 "board disabled.\n");
7552 * The next call disables the board
7555 qla2x00_abort_isp_cleanup(vha);
7556 vha->flags.online = 0;
7557 clear_bit(ISP_ABORT_RETRY,
7560 } else { /* schedule another ISP abort */
7561 ha->isp_abort_cnt--;
7562 ql_dbg(ql_dbg_taskm, vha, 0x8020,
7563 "ISP abort - retry remaining %d.\n",
7568 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7569 ql_dbg(ql_dbg_taskm, vha, 0x8021,
7570 "ISP error recovery - retrying (%d) "
7571 "more times.\n", ha->isp_abort_cnt);
7572 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7579 if (vha->hw->flags.port_isolated) {
7580 qla2x00_abort_isp_cleanup(vha);
7585 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
7586 qla2x00_configure_hba(vha);
/* Recover every vport, pinned via vref_count across the lock drop. */
7587 spin_lock_irqsave(&ha->vport_slock, flags);
7588 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7590 atomic_inc(&vp->vref_count);
7591 spin_unlock_irqrestore(&ha->vport_slock, flags);
7593 qla2x00_vp_abort_isp(vp);
7595 spin_lock_irqsave(&ha->vport_slock, flags);
7596 atomic_dec(&vp->vref_count);
7599 spin_unlock_irqrestore(&ha->vport_slock, flags);
7601 if (IS_QLA8031(ha)) {
7602 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
7603 "Setting back fcoe driver presence.\n");
7604 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
7605 ql_dbg(ql_dbg_p3p, vha, 0xb074,
7606 "Error while setting DRV-Presence.\n");
7609 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
7617 * qla2x00_restart_isp
7618 * restarts the ISP after a reset
7621 * ha = adapter block pointer.
/*
 * Reloads firmware if needed (chip_diag + setup_chip), reinitializes
 * rings and the extra 25xx queues, waits for firmware readiness,
 * issues a sync marker and schedules a loop resync. A missing cable
 * (DFLG_NO_CABLE) is treated as success.
 */
7627 qla2x00_restart_isp(scsi_qla_host_t *vha)
7630 struct qla_hw_data *ha = vha->hw;
7632 /* If firmware needs to be loaded */
7633 if (qla2x00_isp_firmware(vha)) {
7634 vha->flags.online = 0;
7635 status = ha->isp_ops->chip_diag(vha);
7638 status = qla2x00_setup_chip(vha);
7643 status = qla2x00_init_rings(vha);
7647 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7648 ha->flags.chip_reset_done = 1;
7650 /* Initialize the queues in use */
7651 qla25xx_init_queues(ha);
7653 status = qla2x00_fw_ready(vha);
7655 /* if no cable then assume it's good */
7656 return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
7659 /* Issue a marker after FW becomes ready. */
7660 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
7661 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * Re-initializes all non-base (index >= 1) response and request queues
 * after a chip reset, clearing BIT_0 of each queue's options before
 * issuing the init mailbox command. Failures are logged per queue but
 * iteration continues.
 */
7667 qla25xx_init_queues(struct qla_hw_data *ha)
7669 struct rsp_que *rsp = NULL;
7670 struct req_que *req = NULL;
7671 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7675 for (i = 1; i < ha->max_rsp_queues; i++) {
7676 rsp = ha->rsp_q_map[i];
7677 if (rsp && test_bit(i, ha->rsp_qid_map)) {
7678 rsp->options &= ~BIT_0;
7679 ret = qla25xx_init_rsp_que(base_vha, rsp);
7680 if (ret != QLA_SUCCESS)
7681 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
7682 "%s Rsp que: %d init failed.\n",
7685 ql_dbg(ql_dbg_init, base_vha, 0x0100,
7686 "%s Rsp que: %d inited.\n",
7690 for (i = 1; i < ha->max_req_queues; i++) {
7691 req = ha->req_q_map[i];
7692 if (req && test_bit(i, ha->req_qid_map)) {
7693 /* Clear outstanding commands array. */
7694 req->options &= ~BIT_0;
7695 ret = qla25xx_init_req_que(base_vha, req);
7696 if (ret != QLA_SUCCESS)
7697 ql_dbg(ql_dbg_init, base_vha, 0x0101,
7698 "%s Req que: %d init failed.\n",
7701 ql_dbg(ql_dbg_init, base_vha, 0x0102,
7702 "%s Req que: %d inited.\n",
7710 * qla2x00_reset_adapter
7714 * ha = adapter block pointer.
/*
 * Legacy (2x00) RISC reset: take the host offline, disable interrupts,
 * then pulse HCCR reset/release under hardware_lock. Each write is
 * followed by a read-back to force PCI posting.
 */
7717 qla2x00_reset_adapter(scsi_qla_host_t *vha)
7719 unsigned long flags = 0;
7720 struct qla_hw_data *ha = vha->hw;
7721 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
7723 vha->flags.online = 0;
7724 ha->isp_ops->disable_intrs(ha);
7726 spin_lock_irqsave(&ha->hardware_lock, flags);
7727 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
7728 rd_reg_word(®->hccr); /* PCI Posting. */
7729 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
7730 rd_reg_word(®->hccr); /* PCI Posting. */
7731 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * 24xx-family RISC reset: same shape as qla2x00_reset_adapter() but
 * using the 32-bit ISP24 HCCR set/release-pause encodings. P3P parts
 * are skipped entirely; polling-type parts re-enable interrupts after
 * the reset.
 */
7737 qla24xx_reset_adapter(scsi_qla_host_t *vha)
7739 unsigned long flags = 0;
7740 struct qla_hw_data *ha = vha->hw;
7741 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
7743 if (IS_P3P_TYPE(ha))
7746 vha->flags.online = 0;
7747 ha->isp_ops->disable_intrs(ha);
7749 spin_lock_irqsave(&ha->hardware_lock, flags);
7750 wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET);
7751 rd_reg_dword(®->hccr);
7752 wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE);
7753 rd_reg_dword(®->hccr);
7754 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7756 if (IS_NOPOLLING_TYPE(ha))
7757 ha->isp_ops->enable_intrs(ha);
7762 /* On sparc systems, obtain port and node WWN from firmware
/*
 * Overrides the NVRAM port/node WWNs with "port-wwn"/"node-wwn"
 * properties from the PCI device's OpenFirmware node (sparc). Each
 * property is copied only if present and at least WWN_SIZE bytes long.
 */
7765 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
7766 struct nvram_24xx *nv)
7769 struct qla_hw_data *ha = vha->hw;
7770 struct pci_dev *pdev = ha->pdev;
7771 struct device_node *dp = pci_device_to_OF_node(pdev);
7775 val = of_get_property(dp, "port-wwn", &len);
7776 if (val && len >= WWN_SIZE)
7777 memcpy(nv->port_name, val, WWN_SIZE);
7779 val = of_get_property(dp, "node-wwn", &len);
7780 if (val && len >= WWN_SIZE)
7781 memcpy(nv->node_name, val, WWN_SIZE);
/*
 * qla24xx_nvram_config() - Read and validate ISP24xx NVRAM, then build the
 * firmware initialization control block (ICB) and driver tunables from it.
 * On a bad checksum / "ISP " ID / version, the NVRAM image is replaced with
 * safe defaults (including synthesized WWNs) before the ICB is built.
 *
 * NOTE(review): this extract is incomplete -- blank lines, else-branches
 * and closing braces were dropped, and the leading digits on each line are
 * residue of the extraction.  Code left byte-identical; comments only.
 */
7786 qla24xx_nvram_config(scsi_qla_host_t *vha)
7789 struct init_cb_24xx *icb;
7790 struct nvram_24xx *nv;
7792 uint8_t *dptr1, *dptr2;
7795 struct qla_hw_data *ha = vha->hw;
/* ICB and cached NVRAM buffers live in qla_hw_data. */
7798 icb = (struct init_cb_24xx *)ha->init_cb;
7801 /* Determine NVRAM starting address. */
7802 if (ha->port_no == 0) {
7803 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
7804 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
7806 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
7807 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
7810 ha->nvram_size = sizeof(*nv);
7811 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7813 /* Get VPD data into cache */
7814 ha->vpd = ha->nvram + VPD_OFFSET;
7815 ha->isp_ops->read_nvram(vha, ha->vpd,
7816 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
7818 /* Get NVRAM data into cache and calculate checksum. */
/* Sum of all little-endian dwords must be zero for valid NVRAM. */
7819 dptr = (__force __le32 *)nv;
7820 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
7821 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7822 chksum += le32_to_cpu(*dptr);
7824 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
7825 "Contents of NVRAM\n");
7826 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
7827 nv, ha->nvram_size);
7829 /* Bad NVRAM data, set defaults parameters. */
7830 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
7831 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
7832 /* Reset NVRAM data. */
7833 ql_log(ql_log_warn, vha, 0x006b,
7834 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
7835 chksum, nv->id, nv->nvram_version);
7836 ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
7837 ql_log(ql_log_warn, vha, 0x006c,
7838 "Falling back to functioning (yet invalid -- WWPN) "
7842 * Set default initialization control block.
7844 memset(nv, 0, ha->nvram_size);
7845 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7846 nv->version = cpu_to_le16(ICB_VERSION);
7847 nv->frame_payload_size = cpu_to_le16(2048);
7848 nv->execution_throttle = cpu_to_le16(0xFFFF);
7849 nv->exchange_count = cpu_to_le16(0);
7850 nv->hard_address = cpu_to_le16(124);
/* Synthesized default WWPN/WWNN (QLogic OUI 00:e0:8b); byte 1 varies per PCI function. */
7851 nv->port_name[0] = 0x21;
7852 nv->port_name[1] = 0x00 + ha->port_no + 1;
7853 nv->port_name[2] = 0x00;
7854 nv->port_name[3] = 0xe0;
7855 nv->port_name[4] = 0x8b;
7856 nv->port_name[5] = 0x1c;
7857 nv->port_name[6] = 0x55;
7858 nv->port_name[7] = 0x86;
7859 nv->node_name[0] = 0x20;
7860 nv->node_name[1] = 0x00;
7861 nv->node_name[2] = 0x00;
7862 nv->node_name[3] = 0xe0;
7863 nv->node_name[4] = 0x8b;
7864 nv->node_name[5] = 0x1c;
7865 nv->node_name[6] = 0x55;
7866 nv->node_name[7] = 0x86;
/* SPARC/OpenFirmware may supply real WWNs overriding the defaults above. */
7867 qla24xx_nvram_wwn_from_ofw(vha, nv);
7868 nv->login_retry_count = cpu_to_le16(8);
7869 nv->interrupt_delay_timer = cpu_to_le16(0);
7870 nv->login_timeout = cpu_to_le16(0);
7871 nv->firmware_options_1 =
7872 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7873 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7874 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7875 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7876 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7877 nv->efi_parameters = cpu_to_le32(0);
7878 nv->reset_delay = 5;
7879 nv->max_luns_per_target = cpu_to_le16(128);
7880 nv->port_down_retry_count = cpu_to_le16(30);
7881 nv->link_down_timeout = cpu_to_le16(30);
/* Target mode tweaks the LIP login behaviour chosen above. */
7886 if (qla_tgt_mode_enabled(vha)) {
7887 /* Don't enable full login after initial LIP */
7888 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
7889 /* Don't enable LIP full login for initiator */
7890 nv->host_p &= cpu_to_le32(~BIT_10);
7893 qlt_24xx_config_nvram_stage1(vha, nv);
7895 /* Reset Initialization control block */
7896 memset(icb, 0, ha->init_cb_size);
/* The ICB is populated by copying two byte ranges straight out of NVRAM. */
7898 /* Copy 1st segment. */
7899 dptr1 = (uint8_t *)icb;
7900 dptr2 = (uint8_t *)&nv->version;
7901 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7903 *dptr1++ = *dptr2++;
7905 icb->login_retry_count = nv->login_retry_count;
7906 icb->link_down_on_nos = nv->link_down_on_nos;
7908 /* Copy 2nd segment. */
7909 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7910 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7911 cnt = (uint8_t *)&icb->reserved_3 -
7912 (uint8_t *)&icb->interrupt_delay_timer;
7914 *dptr1++ = *dptr2++;
7915 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
7917 * Setup driver NVRAM options.
7919 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7922 qlt_24xx_config_nvram_stage2(vha, icb);
/* host_p BIT_15: use the alternate WWN pair stored in NVRAM. */
7924 if (nv->host_p & cpu_to_le32(BIT_15)) {
7925 /* Use alternate WWN? */
7926 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7927 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7930 /* Prepare nodename */
7931 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
7933 * Firmware will apply the following mask if the nodename was
7936 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7937 icb->node_name[0] &= 0xF0;
7940 /* Set host adapter parameters. */
7941 ha->flags.disable_risc_code_load = 0;
7942 ha->flags.enable_lip_reset = 0;
7943 ha->flags.enable_lip_full_login =
7944 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
7945 ha->flags.enable_target_reset =
7946 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
7947 ha->flags.enable_led_scheme = 0;
7948 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
7950 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7951 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7953 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
7954 sizeof(ha->fw_seriallink_options24));
7956 /* save HBA serial number */
7957 ha->serial0 = icb->port_name[5];
7958 ha->serial1 = icb->port_name[6];
7959 ha->serial2 = icb->port_name[7];
7960 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7961 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7963 icb->execution_throttle = cpu_to_le16(0xFFFF);
7965 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7967 /* Set minimum login_timeout to 4 seconds. */
7968 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7969 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7970 if (le16_to_cpu(nv->login_timeout) < 4)
7971 nv->login_timeout = cpu_to_le16(4);
7972 ha->login_timeout = le16_to_cpu(nv->login_timeout);
7974 /* Set minimum RATOV to 100 tenths of a second. */
7977 ha->loop_reset_delay = nv->reset_delay;
7979 /* Link Down Timeout = 0:
7981 * When Port Down timer expires we will start returning
7982 * I/O's to OS with "DID_NO_CONNECT".
7984 * Link Down Timeout != 0:
7986 * The driver waits for the link to come up after link down
7987 * before returning I/Os to OS with "DID_NO_CONNECT".
7989 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7990 ha->loop_down_abort_time =
7991 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7993 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7994 ha->loop_down_abort_time =
7995 (LOOP_DOWN_TIME - ha->link_down_timeout);
7998 /* Need enough time to try and get the port back. */
7999 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
8000 if (qlport_down_retry)
8001 ha->port_down_retry_count = qlport_down_retry;
8003 /* Set login_retry_count */
8004 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
8005 if (ha->port_down_retry_count ==
8006 le16_to_cpu(nv->port_down_retry_count) &&
8007 ha->port_down_retry_count > 3)
8008 ha->login_retry_count = ha->port_down_retry_count;
8009 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
8010 ha->login_retry_count = ha->port_down_retry_count;
8011 if (ql2xloginretrycount)
8012 ha->login_retry_count = ql2xloginretrycount;
8014 /* N2N: driver will initiate Login instead of FW */
8015 icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* ZIO (zero-interrupt operation) is only (re)configured on first init. */
8018 if (!vha->flags.init_done) {
8019 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
8020 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
8021 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
8022 le16_to_cpu(icb->interrupt_delay_timer) : 2;
8024 icb->firmware_options_2 &= cpu_to_le32(
8025 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
8026 if (ha->zio_mode != QLA_ZIO_DISABLED) {
8027 ha->zio_mode = QLA_ZIO_MODE_6;
8029 ql_log(ql_log_info, vha, 0x006f,
8030 "ZIO mode %d enabled; timer delay (%d us).\n",
8031 ha->zio_mode, ha->zio_timer * 100);
8033 icb->firmware_options_2 |= cpu_to_le32(
8034 (uint32_t)ha->zio_mode);
8035 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
/* Reached only on the (dropped) failure path -- rval != QLA_SUCCESS. */
8039 ql_log(ql_log_warn, vha, 0x0070,
8040 "NVRAM configuration failed.\n");
/*
 * qla27xx_print_image() - Emit a one-line debug summary of a flash
 * image-status record: status mask, generation, version, component
 * bitmap, checksum and signature.
 * NOTE(review): the argument line that passes 'name' (and possibly a
 * second string) for the two leading "%s %s" specifiers was dropped by
 * extraction -- confirm against the upstream driver before relying on
 * this format call.
 */
8046 qla27xx_print_image(struct scsi_qla_host *vha, char *name,
8047 struct qla27xx_image_status *image_status)
8049 ql_dbg(ql_dbg_init, vha, 0x018b,
8050 "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
8052 image_status->image_status_mask,
8053 le16_to_cpu(image_status->generation),
8054 image_status->ver_major,
8055 image_status->ver_minor,
8056 image_status->bitmap,
8057 le32_to_cpu(image_status->checksum),
8058 le32_to_cpu(image_status->signature));
8062 qla28xx_check_aux_image_status_signature(
8063 struct qla27xx_image_status *image_status)
8065 ulong signature = le32_to_cpu(image_status->signature);
8067 return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
8071 qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
8073 ulong signature = le32_to_cpu(image_status->signature);
8076 signature != QLA27XX_IMG_STATUS_SIGN &&
8077 signature != QLA28XX_IMG_STATUS_SIGN;
8081 qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
8083 __le32 *p = (__force __le32 *)image_status;
8084 uint n = sizeof(*image_status) / sizeof(*p);
8088 sum += le32_to_cpup(p);
8094 qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
8096 return aux->bitmap & bitmask ?
8097 QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
8101 qla28xx_component_status(
8102 struct active_regions *active_regions, struct qla27xx_image_status *aux)
8104 active_regions->aux.board_config =
8105 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
8107 active_regions->aux.vpd_nvram =
8108 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
8110 active_regions->aux.npiv_config_0_1 =
8111 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
8113 active_regions->aux.npiv_config_2_3 =
8114 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
8116 active_regions->aux.nvme_params =
8117 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS);
8121 qla27xx_compare_image_generation(
8122 struct qla27xx_image_status *pri_image_status,
8123 struct qla27xx_image_status *sec_image_status)
8125 /* calculate generation delta as uint16 (this accounts for wrap) */
8127 le16_to_cpu(pri_image_status->generation) -
8128 le16_to_cpu(sec_image_status->generation);
8130 ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
/*
 * qla28xx_get_aux_images() - Decide, per auxiliary component, whether the
 * primary or secondary flash region is active on an ISP28xx.
 * Reads the primary and secondary aux image-status records from flash,
 * validates each (signature + checksum), and applies the newest valid and
 * active record's component bitmap to @active_regions.
 *
 * NOTE(review): this extract is missing lines (closing braces and the
 * check_sec_image:/check_valid_image: label lines); leading digits are
 * extraction residue.  Code left byte-identical; comments only.
 */
8136 qla28xx_get_aux_images(
8137 struct scsi_qla_host *vha, struct active_regions *active_regions)
8139 struct qla_hw_data *ha = vha->hw;
8140 struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
8141 bool valid_pri_image = false, valid_sec_image = false;
8142 bool active_pri_image = false, active_sec_image = false;
/* --- Primary aux record: read, then validate signature and checksum. --- */
8144 if (!ha->flt_region_aux_img_status_pri) {
8145 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
8146 goto check_sec_image;
8149 qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
8150 ha->flt_region_aux_img_status_pri,
8151 sizeof(pri_aux_image_status) >> 2);
8152 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
8154 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
8155 ql_dbg(ql_dbg_init, vha, 0x018b,
8156 "Primary aux image signature (%#x) not valid\n",
8157 le32_to_cpu(pri_aux_image_status.signature));
8158 goto check_sec_image;
8161 if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
8162 ql_dbg(ql_dbg_init, vha, 0x018c,
8163 "Primary aux image checksum failed\n");
8164 goto check_sec_image;
8167 valid_pri_image = true;
/* Bit 0 of the status mask marks the record "active". */
8169 if (pri_aux_image_status.image_status_mask & 1) {
8170 ql_dbg(ql_dbg_init, vha, 0x018d,
8171 "Primary aux image is active\n");
8172 active_pri_image = true;
/* --- Secondary aux record: same read-and-validate sequence. --- */
8176 if (!ha->flt_region_aux_img_status_sec) {
8177 ql_dbg(ql_dbg_init, vha, 0x018a,
8178 "Secondary aux image not addressed\n");
8179 goto check_valid_image;
8182 qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
8183 ha->flt_region_aux_img_status_sec,
8184 sizeof(sec_aux_image_status) >> 2);
8185 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
8187 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
8188 ql_dbg(ql_dbg_init, vha, 0x018b,
8189 "Secondary aux image signature (%#x) not valid\n",
8190 le32_to_cpu(sec_aux_image_status.signature));
8191 goto check_valid_image;
8194 if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
8195 ql_dbg(ql_dbg_init, vha, 0x018c,
8196 "Secondary aux image checksum failed\n");
8197 goto check_valid_image;
8200 valid_sec_image = true;
8202 if (sec_aux_image_status.image_status_mask & 1) {
8203 ql_dbg(ql_dbg_init, vha, 0x018d,
8204 "Secondary aux image is active\n");
8205 active_sec_image = true;
/* When both records are valid+active the newer generation wins. */
8209 if (valid_pri_image && active_pri_image &&
8210 valid_sec_image && active_sec_image) {
8211 if (qla27xx_compare_image_generation(&pri_aux_image_status,
8212 &sec_aux_image_status) >= 0) {
8213 qla28xx_component_status(active_regions,
8214 &pri_aux_image_status);
8216 qla28xx_component_status(active_regions,
8217 &sec_aux_image_status);
8219 } else if (valid_pri_image && active_pri_image) {
8220 qla28xx_component_status(active_regions, &pri_aux_image_status);
8221 } else if (valid_sec_image && active_sec_image) {
8222 qla28xx_component_status(active_regions, &sec_aux_image_status);
8225 ql_dbg(ql_dbg_init, vha, 0x018f,
8226 "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n",
8227 active_regions->aux.board_config,
8228 active_regions->aux.vpd_nvram,
8229 active_regions->aux.npiv_config_0_1,
8230 active_regions->aux.npiv_config_2_3,
8231 active_regions->aux.nvme_params);
/*
 * qla27xx_get_active_image() - Determine the active firmware image
 * (primary vs. secondary flash region) on ISP27xx/28xx.
 * Reads and validates both image-status records; a valid+active
 * secondary overrides the primary when it is newer (or when no primary
 * was chosen).  The result lands in active_regions->global.
 *
 * NOTE(review): this extract is missing lines (closing braces and the
 * check_sec_image:/check_valid_image: labels, plus one string argument of
 * the final ql_dbg); leading digits are extraction residue.  Code left
 * byte-identical; comments only.
 */
8235 qla27xx_get_active_image(struct scsi_qla_host *vha,
8236 struct active_regions *active_regions)
8238 struct qla_hw_data *ha = vha->hw;
8239 struct qla27xx_image_status pri_image_status, sec_image_status;
8240 bool valid_pri_image = false, valid_sec_image = false;
8241 bool active_pri_image = false, active_sec_image = false;
/* --- Primary image-status record. --- */
8243 if (!ha->flt_region_img_status_pri) {
8244 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
8245 goto check_sec_image;
/* Unlike the aux path, the primary read's return value is checked here. */
8248 if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
8249 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
8252 goto check_sec_image;
8254 qla27xx_print_image(vha, "Primary image", &pri_image_status);
8256 if (qla27xx_check_image_status_signature(&pri_image_status)) {
8257 ql_dbg(ql_dbg_init, vha, 0x018b,
8258 "Primary image signature (%#x) not valid\n",
8259 le32_to_cpu(pri_image_status.signature));
8260 goto check_sec_image;
8263 if (qla27xx_image_status_checksum(&pri_image_status)) {
8264 ql_dbg(ql_dbg_init, vha, 0x018c,
8265 "Primary image checksum failed\n");
8266 goto check_sec_image;
8269 valid_pri_image = true;
8271 if (pri_image_status.image_status_mask & 1) {
8272 ql_dbg(ql_dbg_init, vha, 0x018d,
8273 "Primary image is active\n");
8274 active_pri_image = true;
/* --- Secondary image-status record. --- */
8278 if (!ha->flt_region_img_status_sec) {
8279 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
8280 goto check_valid_image;
8283 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
8284 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
8285 qla27xx_print_image(vha, "Secondary image", &sec_image_status);
8287 if (qla27xx_check_image_status_signature(&sec_image_status)) {
8288 ql_dbg(ql_dbg_init, vha, 0x018b,
8289 "Secondary image signature (%#x) not valid\n",
8290 le32_to_cpu(sec_image_status.signature));
8291 goto check_valid_image;
8294 if (qla27xx_image_status_checksum(&sec_image_status)) {
8295 ql_dbg(ql_dbg_init, vha, 0x018c,
8296 "Secondary image checksum failed\n");
8297 goto check_valid_image;
8300 valid_sec_image = true;
8302 if (sec_image_status.image_status_mask & 1) {
8303 ql_dbg(ql_dbg_init, vha, 0x018d,
8304 "Secondary image is active\n");
8305 active_sec_image = true;
/* Choose: primary first, then let a newer active secondary override it. */
8309 if (valid_pri_image && active_pri_image)
8310 active_regions->global = QLA27XX_PRIMARY_IMAGE;
8312 if (valid_sec_image && active_sec_image) {
8313 if (!active_regions->global ||
8314 qla27xx_compare_image_generation(
8315 &pri_image_status, &sec_image_status) < 0) {
8316 active_regions->global = QLA27XX_SECONDARY_IMAGE;
8320 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
8321 active_regions->global == QLA27XX_DEFAULT_IMAGE ?
8322 "default (boot/fw)" :
8323 active_regions->global == QLA27XX_PRIMARY_IMAGE ?
8325 active_regions->global == QLA27XX_SECONDARY_IMAGE ?
8326 "secondary" : "invalid",
8327 active_regions->global);
/*
 * qla24xx_risc_firmware_invalid() - Sanity-check the first RISC firmware
 * segment header.
 * @dword: at least 8 dwords read from the start of the firmware image
 *
 * An erased or blank flash region reads back as all-zeros or all-ones;
 * either pattern in dwords 4..7 (segment address/length words) means no
 * usable firmware is present.
 *
 * Returns true when the image is invalid.
 */
bool qla24xx_risc_firmware_invalid(uint32_t *dword)
{
	return
	    !(dword[4] | dword[5] | dword[6] | dword[7]) ||
	    !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
}
/*
 * qla24xx_load_risc_flash() - Load RISC firmware into the ISP from flash.
 * Validates the image header, copies each firmware segment into chip RAM
 * in fw_transfer_size chunks via qla2x00_load_ram() (byte-swapping each
 * dword), then -- on 27xx/28xx only -- reads the firmware dump templates
 * that follow the code segments into ha->fwdt[].
 *
 * NOTE(review): this extract is missing lines (blank lines, closing
 * braces, some loop/label tails such as the failed_/return paths);
 * leading digits are extraction residue.  Code left byte-identical;
 * comments only.
 */
8338 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
8342 uint templates, segments, fragment;
8347 uint32_t risc_addr, risc_size, risc_attr = 0;
8348 struct qla_hw_data *ha = vha->hw;
/* The request ring is reused as a DMA-able bounce buffer for load_ram. */
8349 struct req_que *req = ha->req_q_map[0];
8350 struct fwdt *fwdt = ha->fwdt;
8352 ql_dbg(ql_dbg_init, vha, 0x008b,
8353 "FW: Loading firmware from flash (%x).\n", faddr);
/* Header sanity check: blank/erased flash means no firmware. */
8355 dcode = (uint32_t *)req->ring;
8356 qla24xx_read_flash_data(vha, dcode, faddr, 8);
8357 if (qla24xx_risc_firmware_invalid(dcode)) {
8358 ql_log(ql_log_fatal, vha, 0x008c,
8359 "Unable to verify the integrity of flash firmware "
8361 ql_log(ql_log_fatal, vha, 0x008d,
8362 "Firmware data: %08x %08x %08x %08x.\n",
8363 dcode[0], dcode[1], dcode[2], dcode[3]);
8365 return QLA_FUNCTION_FAILED;
8368 dcode = (uint32_t *)req->ring;
8370 segments = FA_RISC_CODE_SEGMENTS;
8371 for (j = 0; j < segments; j++) {
8372 ql_dbg(ql_dbg_init, vha, 0x008d,
8373 "-> Loading segment %u...\n", j);
/* dwords 2/3 of the segment header are the load address and length. */
8374 qla24xx_read_flash_data(vha, dcode, faddr, 10);
8375 risc_addr = be32_to_cpu((__force __be32)dcode[2]);
8376 risc_size = be32_to_cpu((__force __be32)dcode[3]);
/* First segment: remember the RISC start address and attributes. */
8378 *srisc_addr = risc_addr;
8379 risc_attr = be32_to_cpu((__force __be32)dcode[9]);
8382 dlen = ha->fw_transfer_size >> 2;
8383 for (fragment = 0; risc_size; fragment++) {
8384 if (dlen > risc_size)
8387 ql_dbg(ql_dbg_init, vha, 0x008e,
8388 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
8389 fragment, risc_addr, faddr, dlen);
8390 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
/* Flash data is big-endian; swap to the byte order load_ram expects. */
8391 for (i = 0; i < dlen; i++)
8392 dcode[i] = swab32(dcode[i]);
8394 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8396 ql_log(ql_log_fatal, vha, 0x008f,
8397 "-> Failed load firmware fragment %u.\n",
8399 return QLA_FUNCTION_FAILED;
/* Dump templates only exist on 27xx/28xx parts. */
8408 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
/* BIT_9 in the segment attributes advertises a second template. */
8411 templates = (risc_attr & BIT_9) ? 2 : 1;
8412 ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
8413 for (j = 0; j < templates; j++, fwdt++) {
8414 vfree(fwdt->template);
8415 fwdt->template = NULL;
8418 dcode = (uint32_t *)req->ring;
8419 qla24xx_read_flash_data(vha, dcode, faddr, 7);
8420 risc_size = be32_to_cpu((__force __be32)dcode[2]);
8421 ql_dbg(ql_dbg_init, vha, 0x0161,
8422 "-> fwdt%u template array at %#x (%#x dwords)\n",
8423 j, faddr, risc_size);
/* 0 or 0xffffffff size means the template region is blank. */
8424 if (!risc_size || !~risc_size) {
8425 ql_dbg(ql_dbg_init, vha, 0x0162,
8426 "-> fwdt%u failed to read array\n", j);
8430 /* skip header and ignore checksum */
8434 ql_dbg(ql_dbg_init, vha, 0x0163,
8435 "-> fwdt%u template allocate template %#x words...\n",
8437 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8438 if (!fwdt->template) {
8439 ql_log(ql_log_warn, vha, 0x0164,
8440 "-> fwdt%u failed allocate template.\n", j);
8444 dcode = fwdt->template;
8445 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
8447 if (!qla27xx_fwdt_template_valid(dcode)) {
8448 ql_log(ql_log_warn, vha, 0x0165,
8449 "-> fwdt%u failed template validate\n", j);
/* The template's self-declared size must fit inside what was read. */
8453 dlen = qla27xx_fwdt_template_size(dcode);
8454 ql_dbg(ql_dbg_init, vha, 0x0166,
8455 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8456 j, dlen, dlen / sizeof(*dcode));
8457 if (dlen > risc_size * sizeof(*dcode)) {
8458 ql_log(ql_log_warn, vha, 0x0167,
8459 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8460 j, dlen - risc_size * sizeof(*dcode));
8464 fwdt->length = dlen;
8465 ql_dbg(ql_dbg_init, vha, 0x0168,
8466 "-> fwdt%u loaded template ok\n", j);
8468 faddr += risc_size + 1;
/* Failure path (label dropped by extraction): discard a partial template. */
8474 vfree(fwdt->template);
8475 fwdt->template = NULL;
8481 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
/*
 * qla2x00_load_risc() - Load RISC firmware on ISP2x00 parts from a
 * request_firmware() blob (16-bit word format).
 * Validates the image by inspecting words 4..7 (all-zero / all-ones means
 * blank), then walks the segment list, copying each segment to chip RAM
 * in fw_transfer_size chunks with per-word byte swapping.
 *
 * NOTE(review): this extract is missing lines (blank lines, closing
 * braces, segment-advance and success-return tails); leading digits are
 * extraction residue.  Code left byte-identical; comments only.
 */
8484 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8490 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
8491 struct fw_blob *blob;
8492 struct qla_hw_data *ha = vha->hw;
/* Request ring doubles as the DMA bounce buffer for load_ram. */
8493 struct req_que *req = ha->req_q_map[0];
8495 /* Load firmware blob. */
8496 blob = qla2x00_request_firmware(vha);
8498 ql_log(ql_log_info, vha, 0x0083,
8499 "Firmware image unavailable.\n");
8500 ql_log(ql_log_info, vha, 0x0084,
8501 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
8502 return QLA_FUNCTION_FAILED;
8507 wcode = (uint16_t *)req->ring;
8509 fwcode = (__force __be16 *)blob->fw->data;
8512 /* Validate firmware image by checking version. */
8513 if (blob->fw->size < 8 * sizeof(uint16_t)) {
8514 ql_log(ql_log_fatal, vha, 0x0085,
8515 "Unable to verify integrity of firmware image (%zd).\n",
8517 goto fail_fw_integrity;
8519 for (i = 0; i < 4; i++)
8520 wcode[i] = be16_to_cpu(fwcode[i + 4]);
/* All-ones or all-zeros version words indicate a blank/corrupt image. */
8521 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
8522 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
8523 wcode[2] == 0 && wcode[3] == 0)) {
8524 ql_log(ql_log_fatal, vha, 0x0086,
8525 "Unable to verify integrity of firmware image.\n");
8526 ql_log(ql_log_fatal, vha, 0x0087,
8527 "Firmware data: %04x %04x %04x %04x.\n",
8528 wcode[0], wcode[1], wcode[2], wcode[3]);
8529 goto fail_fw_integrity;
/* Segment list is terminated by a zero word. */
8533 while (*seg && rval == QLA_SUCCESS) {
8535 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
8536 risc_size = be16_to_cpu(fwcode[3]);
8538 /* Validate firmware image size. */
8539 fwclen += risc_size * sizeof(uint16_t);
8540 if (blob->fw->size < fwclen) {
8541 ql_log(ql_log_fatal, vha, 0x0088,
8542 "Unable to verify integrity of firmware image "
8543 "(%zd).\n", blob->fw->size);
8544 goto fail_fw_integrity;
8548 while (risc_size > 0 && rval == QLA_SUCCESS) {
8549 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
8550 if (wlen > risc_size)
8552 ql_dbg(ql_dbg_init, vha, 0x0089,
8553 "Loading risc segment@ risc addr %x number of "
8554 "words 0x%x.\n", risc_addr, wlen);
/* Blob data is big-endian 16-bit words; swap into host order. */
8556 for (i = 0; i < wlen; i++)
8557 wcode[i] = swab16((__force u32)fwcode[i]);
8559 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
8562 ql_log(ql_log_fatal, vha, 0x008a,
8563 "Failed to load segment %d of firmware.\n",
/* fail_fw_integrity: (label dropped by extraction) */
8580 return QLA_FUNCTION_FAILED;
/*
 * qla24xx_load_risc_blob() - Load ISP24xx+ RISC firmware from a
 * request_firmware() blob (32-bit dword format).
 * Validates the image header, loads each code segment to chip RAM in
 * fw_transfer_size chunks with dword byte swapping, then -- on 27xx/28xx
 * -- copies the trailing firmware dump templates into ha->fwdt[].
 *
 * NOTE(review): this extract is missing lines (blank lines, closing
 * braces, failure-path labels); leading digits are extraction residue.
 * Code left byte-identical; comments only.
 */
8584 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8587 uint templates, segments, fragment;
8590 uint32_t risc_addr, risc_size, risc_attr = 0;
8593 struct fw_blob *blob;
8595 struct qla_hw_data *ha = vha->hw;
/* Request ring doubles as the DMA bounce buffer for load_ram. */
8596 struct req_que *req = ha->req_q_map[0];
8597 struct fwdt *fwdt = ha->fwdt;
8599 ql_dbg(ql_dbg_init, vha, 0x0090,
8600 "-> FW: Loading via request-firmware.\n");
8602 blob = qla2x00_request_firmware(vha);
8604 ql_log(ql_log_warn, vha, 0x0092,
8605 "-> Firmware file not found.\n");
8607 return QLA_FUNCTION_FAILED;
/* Header check straight on the blob data before any copying. */
8610 fwcode = (__force __be32 *)blob->fw->data;
8611 dcode = (__force uint32_t *)fwcode;
8612 if (qla24xx_risc_firmware_invalid(dcode)) {
8613 ql_log(ql_log_fatal, vha, 0x0093,
8614 "Unable to verify integrity of firmware image (%zd).\n",
8616 ql_log(ql_log_fatal, vha, 0x0095,
8617 "Firmware data: %08x %08x %08x %08x.\n",
8618 dcode[0], dcode[1], dcode[2], dcode[3]);
8619 return QLA_FUNCTION_FAILED;
8622 dcode = (uint32_t *)req->ring;
8624 segments = FA_RISC_CODE_SEGMENTS;
8625 for (j = 0; j < segments; j++) {
8626 ql_dbg(ql_dbg_init, vha, 0x0096,
8627 "-> Loading segment %u...\n", j);
/* dwords 2/3 of each segment header: load address and dword count. */
8628 risc_addr = be32_to_cpu(fwcode[2]);
8629 risc_size = be32_to_cpu(fwcode[3]);
/* First segment: record the RISC entry address and attributes. */
8632 *srisc_addr = risc_addr;
8633 risc_attr = be32_to_cpu(fwcode[9]);
8636 dlen = ha->fw_transfer_size >> 2;
8637 for (fragment = 0; risc_size; fragment++) {
8638 if (dlen > risc_size)
8641 ql_dbg(ql_dbg_init, vha, 0x0097,
8642 "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
8643 fragment, risc_addr,
8644 (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
/* Blob data is big-endian dwords; swap into host order for load_ram. */
8647 for (i = 0; i < dlen; i++)
8648 dcode[i] = swab32((__force u32)fwcode[i]);
8650 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8652 ql_log(ql_log_fatal, vha, 0x0098,
8653 "-> Failed load firmware fragment %u.\n",
8655 return QLA_FUNCTION_FAILED;
/* Dump templates only exist on 27xx/28xx parts. */
8664 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
/* BIT_9 in the segment attributes advertises a second template. */
8667 templates = (risc_attr & BIT_9) ? 2 : 1;
8668 ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
8669 for (j = 0; j < templates; j++, fwdt++) {
8670 vfree(fwdt->template);
8671 fwdt->template = NULL;
8674 risc_size = be32_to_cpu(fwcode[2]);
8675 ql_dbg(ql_dbg_init, vha, 0x0171,
8676 "-> fwdt%u template array at %#x (%#x dwords)\n",
8677 j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
/* 0 or 0xffffffff template size means a blank region. */
8679 if (!risc_size || !~risc_size) {
8680 ql_dbg(ql_dbg_init, vha, 0x0172,
8681 "-> fwdt%u failed to read array\n", j);
8685 /* skip header and ignore checksum */
8689 ql_dbg(ql_dbg_init, vha, 0x0173,
8690 "-> fwdt%u template allocate template %#x words...\n",
8692 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8693 if (!fwdt->template) {
8694 ql_log(ql_log_warn, vha, 0x0174,
8695 "-> fwdt%u failed allocate template.\n", j);
8699 dcode = fwdt->template;
8700 for (i = 0; i < risc_size; i++)
8701 dcode[i] = (__force u32)fwcode[i];
8703 if (!qla27xx_fwdt_template_valid(dcode)) {
8704 ql_log(ql_log_warn, vha, 0x0175,
8705 "-> fwdt%u failed template validate\n", j);
/* The template's self-declared size must fit inside what was copied. */
8709 dlen = qla27xx_fwdt_template_size(dcode);
8710 ql_dbg(ql_dbg_init, vha, 0x0176,
8711 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8712 j, dlen, dlen / sizeof(*dcode));
8713 if (dlen > risc_size * sizeof(*dcode)) {
8714 ql_log(ql_log_warn, vha, 0x0177,
8715 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8716 j, dlen - risc_size * sizeof(*dcode));
8720 fwdt->length = dlen;
8721 ql_dbg(ql_dbg_init, vha, 0x0178,
8722 "-> fwdt%u loaded template ok\n", j);
8724 fwcode += risc_size + 1;
/* Failure path (label dropped by extraction): discard a partial template. */
8730 vfree(fwdt->template);
8731 fwdt->template = NULL;
8738 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8742 if (ql2xfwloadbin == 1)
8743 return qla81xx_load_risc(vha, srisc_addr);
8747 * 1) Firmware via request-firmware interface (.bin file).
8748 * 2) Firmware residing in flash.
8750 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8751 if (rval == QLA_SUCCESS)
8754 return qla24xx_load_risc_flash(vha, srisc_addr,
8755 vha->hw->flt_region_fw);
8759 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8762 struct qla_hw_data *ha = vha->hw;
8763 struct active_regions active_regions = { };
8765 if (ql2xfwloadbin == 2)
8768 /* FW Load priority:
8769 * 1) Firmware residing in flash.
8770 * 2) Firmware via request-firmware interface (.bin file).
8771 * 3) Golden-Firmware residing in flash -- (limited operation).
8774 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8775 goto try_primary_fw;
8777 qla27xx_get_active_image(vha, &active_regions);
8779 if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
8780 goto try_primary_fw;
8782 ql_dbg(ql_dbg_init, vha, 0x008b,
8783 "Loading secondary firmware image.\n");
8784 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
8789 ql_dbg(ql_dbg_init, vha, 0x008b,
8790 "Loading primary firmware image.\n");
8791 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
8796 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8797 if (!rval || !ha->flt_region_gold_fw)
8800 ql_log(ql_log_info, vha, 0x0099,
8801 "Attempting to fallback to golden firmware.\n");
8802 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
8806 ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
8807 ha->flags.running_gold_fw = 1;
8812 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
8815 struct qla_hw_data *ha = vha->hw;
8817 if (ha->flags.pci_channel_io_perm_failure)
8819 if (!IS_FWI2_CAPABLE(ha))
8821 if (!ha->fw_major_version)
8823 if (!ha->flags.fw_started)
8826 ret = qla2x00_stop_firmware(vha);
8827 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
8828 ret != QLA_INVALID_COMMAND && retries ; retries--) {
8829 ha->isp_ops->reset_chip(vha);
8830 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
8832 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
8834 ql_log(ql_log_info, vha, 0x8015,
8835 "Attempting retry of stop-firmware command.\n");
8836 ret = qla2x00_stop_firmware(vha);
8840 ha->flags.fw_init_done = 0;
8844 qla24xx_configure_vhba(scsi_qla_host_t *vha)
8846 int rval = QLA_SUCCESS;
8848 uint16_t mb[MAILBOX_REGISTER_COUNT];
8849 struct qla_hw_data *ha = vha->hw;
8850 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
8855 rval = qla2x00_fw_ready(base_vha);
8857 if (rval == QLA_SUCCESS) {
8858 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8859 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
8862 vha->flags.management_server_logged_in = 0;
8864 /* Login to SNS first */
8865 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
8867 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
8868 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
8869 ql_dbg(ql_dbg_init, vha, 0x0120,
8870 "Failed SNS login: loop_id=%x, rval2=%d\n",
8873 ql_dbg(ql_dbg_init, vha, 0x0103,
8874 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
8875 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
8876 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
8877 return (QLA_FUNCTION_FAILED);
8880 atomic_set(&vha->loop_down_timer, 0);
8881 atomic_set(&vha->loop_state, LOOP_UP);
8882 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
8883 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
8884 rval = qla2x00_loop_resync(base_vha);
8889 /* 84XX Support **************************************************************/
/* All qla_chip_state_84xx objects, shared by HBAs on the same PCI bus. */
8891 static LIST_HEAD(qla_cs84xx_list);
/* Serializes lookup/insert/removal on qla_cs84xx_list. */
8892 static DEFINE_MUTEX(qla_cs84xx_mutex);
8894 static struct qla_chip_state_84xx *
8895 qla84xx_get_chip(struct scsi_qla_host *vha)
8897 struct qla_chip_state_84xx *cs84xx;
8898 struct qla_hw_data *ha = vha->hw;
8900 mutex_lock(&qla_cs84xx_mutex);
8902 /* Find any shared 84xx chip. */
8903 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
8904 if (cs84xx->bus == ha->pdev->bus) {
8905 kref_get(&cs84xx->kref);
8910 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
8914 kref_init(&cs84xx->kref);
8915 spin_lock_init(&cs84xx->access_lock);
8916 mutex_init(&cs84xx->fw_update_mutex);
8917 cs84xx->bus = ha->pdev->bus;
8919 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
8921 mutex_unlock(&qla_cs84xx_mutex);
8926 __qla84xx_chip_release(struct kref *kref)
8928 struct qla_chip_state_84xx *cs84xx =
8929 container_of(kref, struct qla_chip_state_84xx, kref);
8931 mutex_lock(&qla_cs84xx_mutex);
8932 list_del(&cs84xx->list);
8933 mutex_unlock(&qla_cs84xx_mutex);
8938 qla84xx_put_chip(struct scsi_qla_host *vha)
8940 struct qla_hw_data *ha = vha->hw;
8943 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
8947 qla84xx_init_chip(scsi_qla_host_t *vha)
8951 struct qla_hw_data *ha = vha->hw;
8953 mutex_lock(&ha->cs84xx->fw_update_mutex);
8955 rval = qla84xx_verify_chip(vha, status);
8957 mutex_unlock(&ha->cs84xx->fw_update_mutex);
8959 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
8963 /* 81XX Support **************************************************************/
/*
 * qla81xx_nvram_config - read and validate the ISP81xx+ NVRAM/VPD,
 * fall back to safe defaults on a bad checksum or bad signature, then
 * build the initialization control block (ICB) and the driver tunables
 * (timeouts, retry counts, ZIO mode, operating mode) from it.
 * NOTE(review): several lines (nv/dptr/faddr declarations, closing
 * braces, the final rval handling) are not visible in this span.
 */
8966 qla81xx_nvram_config(scsi_qla_host_t *vha)
8969 struct init_cb_81xx *icb;
8970 struct nvram_81xx *nv;
8972 uint8_t *dptr1, *dptr2;
8975 struct qla_hw_data *ha = vha->hw;
8977 struct active_regions active_regions = { };
8980 icb = (struct init_cb_81xx *)ha->init_cb;
8983 /* Determine NVRAM starting address. */
8984 ha->nvram_size = sizeof(*nv);
8985 ha->vpd_size = FA_NVRAM_VPD_SIZE;
8986 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
8987 ha->vpd_size = FA_VPD_SIZE_82XX;
/* 27xx/28xx parts can boot from a secondary flash image region. */
8989 if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
8990 qla28xx_get_aux_images(vha, &active_regions);
8992 /* Get VPD data into cache */
8993 ha->vpd = ha->nvram + VPD_OFFSET;
8995 faddr = ha->flt_region_vpd;
8996 if (IS_QLA28XX(ha)) {
8997 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8998 faddr = ha->flt_region_vpd_sec;
8999 ql_dbg(ql_dbg_init, vha, 0x0110,
9000 "Loading %s nvram image.\n",
9001 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
9002 "primary" : "secondary");
/* faddr is a flash word address; << 2 converts to a byte offset. */
9004 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
9006 /* Get NVRAM data into cache and calculate checksum. */
9007 faddr = ha->flt_region_nvram;
9008 if (IS_QLA28XX(ha)) {
9009 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
9010 faddr = ha->flt_region_nvram_sec;
9012 ql_dbg(ql_dbg_init, vha, 0x0110,
9013 "Loading %s nvram image.\n",
9014 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
9015 "primary" : "secondary");
9016 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
/* 32-bit-wise sum over the NVRAM image; must be zero when valid. */
9018 dptr = (__force __le32 *)nv;
9019 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
9020 chksum += le32_to_cpu(*dptr);
9022 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
9023 "Contents of NVRAM:\n");
9024 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
9025 nv, ha->nvram_size);
9027 /* Bad NVRAM data, set defaults parameters. */
9028 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
9029 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
9030 /* Reset NVRAM data. */
9031 ql_log(ql_log_info, vha, 0x0073,
9032 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
9033 chksum, nv->id, le16_to_cpu(nv->nvram_version));
9034 ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
9035 ql_log(ql_log_info, vha, 0x0074,
9036 "Falling back to functioning (yet invalid -- WWPN) "
9040 * Set default initialization control block.
/* Fabricate a minimal, recognizable default WWPN/WWNN (21:00:00:e0:8b:...). */
9042 memset(nv, 0, ha->nvram_size);
9043 nv->nvram_version = cpu_to_le16(ICB_VERSION);
9044 nv->version = cpu_to_le16(ICB_VERSION);
9045 nv->frame_payload_size = cpu_to_le16(2048);
9046 nv->execution_throttle = cpu_to_le16(0xFFFF);
9047 nv->exchange_count = cpu_to_le16(0);
9048 nv->port_name[0] = 0x21;
9049 nv->port_name[1] = 0x00 + ha->port_no + 1;
9050 nv->port_name[2] = 0x00;
9051 nv->port_name[3] = 0xe0;
9052 nv->port_name[4] = 0x8b;
9053 nv->port_name[5] = 0x1c;
9054 nv->port_name[6] = 0x55;
9055 nv->port_name[7] = 0x86;
9056 nv->node_name[0] = 0x20;
9057 nv->node_name[1] = 0x00;
9058 nv->node_name[2] = 0x00;
9059 nv->node_name[3] = 0xe0;
9060 nv->node_name[4] = 0x8b;
9061 nv->node_name[5] = 0x1c;
9062 nv->node_name[6] = 0x55;
9063 nv->node_name[7] = 0x86;
9064 nv->login_retry_count = cpu_to_le16(8);
9065 nv->interrupt_delay_timer = cpu_to_le16(0);
9066 nv->login_timeout = cpu_to_le16(0);
9067 nv->firmware_options_1 =
9068 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
9069 nv->firmware_options_2 = cpu_to_le32(2 << 4);
9070 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
9071 nv->firmware_options_3 = cpu_to_le32(2 << 13);
9072 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
9073 nv->efi_parameters = cpu_to_le32(0);
9074 nv->reset_delay = 5;
9075 nv->max_luns_per_target = cpu_to_le16(128);
9076 nv->port_down_retry_count = cpu_to_le16(30);
9077 nv->link_down_timeout = cpu_to_le16(180);
/* Default ethernet-node MAC, differentiated per PCI function. */
9078 nv->enode_mac[0] = 0x00;
9079 nv->enode_mac[1] = 0xC0;
9080 nv->enode_mac[2] = 0xDD;
9081 nv->enode_mac[3] = 0x04;
9082 nv->enode_mac[4] = 0x05;
9083 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
/* T10-PI needs the frame payload rounded down to a multiple of 8. */
9088 if (IS_T10_PI_CAPABLE(ha))
9089 nv->frame_payload_size &= cpu_to_le16(~7);
9091 qlt_81xx_config_nvram_stage1(vha, nv);
9093 /* Reset Initialization control block */
9094 memset(icb, 0, ha->init_cb_size);
/* Copy NVRAM into the ICB in two byte-wise segments, skipping the
 * ICB fields (queue pointers, reserved areas) that firmware owns. */
9096 /* Copy 1st segment. */
9097 dptr1 = (uint8_t *)icb;
9098 dptr2 = (uint8_t *)&nv->version;
9099 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
9101 *dptr1++ = *dptr2++;
9103 icb->login_retry_count = nv->login_retry_count;
9105 /* Copy 2nd segment. */
9106 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
9107 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
9108 cnt = (uint8_t *)&icb->reserved_5 -
9109 (uint8_t *)&icb->interrupt_delay_timer;
9111 *dptr1++ = *dptr2++;
9113 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
9114 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
9115 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
9116 icb->enode_mac[0] = 0x00;
9117 icb->enode_mac[1] = 0xC0;
9118 icb->enode_mac[2] = 0xDD;
9119 icb->enode_mac[3] = 0x04;
9120 icb->enode_mac[4] = 0x05;
9121 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
9124 /* Use extended-initialization control block. */
9125 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
9126 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
9128 * Setup driver NVRAM options.
9130 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
9133 qlt_81xx_config_nvram_stage2(vha, icb);
9135 /* Use alternate WWN? */
9136 if (nv->host_p & cpu_to_le32(BIT_15)) {
9137 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
9138 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
9141 /* Prepare nodename */
9142 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
9144 * Firmware will apply the following mask if the nodename was
9147 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
9148 icb->node_name[0] &= 0xF0;
/* SCM (congestion management) support is advertised active-low. */
9151 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
9152 if ((nv->enhanced_features & BIT_7) == 0)
9153 ha->flags.scm_supported_a = 1;
9156 /* Set host adapter parameters. */
9157 ha->flags.disable_risc_code_load = 0;
9158 ha->flags.enable_lip_reset = 0;
9159 ha->flags.enable_lip_full_login =
9160 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
9161 ha->flags.enable_target_reset =
9162 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
9163 ha->flags.enable_led_scheme = 0;
9164 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
9166 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
9167 (BIT_6 | BIT_5 | BIT_4)) >> 4;
9169 /* save HBA serial number */
9170 ha->serial0 = icb->port_name[5];
9171 ha->serial1 = icb->port_name[6];
9172 ha->serial2 = icb->port_name[7];
9173 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
9174 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
9176 icb->execution_throttle = cpu_to_le16(0xFFFF);
9178 ha->retry_count = le16_to_cpu(nv->login_retry_count);
9180 /* Set minimum login_timeout to 4 seconds. */
9181 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
9182 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
9183 if (le16_to_cpu(nv->login_timeout) < 4)
9184 nv->login_timeout = cpu_to_le16(4);
9185 ha->login_timeout = le16_to_cpu(nv->login_timeout);
9187 /* Set minimum RATOV to 100 tenths of a second. */
9190 ha->loop_reset_delay = nv->reset_delay;
9192 /* Link Down Timeout = 0:
9194 * When Port Down timer expires we will start returning
9195 * I/O's to OS with "DID_NO_CONNECT".
9197 * Link Down Timeout != 0:
9199 * The driver waits for the link to come up after link down
9200 * before returning I/Os to OS with "DID_NO_CONNECT".
9202 if (le16_to_cpu(nv->link_down_timeout) == 0) {
9203 ha->loop_down_abort_time =
9204 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
9206 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
9207 ha->loop_down_abort_time =
9208 (LOOP_DOWN_TIME - ha->link_down_timeout);
9211 /* Need enough time to try and get the port back. */
9212 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
9213 if (qlport_down_retry)
9214 ha->port_down_retry_count = qlport_down_retry;
9216 /* Set login_retry_count */
9217 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
9218 if (ha->port_down_retry_count ==
9219 le16_to_cpu(nv->port_down_retry_count) &&
9220 ha->port_down_retry_count > 3)
9221 ha->login_retry_count = ha->port_down_retry_count;
9222 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
9223 ha->login_retry_count = ha->port_down_retry_count;
/* Module parameter overrides both NVRAM-derived values. */
9224 if (ql2xloginretrycount)
9225 ha->login_retry_count = ql2xloginretrycount;
9227 /* if not running MSI-X we need handshaking on interrupts */
9228 if (!vha->hw->flags.msix_enabled &&
9229 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
9230 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
/* ZIO (interrupt coalescing) is configured only on first init. */
9233 if (!vha->flags.init_done) {
9234 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
9235 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
9236 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
9237 le16_to_cpu(icb->interrupt_delay_timer) : 2;
9239 icb->firmware_options_2 &= cpu_to_le32(
9240 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
9241 vha->flags.process_response_queue = 0;
9242 if (ha->zio_mode != QLA_ZIO_DISABLED) {
9243 ha->zio_mode = QLA_ZIO_MODE_6;
9245 ql_log(ql_log_info, vha, 0x0075,
9246 "ZIO mode %d enabled; timer delay (%d us).\n",
9248 ha->zio_timer * 100);
9250 icb->firmware_options_2 |= cpu_to_le32(
9251 (uint32_t)ha->zio_mode);
9252 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
9253 vha->flags.process_response_queue = 1;
9256 /* enable RIDA Format2 */
9257 icb->firmware_options_3 |= cpu_to_le32(BIT_0);
9259 /* N2N: driver will initiate Login instead of FW */
9260 icb->firmware_options_3 |= cpu_to_le32(BIT_8);
9262 /* Determine NVMe/FCP priority for target ports */
9263 ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
9266 ql_log(ql_log_warn, vha, 0x0076,
9267 "NVRAM configuration failed.\n");
/*
 * qla82xx_restart_isp - bring an ISP82xx back online after a reset:
 * re-initialize the request/response rings, wait for firmware ready,
 * re-enable interrupts and FCE/EFT tracing, then abort/resync every
 * configured vport.
 * NOTE(review): status checks, braces and the final return are not
 * visible in this span; comments below describe only visible lines.
 */
9273 qla82xx_restart_isp(scsi_qla_host_t *vha)
9276 struct qla_hw_data *ha = vha->hw;
9277 struct scsi_qla_host *vp, *tvp;
9278 unsigned long flags;
9280 status = qla2x00_init_rings(vha);
9282 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9283 ha->flags.chip_reset_done = 1;
9285 status = qla2x00_fw_ready(vha);
9287 /* Issue a marker after FW becomes ready. */
9288 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
9289 vha->flags.online = 1;
9290 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
9293 /* if no cable then assume it's good */
9294 if ((vha->device_flags & DFLG_NO_CABLE))
9299 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9301 if (!atomic_read(&vha->loop_down_timer)) {
9303 * Issue marker command only when we are going
9304 * to start the I/O .
9306 vha->marker_needed = 1;
9309 ha->isp_ops->enable_intrs(ha);
9311 ha->isp_abort_cnt = 0;
9312 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
9314 /* Update the firmware version */
9315 status = qla82xx_check_md_needed(vha);
/* Re-arm the Fibre Channel Event trace buffer if it was in use. */
9318 ha->flags.fce_enabled = 1;
9320 fce_calc_size(ha->fce_bufs));
9321 rval = qla2x00_enable_fce_trace(vha,
9322 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
9325 ql_log(ql_log_warn, vha, 0x8001,
9326 "Unable to reinitialize FCE (%d).\n",
9328 ha->flags.fce_enabled = 0;
/* Re-arm the Extended Firmware Trace buffer. */
9333 memset(ha->eft, 0, EFT_SIZE);
9334 rval = qla2x00_enable_eft_trace(vha,
9335 ha->eft_dma, EFT_NUM_BUFFERS);
9337 ql_log(ql_log_warn, vha, 0x8010,
9338 "Unable to reinitialize EFT (%d).\n",
9345 ql_dbg(ql_dbg_taskm, vha, 0x8011,
9346 "qla82xx_restart_isp succeeded.\n");
/* Walk the vport list; hold a vref and drop the lock around the
 * abort so qla2x00_vp_abort_isp() may sleep safely. */
9348 spin_lock_irqsave(&ha->vport_slock, flags);
9349 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
9351 atomic_inc(&vp->vref_count);
9352 spin_unlock_irqrestore(&ha->vport_slock, flags);
9354 qla2x00_vp_abort_isp(vp);
9356 spin_lock_irqsave(&ha->vport_slock, flags);
9357 atomic_dec(&vp->vref_count);
9360 spin_unlock_irqrestore(&ha->vport_slock, flags);
9363 ql_log(ql_log_warn, vha, 0x8016,
9364 "qla82xx_restart_isp **** FAILED ****.\n");
9371 * qla24xx_get_fcp_prio
9372 * Gets the fcp cmd priority value for the logged in port.
9373 * Looks for a match of the port descriptors within
9374 * each of the fcp prio config entries. If a match is found,
9375 * the tag (priority) value is returned.
9378 * vha = scsi host structure pointer.
9379 * fcport = port structure pointer.
9382 * non-zero (if found)
/*
 * Walk the FCP priority configuration table and return the tag
 * (priority) of the first valid entry whose source AND destination
 * descriptors both match this host/fcport pair.  A descriptor pair
 * matches when pid_match or wwn_match reaches 2 (both halves hit,
 * where a wildcard -- INVALID_PORT_ID / all-ones WWN -- also counts).
 */
9389 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9392 uint8_t pid_match, wwn_match;
9394 uint32_t pid1, pid2;
9395 uint64_t wwn1, wwn2;
9396 struct qla_fcp_prio_entry *pri_entry;
9397 struct qla_hw_data *ha = vha->hw;
/* No config loaded or feature disabled: nothing to match against. */
9399 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
9403 entries = ha->fcp_prio_cfg->num_entries;
9404 pri_entry = &ha->fcp_prio_cfg->entry[0];
9406 for (i = 0; i < entries; i++) {
9407 pid_match = wwn_match = 0;
9409 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
9414 /* check source pid for a match */
9415 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
9416 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
9417 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
9418 if (pid1 == INVALID_PORT_ID)
9420 else if (pid1 == pid2)
9424 /* check destination pid for a match */
9425 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
9426 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
9427 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
9428 if (pid1 == INVALID_PORT_ID)
9430 else if (pid1 == pid2)
9434 /* check source WWN for a match */
9435 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
9436 wwn1 = wwn_to_u64(vha->port_name);
9437 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
/* An all-ones WWN in the entry acts as a wildcard. */
9438 if (wwn2 == (uint64_t)-1)
9440 else if (wwn1 == wwn2)
9444 /* check destination WWN for a match */
9445 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
9446 wwn1 = wwn_to_u64(fcport->port_name);
9447 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
9448 if (wwn2 == (uint64_t)-1)
9450 else if (wwn1 == wwn2)
9454 if (pid_match == 2 || wwn_match == 2) {
9455 /* Found a matching entry */
9456 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
9457 priority = pri_entry->tag;
9468 * qla24xx_update_fcport_fcp_prio
9469 * Activates fcp priority for the logged in fc port
9472 * vha = scsi host structure pointer.
9473 * fcp = port structure pointer.
9476 * QLA_SUCCESS or QLA_FUNCTION_FAILED
/*
 * Look up the configured FCP priority for a logged-in target port and
 * push it to firmware via qla24xx_set_fcp_prio(); on P3P (82xx) parts
 * the priority is only cached in the fcport.
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
9482 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
/* Only logged-in target ports carry an FCP priority. */
9488 if (fcport->port_type != FCT_TARGET ||
9489 fcport->loop_id == FC_NO_LOOP_ID)
9490 return QLA_FUNCTION_FAILED;
9492 priority = qla24xx_get_fcp_prio(vha, fcport);
9494 return QLA_FUNCTION_FAILED;
/* P3P firmware consumes the priority from the IOCB; no mailbox. */
9496 if (IS_P3P_TYPE(vha->hw)) {
9497 fcport->fcp_prio = priority & 0xf;
9501 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
9502 if (ret == QLA_SUCCESS) {
9503 if (fcport->fcp_prio != priority)
9504 ql_dbg(ql_dbg_user, vha, 0x709e,
9505 "Updated FCP_CMND priority - value=%d loop_id=%d "
9506 "port_id=%02x%02x%02x.\n", priority,
9507 fcport->loop_id, fcport->d_id.b.domain,
9508 fcport->d_id.b.area, fcport->d_id.b.al_pa);
/* Priority field in the FCP_CMND is 4 bits wide. */
9509 fcport->fcp_prio = priority & 0xf;
9511 ql_dbg(ql_dbg_user, vha, 0x704f,
9512 "Unable to update FCP_CMND priority - ret=0x%x for "
9513 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
9514 fcport->d_id.b.domain, fcport->d_id.b.area,
9515 fcport->d_id.b.al_pa);
9520 * qla24xx_update_all_fcp_prio
9521 * Activates fcp priority for all the logged in ports
9524 * ha = adapter block pointer.
9527 * QLA_SUCCESS or QLA_FUNCTION_FAILED
/*
 * Apply FCP priority to every fcport on this host.
 * NOTE(review): ret reflects only the LAST port processed; earlier
 * failures are not accumulated -- confirm this is intentional.
 */
9533 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
9538 ret = QLA_FUNCTION_FAILED;
9539 /* We need to set priority for all logged in ports */
9540 list_for_each_entry(fcport, &vha->vp_fcports, list)
9541 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
/*
 * qla2xxx_create_qpair - allocate and wire up a new queue pair
 * (request + response queue plus an MSI-X vector) for multi-queue
 * operation.  Returns the new qpair, or NULL on any failure; the
 * error path (labels not visible here) unwinds in reverse order.
 */
9546 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
9547 int vp_idx, bool startqp)
9552 struct qla_hw_data *ha = vha->hw;
9553 uint16_t qpair_id = 0;
9554 struct qla_qpair *qpair = NULL;
9555 struct qla_msix_entry *msix;
/* MQ needs both firmware support (BIT_6) and MSI-X enabled. */
9557 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
9558 ql_log(ql_log_warn, vha, 0x00181,
9559 "FW/Driver is not multi-queue capable.\n");
9563 if (ql2xmqsupport || ql2xnvmeenable) {
9564 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
9565 if (qpair == NULL) {
9566 ql_log(ql_log_warn, vha, 0x0182,
9567 "Failed to allocate memory for queue pair.\n");
9571 qpair->hw = vha->hw;
9573 qpair->qp_lock_ptr = &qpair->qp_lock;
9574 spin_lock_init(&qpair->qp_lock);
9575 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
9577 /* Assign available que pair id */
9578 mutex_lock(&ha->mq_lock);
9579 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
9580 if (ha->num_qpairs >= ha->max_qpairs) {
9581 mutex_unlock(&ha->mq_lock);
9582 ql_log(ql_log_warn, vha, 0x0183,
9583 "No resources to create additional q pair.\n");
9587 set_bit(qpair_id, ha->qpair_qid_map);
9588 ha->queue_pair_map[qpair_id] = qpair;
9589 qpair->id = qpair_id;
9590 qpair->vp_idx = vp_idx;
9591 qpair->fw_started = ha->flags.fw_started;
9592 INIT_LIST_HEAD(&qpair->hints_list);
/* Inherit reset/class/confirm settings from the base qpair. */
9593 qpair->chip_reset = ha->base_qpair->chip_reset;
9594 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
9595 qpair->enable_explicit_conf =
9596 ha->base_qpair->enable_explicit_conf;
/* Pick a free MSI-X vector for this qpair. */
9598 for (i = 0; i < ha->msix_count; i++) {
9599 msix = &ha->msix_entries[i];
9603 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
9604 "Vector %x selected for qpair\n", msix->vector);
9608 ql_log(ql_log_warn, vha, 0x0184,
9609 "Out of MSI-X vectors!.\n");
9613 qpair->msix->in_use = 1;
9614 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
9615 qpair->pdev = ha->pdev;
9616 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
9617 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
9619 mutex_unlock(&ha->mq_lock);
9621 /* Create response queue first */
9622 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
9624 ql_log(ql_log_warn, vha, 0x0185,
9625 "Failed to create response queue.\n");
9629 qpair->rsp = ha->rsp_q_map[rsp_id];
9631 /* Create request queue */
9632 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
9635 ql_log(ql_log_warn, vha, 0x0186,
9636 "Failed to create request queue.\n");
9640 qpair->req = ha->req_q_map[req_id];
9641 qpair->rsp->req = qpair->req;
9642 qpair->rsp->qpair = qpair;
9644 if (!qpair->cpu_mapped)
9645 qla_cpu_update(qpair, raw_smp_processor_id());
/* DIF/DIX offload only when T10-PI capable and enabled (fw BIT_4). */
9647 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
9648 if (ha->fw_attributes & BIT_4)
9649 qpair->difdix_supported = 1;
9652 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
9653 if (!qpair->srb_mempool) {
9654 ql_log(ql_log_warn, vha, 0xd036,
9655 "Failed to create srb mempool for qpair %d\n",
9660 if (qla_create_buf_pool(vha, qpair)) {
9661 ql_log(ql_log_warn, vha, 0xd036,
9662 "Failed to initialize buf pool for qpair %d\n",
9667 /* Mark as online */
9670 if (!vha->flags.qpairs_available)
9671 vha->flags.qpairs_available = 1;
9673 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
9674 "Request/Response queue pair created, id %d\n",
9676 ql_dbg(ql_dbg_init, vha, 0x0187,
9677 "Request/Response queue pair created, id %d\n",
/* Error unwind: free resources in reverse order of acquisition. */
9683 mempool_destroy(qpair->srb_mempool);
9685 qla25xx_delete_req_que(vha, qpair->req);
9687 qla25xx_delete_rsp_que(vha, qpair->rsp);
9689 mutex_lock(&ha->mq_lock);
9690 qpair->msix->in_use = 0;
9691 list_del(&qpair->qp_list_elem);
9692 if (list_empty(&vha->qp_list))
9693 vha->flags.qpairs_available = 0;
9695 ha->queue_pair_map[qpair_id] = NULL;
9696 clear_bit(qpair_id, ha->qpair_qid_map);
9698 mutex_unlock(&ha->mq_lock);
/*
 * qla2xxx_delete_qpair - tear down a queue pair created by
 * qla2xxx_create_qpair(): delete its request/response queues, unlink
 * it from the per-adapter maps, and release its memory pools.
 * Returns QLA_SUCCESS, or the first queue-deletion error.
 */
9704 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
9706 int ret = QLA_FUNCTION_FAILED;
9707 struct qla_hw_data *ha = qpair->hw;
/* Stop new work from being queued onto this qpair. */
9709 qpair->delete_in_progress = 1;
9711 qla_free_buf_pool(qpair);
9713 ret = qla25xx_delete_req_que(vha, qpair->req);
9714 if (ret != QLA_SUCCESS)
9717 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
9718 if (ret != QLA_SUCCESS)
/* mq_lock guards the qpair id map and the per-host qp_list. */
9721 mutex_lock(&ha->mq_lock);
9722 ha->queue_pair_map[qpair->id] = NULL;
9723 clear_bit(qpair->id, ha->qpair_qid_map);
9725 list_del(&qpair->qp_list_elem);
9726 if (list_empty(&vha->qp_list)) {
9727 vha->flags.qpairs_available = 0;
9728 vha->flags.qpairs_req_created = 0;
9729 vha->flags.qpairs_rsp_created = 0;
9731 mempool_destroy(qpair->srb_mempool);
9733 mutex_unlock(&ha->mq_lock);
/*
 * qla2x00_count_set_bits - population count of @num.
 *
 * Uses Brian Kernighan's algorithm: num &= num - 1 clears the lowest
 * set bit, so the loop executes exactly once per set bit.
 *
 * @num: 32-bit value to count set bits in.
 * Returns the number of set bits (0..32).
 */
static uint32_t
qla2x00_count_set_bits(uint32_t num)
{
	/* Brian Kernighan's Algorithm */
	uint32_t count = 0;

	while (num) {
		num &= num - 1;
		count++;
	}

	return count;
}
/*
 * Count the fcports on this host whose port_type is FCT_TARGET.
 * NOTE(review): the counter increment and the return statement are
 * not visible in this span -- confirm against the full source.
 */
9754 qla2x00_get_num_tgts(scsi_qla_host_t *vha)
9762 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
9763 if (f->port_type != FCT_TARGET)
/*
 * qla2xxx_reset_stats - zero the vendor statistics counters selected
 * by @flags (QLA2XX_* bits).  Target short-link-down counters are
 * cleared per-fcport under the target session lock.
 */
9770 int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
9772 scsi_qla_host_t *vha = shost_priv(host);
9773 fc_port_t *fcport = NULL;
9774 unsigned long int_flags;
9776 if (flags & QLA2XX_HW_ERROR)
9777 vha->hw_err_cnt = 0;
9778 if (flags & QLA2XX_SHT_LNK_DWN)
9779 vha->short_link_down_cnt = 0;
9780 if (flags & QLA2XX_INT_ERR)
9781 vha->interface_err_cnt = 0;
9782 if (flags & QLA2XX_CMD_TIMEOUT)
9783 vha->cmd_timeout_cnt = 0;
9784 if (flags & QLA2XX_RESET_CMD_ERR)
9785 vha->reset_cmd_err_cnt = 0;
/* Per-target counters need the session lock while walking fcports. */
9786 if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
9787 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9788 list_for_each_entry(fcport, &vha->vp_fcports, list) {
9789 fcport->tgt_short_link_down_cnt = 0;
9790 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
9792 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
9794 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
/* Starting statistics collection is implemented as a counter reset. */
9798 int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
9800 return qla2xxx_reset_stats(host, flags);
/* Stopping statistics collection also just resets the counters. */
9803 int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
9805 return qla2xxx_reset_stats(host, flags);
/*
 * qla2xxx_get_ini_stats - fill a ql_vnd_host_stats_resp with one entry
 * per counter selected in @flags; initiator counters come first, then
 * (if BIT_17 / QLA2XX_TGT_SHT_LNK_DOWN is set) one entry per target
 * port.  @size is the caller-supplied buffer size.
 * NOTE(review): the size validation against entry_count is not visible
 * in this span -- confirm the full source checks it before filling.
 */
9808 int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
9809 void *data, u64 size)
9811 scsi_qla_host_t *vha = shost_priv(host);
9812 struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
9813 struct ql_vnd_stats *rsp_data = &resp->stats;
9814 u64 ini_entry_count = 0;
9816 u64 entry_count = 0;
9818 u32 tmp_stat_type = 0;
9819 fc_port_t *fcport = NULL;
9820 unsigned long int_flags;
9822 /* Copy stat type to work on it */
9823 tmp_stat_type = flags;
/* BIT_17 selects per-target entries; count targets separately. */
9825 if (tmp_stat_type & BIT_17) {
9826 num_tgt = qla2x00_get_num_tgts(vha);
9828 tmp_stat_type &= ~(1 << 17);
9830 ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
9832 entry_count = ini_entry_count + num_tgt;
9834 rsp_data->entry_count = entry_count;
9837 if (flags & QLA2XX_HW_ERROR) {
9838 rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
9839 rsp_data->entry[i].tgt_num = 0x0;
9840 rsp_data->entry[i].cnt = vha->hw_err_cnt;
9844 if (flags & QLA2XX_SHT_LNK_DWN) {
9845 rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
9846 rsp_data->entry[i].tgt_num = 0x0;
9847 rsp_data->entry[i].cnt = vha->short_link_down_cnt;
9851 if (flags & QLA2XX_INT_ERR) {
9852 rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
9853 rsp_data->entry[i].tgt_num = 0x0;
9854 rsp_data->entry[i].cnt = vha->interface_err_cnt;
9858 if (flags & QLA2XX_CMD_TIMEOUT) {
9859 rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
9860 rsp_data->entry[i].tgt_num = 0x0;
9861 rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
9865 if (flags & QLA2XX_RESET_CMD_ERR) {
9866 rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
9867 rsp_data->entry[i].tgt_num = 0x0;
9868 rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
9872 /* i will continue from previous loop, as target
9873 * entries are after initiator
9875 if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
9876 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9877 list_for_each_entry(fcport, &vha->vp_fcports, list) {
9878 if (fcport->port_type != FCT_TARGET)
9882 rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
9883 rsp_data->entry[i].tgt_num = fcport->rport->number;
9884 rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
9887 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
9889 resp->status = EXT_STATUS_OK;
/*
 * qla2xxx_get_tgt_stats - fill a single-entry ql_vnd_tgt_stats_resp
 * with the short-link-down counter of the target behind @rport.
 */
9894 int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
9895 struct fc_rport *rport, void *data, u64 size)
9897 struct ql_vnd_tgt_stats_resp *tgt_data = data;
/* dd_data of an FC rport holds a pointer to our fc_port_t. */
9898 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
9900 tgt_data->status = 0;
9901 tgt_data->stats.entry_count = 1;
9902 tgt_data->stats.entry[0].stat_type = flags;
9903 tgt_data->stats.entry[0].tgt_num = rport->number;
9904 tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;
/*
 * qla2xxx_disable_port - isolate this port: mark it port_isolated,
 * then (unless the PCI registers are unreachable or the chip is
 * already down) tear down the ISP and wait for session deletion.
 */
9909 int qla2xxx_disable_port(struct Scsi_Host *host)
9911 scsi_qla_host_t *vha = shost_priv(host);
9913 vha->hw->flags.port_isolated = 1;
/* All-ones register reads indicate a PCI/register disconnect. */
9915 if (qla2x00_isp_reg_stat(vha->hw)) {
9916 ql_log(ql_log_info, vha, 0x9006,
9917 "PCI/Register disconnect, exiting.\n");
9918 qla_pci_set_eeh_busy(vha);
9921 if (qla2x00_chip_is_down(vha))
9924 if (vha->flags.online) {
9925 qla2x00_abort_isp_cleanup(vha);
9926 qla2x00_wait_for_sess_deletion(vha);
9932 int qla2xxx_enable_port(struct Scsi_Host *host)
9934 scsi_qla_host_t *vha = shost_priv(host);
9936 if (qla2x00_isp_reg_stat(vha->hw)) {
9937 ql_log(ql_log_info, vha, 0x9001,
9938 "PCI/Register disconnect, exiting.\n");
9939 qla_pci_set_eeh_busy(vha);
9943 vha->hw->flags.port_isolated = 0;
9944 /* Set the flag to 1, so that isp_abort can proceed */
9945 vha->flags.online = 1;
9946 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9947 qla2xxx_wake_dpc(vha);