2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
9 #include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
27 * Returns a pointer to the @vha's ms_iocb.
30 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
/*
 * Build the legacy (pre-FWI2) MS IOCB in arg->iocb for an SNS CT
 * passthrough: one command DSD and one response DSD, addressed to the
 * Simple Name Server loop ID.
 * NOTE(review): some original lines (blank lines, the trailing return of
 * ms_pkt, closing brace) are not visible in this extract.
 */
32 struct qla_hw_data *ha = vha->hw;
33 ms_iocb_entry_t *ms_pkt;
35 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
36 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 ms_pkt->entry_type = MS_IOCB_TYPE;
39 ms_pkt->entry_count = 1;
40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* 2 * R_A_TOV; presumably r_a_tov is held in 100 ms units — confirm. */
42 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
43 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
/* Two DSDs total: one for the request buffer, one for the response. */
44 ms_pkt->total_dsd_count = cpu_to_le16(2);
45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
/* Request descriptor: low/high 32 bits of the DMA address + length. */
48 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
49 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
50 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
/* Response descriptor, same layout. */
52 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
53 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
54 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
/* Statistics: count every control-path (non-SCSI) request. */
56 vha->qla_stats.control_requests++;
62 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
66 * Returns a pointer to the @ha's ms_iocb.
69 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
/*
 * FWI2 (ISP24xx+) variant of qla2x00_prep_ms_iocb(): builds a CT-type
 * IOCB addressed by N-Port handle rather than loop ID, and carries the
 * issuing VP index for NPIV.
 * NOTE(review): blank lines and the trailing return/closing brace are not
 * visible in this extract.
 */
71 struct qla_hw_data *ha = vha->hw;
72 struct ct_entry_24xx *ct_pkt;
74 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
75 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
77 ct_pkt->entry_type = CT_IOCB_TYPE;
78 ct_pkt->entry_count = 1;
79 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
/* 2 * R_A_TOV; presumably r_a_tov is held in 100 ms units — confirm. */
80 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
81 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
82 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
83 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
84 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
/* DSD 0: CT request buffer (low/high DMA address + length). */
86 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
87 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
88 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
/* DSD 1: CT response buffer. */
90 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
91 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
92 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
/* Tag the IOCB with the virtual-port index of the issuing vha. */
93 ct_pkt->vp_index = vha->vp_idx;
95 vha->qla_stats.control_requests++;
101 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
102 * @p: CT request buffer
104 * @rsp_size: response size in bytes
106 * Returns a pointer to the initialized @ct_req.
108 static inline struct ct_sns_req *
109 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
/*
 * Zero the CT packet and fill in the common CT_IU preamble: revision,
 * GS type/subtype for the directory/name server, the CT command code,
 * and the maximum residual response size.
 * NOTE(review): the trailing return of &p->p.req and closing brace are
 * not visible in this extract.
 */
111 memset(p, 0, sizeof(struct ct_sns_pkt));
113 p->p.req.header.revision = 0x01;
/* 0xFC = Directory Service, subtype 0x02 = Name Server (FC-GS). */
114 p->p.req.header.gs_type = 0xFC;
115 p->p.req.header.gs_subtype = 0x02;
116 p->p.req.command = cpu_to_be16(cmd);
/* Max response size in 4-byte words, excluding the 16-byte CT header. */
117 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
123 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
124     struct ct_sns_rsp *ct_rsp, const char *routine)
/*
 * Validate the completion of an MS/CT IOCB: checks the IOCB entry status,
 * then dispatches on the firmware completion status, and finally checks
 * the CT response code for an accept.  Returns QLA_SUCCESS /
 * QLA_FUNCTION_FAILED / QLA_INVALID_COMMAND / QLA_NOT_LOGGED_IN /
 * QLA_FUNCTION_TIMEOUT (some paths not visible in this extract).
 * NOTE(review): many original lines are missing here — the declaration of
 * rval, several switch labels/breaks, and the final return — so control
 * flow below is fragmentary; do not infer ordering beyond what is shown.
 */
127 uint16_t comp_status;
128 struct qla_hw_data *ha = vha->hw;
/* Set on the logged-out path when the affected loop id is the SNS;
 * the lines that set/consume it are not visible in this extract. */
129 bool lid_is_sns = false;
131 rval = QLA_FUNCTION_FAILED;
132 if (ms_pkt->entry_status != 0) {
133 ql_dbg(ql_dbg_disc, vha, 0x2031,
134     "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
135     routine, ms_pkt->entry_status, vha->d_id.b.domain,
136     vha->d_id.b.area, vha->d_id.b.al_pa);
/* Completion status lives at different offsets in FWI2 CT IOCBs. */
138 if (IS_FWI2_CAPABLE(ha))
139 comp_status = le16_to_cpu(
140     ((struct ct_entry_24xx *)ms_pkt)->comp_status);
142 comp_status = le16_to_cpu(ms_pkt->status);
143 switch (comp_status) {
145 case CS_DATA_UNDERRUN:
146 case CS_DATA_OVERRUN: /* Overrun? */
/* Transport OK — now the CT payload itself must be an accept. */
147 if (ct_rsp->header.response !=
148     cpu_to_be16(CT_ACCEPT_RESPONSE)) {
149 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
150     "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
151     routine, vha->d_id.b.domain,
152     vha->d_id.b.area, vha->d_id.b.al_pa,
153     comp_status, ct_rsp->header.response);
154 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
155     0x2078, (uint8_t *)&ct_rsp->header,
156     sizeof(struct ct_rsp_hdr));
157 rval = QLA_INVALID_COMMAND;
161 case CS_PORT_LOGGED_OUT:
162 if (IS_FWI2_CAPABLE(ha)) {
163 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
167 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
/* Name server logged us out: force loop resync / local update. */
172 ql_dbg(ql_dbg_async, vha, 0x502b,
173     "%s failed, Name server has logged out",
175 rval = QLA_NOT_LOGGED_IN;
176 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
177 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
181 rval = QLA_FUNCTION_TIMEOUT;
184 ql_dbg(ql_dbg_disc, vha, 0x2033,
185     "%s failed, completion status (%x) on port_id: "
186     "%02x%02x%02x.\n", routine, comp_status,
187     vha->d_id.b.domain, vha->d_id.b.area,
196 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
198 * @fcport: fcport entry to be updated
200 * Returns 0 on success.
203 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
/*
 * Issue a GA_NXT (Get All Next) name-server query for the port after
 * fcport->d_id and populate *fcport from the reply (port id, node/port
 * WWNs, FC-4 type).  ISP2100/2200 fall back to the mailbox-based SNS
 * path.  Returns QLA_SUCCESS or an error code.
 * NOTE(review): blank lines, the ct_arg declaration and the final return
 * are not visible in this extract.
 */
207 ms_iocb_entry_t *ms_pkt;
208 struct ct_sns_req *ct_req;
209 struct ct_sns_rsp *ct_rsp;
210 struct qla_hw_data *ha = vha->hw;
/* Older ISPs have no CT passthrough; use the SNS mailbox command. */
213 if (IS_QLA2100(ha) || IS_QLA2200(ha))
214 return qla2x00_sns_ga_nxt(vha, fcport);
/* Request and response share the single pre-allocated ct_sns buffer. */
216 arg.iocb = ha->ms_iocb;
217 arg.req_dma = ha->ct_sns_dma;
218 arg.rsp_dma = ha->ct_sns_dma;
219 arg.req_size = GA_NXT_REQ_SIZE;
220 arg.rsp_size = GA_NXT_RSP_SIZE;
221 arg.nport_handle = NPH_SNS;
224 /* Prepare common MS IOCB */
225 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
227 /* Prepare CT request */
228 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
230 ct_rsp = &ha->ct_sns->p.rsp;
232 /* Prepare CT arguments -- port_id */
233 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
234 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
235 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
237 /* Execute MS IOCB */
238 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
239     sizeof(ms_iocb_entry_t));
240 if (rval != QLA_SUCCESS) {
242 ql_dbg(ql_dbg_disc, vha, 0x2062,
243     "GA_NXT issue IOCB failed (%d).\n", rval);
244 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
246 rval = QLA_FUNCTION_FAILED;
248 /* Populate fc_port_t entry. */
249 fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0];
250 fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
251 fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
253 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
255 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
/* BIT_0 of fc4_types word 2 indicates FCP (SCSI) support. */
258 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
259     FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
/* Non-N/NL port types are marked with an out-of-range domain so the
 * caller skips them. */
261 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
262     ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
263 fcport->d_id.b.domain = 0xf0;
265 ql_dbg(ql_dbg_disc, vha, 0x2063,
266     "GA_NXT entry - nn %8phN pn %8phN "
267     "port_id=%02x%02x%02x.\n",
268     fcport->node_name, fcport->port_name,
269     fcport->d_id.b.domain, fcport->d_id.b.area,
270     fcport->d_id.b.al_pa);
277 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
/* GID_PT response size: 16-byte CT header plus one 4-byte port-id entry
 * per supported fabric device. */
279 return vha->hw->max_fibre_devices * 4 + 16;
283 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
285 * @list: switch info entries to populate
287 * NOTE: Non-Nx_Ports are not requested.
289 * Returns 0 on success.
292 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
/*
 * Issue GID_PT (Get port IDs by Port Type) against the name server and
 * fill list[] with the fabric port ids.  ISP2100/2200 use the mailbox
 * SNS path instead.  Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED when
 * the IOCB/CT status is bad or the switch returned more entries than
 * list[] can hold.
 * NOTE(review): blank lines, the declarations of rval/i/arg and the
 * final return are not visible in this extract.
 */
297 ms_iocb_entry_t *ms_pkt;
298 struct ct_sns_req *ct_req;
299 struct ct_sns_rsp *ct_rsp;
301 struct ct_sns_gid_pt_data *gid_data;
302 struct qla_hw_data *ha = vha->hw;
303 uint16_t gid_pt_rsp_size;
306 if (IS_QLA2100(ha) || IS_QLA2200(ha))
307 return qla2x00_sns_gid_pt(vha, list);
310 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
312 arg.iocb = ha->ms_iocb;
313 arg.req_dma = ha->ct_sns_dma;
314 arg.rsp_dma = ha->ct_sns_dma;
315 arg.req_size = GID_PT_REQ_SIZE;
316 arg.rsp_size = gid_pt_rsp_size;
317 arg.nport_handle = NPH_SNS;
320 /* Prepare common MS IOCB */
321 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
323 /* Prepare CT request */
324 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
325 ct_rsp = &ha->ct_sns->p.rsp;
327 /* Prepare CT arguments -- port_type */
328 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
330 /* Execute MS IOCB */
331 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
332     sizeof(ms_iocb_entry_t));
333 if (rval != QLA_SUCCESS) {
335 ql_dbg(ql_dbg_disc, vha, 0x2055,
336     "GID_PT issue IOCB failed (%d).\n", rval);
337 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
339 rval = QLA_FUNCTION_FAILED;
341 /* Set port IDs in switch info list. */
342 for (i = 0; i < ha->max_fibre_devices; i++) {
343 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
344 list[i].d_id.b.domain = gid_data->port_id[0];
345 list[i].d_id.b.area = gid_data->port_id[1];
346 list[i].d_id.b.al_pa = gid_data->port_id[2];
347 memset(list[i].fabric_port_name, 0, WWN_SIZE);
348 list[i].fp_speed = PORT_SPEED_UNKNOWN;
/* BIT_7 in the control byte marks the last entry of the reply. */
351 if (gid_data->control_byte & BIT_7) {
352 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
358 /*
359  * If we've used all available slots, then the switch is
360  * reporting back more devices than we can handle with this
361  * single call. Return a failed status, and let GA_NXT handle
362  * the overload.
363  */
363 if (i == ha->max_fibre_devices)
364 rval = QLA_FUNCTION_FAILED;
371 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
373 * @list: switch info entries to populate
375 * Returns 0 on success.
378 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*
 * For each entry in list[], issue GPN_ID (Get Port Name by port id) and
 * store the returned WWPN in list[i].port_name.  Stops at the entry
 * flagged as last (rsvd_1 != 0).  ISP2100/2200 use the mailbox SNS path.
 * Returns QLA_SUCCESS or the last failing status.
 * NOTE(review): blank lines, the declarations of i/arg, loop braces and
 * the final return are not visible in this extract.
 */
380 int rval = QLA_SUCCESS;
383 ms_iocb_entry_t *ms_pkt;
384 struct ct_sns_req *ct_req;
385 struct ct_sns_rsp *ct_rsp;
386 struct qla_hw_data *ha = vha->hw;
389 if (IS_QLA2100(ha) || IS_QLA2200(ha))
390 return qla2x00_sns_gpn_id(vha, list);
392 arg.iocb = ha->ms_iocb;
393 arg.req_dma = ha->ct_sns_dma;
394 arg.rsp_dma = ha->ct_sns_dma;
395 arg.req_size = GPN_ID_REQ_SIZE;
396 arg.rsp_size = GPN_ID_RSP_SIZE;
397 arg.nport_handle = NPH_SNS;
399 for (i = 0; i < ha->max_fibre_devices; i++) {
401 /* Prepare common MS IOCB */
402 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
404 /* Prepare CT request */
405 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
407 ct_rsp = &ha->ct_sns->p.rsp;
409 /* Prepare CT arguments -- port_id */
410 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
411 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
412 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
414 /* Execute MS IOCB */
415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
416     sizeof(ms_iocb_entry_t));
417 if (rval != QLA_SUCCESS) {
419 ql_dbg(ql_dbg_disc, vha, 0x2056,
420     "GPN_ID issue IOCB failed (%d).\n", rval);
422 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
423     "GPN_ID") != QLA_SUCCESS) {
424 rval = QLA_FUNCTION_FAILED;
428 memcpy(list[i].port_name,
429     ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
432 /* Last device exit. */
433 if (list[i].d_id.b.rsvd_1 != 0)
441 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
443 * @list: switch info entries to populate
445 * Returns 0 on success.
448 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*
 * For each entry in list[], issue GNN_ID (Get Node Name by port id) and
 * store the returned WWNN in list[i].node_name, logging the completed
 * GID_PT entry.  Stops at the entry flagged as last (rsvd_1 != 0).
 * ISP2100/2200 use the mailbox SNS path.  Returns QLA_SUCCESS or the
 * last failing status.
 * NOTE(review): blank lines, the declarations of i/arg, loop braces and
 * the final return are not visible in this extract.
 */
450 int rval = QLA_SUCCESS;
452 struct qla_hw_data *ha = vha->hw;
453 ms_iocb_entry_t *ms_pkt;
454 struct ct_sns_req *ct_req;
455 struct ct_sns_rsp *ct_rsp;
458 if (IS_QLA2100(ha) || IS_QLA2200(ha))
459 return qla2x00_sns_gnn_id(vha, list);
461 arg.iocb = ha->ms_iocb;
462 arg.req_dma = ha->ct_sns_dma;
463 arg.rsp_dma = ha->ct_sns_dma;
464 arg.req_size = GNN_ID_REQ_SIZE;
465 arg.rsp_size = GNN_ID_RSP_SIZE;
466 arg.nport_handle = NPH_SNS;
468 for (i = 0; i < ha->max_fibre_devices; i++) {
470 /* Prepare common MS IOCB */
471 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
473 /* Prepare CT request */
474 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
476 ct_rsp = &ha->ct_sns->p.rsp;
478 /* Prepare CT arguments -- port_id */
479 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
480 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
481 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
483 /* Execute MS IOCB */
484 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
485     sizeof(ms_iocb_entry_t));
486 if (rval != QLA_SUCCESS) {
488 ql_dbg(ql_dbg_disc, vha, 0x2057,
489     "GNN_ID issue IOCB failed (%d).\n", rval);
491 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
492     "GNN_ID") != QLA_SUCCESS) {
493 rval = QLA_FUNCTION_FAILED;
497 memcpy(list[i].node_name,
498     ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
500 ql_dbg(ql_dbg_disc, vha, 0x2058,
501     "GID_PT entry - nn %8phN pn %8phN "
502     "portid=%02x%02x%02x.\n",
503     list[i].node_name, list[i].port_name,
504     list[i].d_id.b.domain, list[i].d_id.b.area,
505     list[i].d_id.b.al_pa);
508 /* Last device exit. */
509 if (list[i].d_id.b.rsvd_1 != 0)
516 static void qla2x00_async_sns_sp_done(void *s, int rc)
/*
 * Completion callback for async SNS CT passthrough srbs.  On timeout the
 * response buffer is cleared and the command is retried (up to 3 times)
 * via a deferred QLA_EVT_SP_RETRY work item; otherwise the DMA request
 * and response buffers are released, either directly or via a deferred
 * QLA_EVT_UNMAP work item.
 * NOTE(review): several original lines are missing here (the srb_t *sp
 * assignment from s, retry_count increment, braces/early returns), so
 * the exact control flow between the branches below cannot be fully
 * confirmed from this extract.
 */
519 struct scsi_qla_host *vha = sp->vha;
520 struct ct_sns_pkt *ct_sns;
521 struct qla_work_evt *e;
524 if (rc == QLA_SUCCESS) {
525 ql_dbg(ql_dbg_disc, vha, 0x204f,
526     "Async done-%s exiting normally.\n",
528 } else if (rc == QLA_FUNCTION_TIMEOUT) {
529 ql_dbg(ql_dbg_disc, vha, 0x204f,
530     "Async done-%s timeout\n", sp->name);
/* Wipe any stale response before re-issuing the command. */
532 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
533 memset(ct_sns, 0, sizeof(*ct_sns));
/* Give up after 3 retries; presumably falls through to cleanup. */
535 if (sp->retry_count > 3)
538 ql_dbg(ql_dbg_disc, vha, 0x204f,
539     "Async done-%s fail rc %x. Retry count %d\n",
540     sp->name, rc, sp->retry_count);
542 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
/* Retry is deferred to the DPC thread; stop the srb timer first. */
546 del_timer(&sp->u.iocb_cmd.timer);
548 qla2x00_post_work(vha, e);
/* No work element available: free the DMA buffers inline.  The
 * dma_free_coherent() calls may run in atomic context here — hence
 * the warning note below. */
553 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
556 /* please ignore kernel warning. otherwise, we have mem leak. */
557 if (sp->u.iocb_cmd.u.ctarg.req) {
558 dma_free_coherent(&vha->hw->pdev->dev,
559     sp->u.iocb_cmd.u.ctarg.req_allocated_size,
560     sp->u.iocb_cmd.u.ctarg.req,
561     sp->u.iocb_cmd.u.ctarg.req_dma);
562 sp->u.iocb_cmd.u.ctarg.req = NULL;
565 if (sp->u.iocb_cmd.u.ctarg.rsp) {
566 dma_free_coherent(&vha->hw->pdev->dev,
567     sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
568     sp->u.iocb_cmd.u.ctarg.rsp,
569     sp->u.iocb_cmd.u.ctarg.rsp_dma);
570 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
579 qla2x00_post_work(vha, e);
583 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
586 * Returns 0 on success.
589 qla2x00_rft_id(scsi_qla_host_t *vha)
/*
 * Register the HBA's supported FC-4 types with the name server.
 * ISP2100/2200 use the mailbox SNS path; everything else goes through
 * the async RFT_ID implementation.  Returns QLA_SUCCESS on success.
 */
591 struct qla_hw_data *ha = vha->hw;
593 if (IS_QLA2100(ha) || IS_QLA2200(ha))
594 return qla2x00_sns_rft_id(vha);
596 return qla_async_rftid(vha, &vha->d_id);
599 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
/*
 * Asynchronously register FC-4 TYPEs (RFT_ID) with the fabric name
 * server using a CT passthrough srb.  Request/response buffers are
 * DMA-coherent allocations owned by the srb and freed by the completion
 * handler.  Returns QLA_SUCCESS when the IOCB was started, otherwise an
 * error code (QLA_MEMORY_ALLOC_FAILED on allocation failure).
 * NOTE(review): the srb_t *sp declaration, sp->name assignment,
 * GFP flags of the allocations, error-path labels and final return are
 * not visible in this extract.
 */
601 int rval = QLA_MEMORY_ALLOC_FAILED;
602 struct ct_sns_req *ct_req;
604 struct ct_sns_pkt *ct_sns;
/* Registration only makes sense while the port is up. */
606 if (!vha->flags.online)
609 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
613 sp->type = SRB_CT_PTHRU_CMD;
615 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
617 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
618     sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
620 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
621 if (!sp->u.iocb_cmd.u.ctarg.req) {
622 ql_log(ql_log_warn, vha, 0xd041,
623     "%s: Failed to allocate ct_sns request.\n",
628 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
629     sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
631 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
632 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
633 ql_log(ql_log_warn, vha, 0xd042,
634     "%s: Failed to allocate ct_sns request.\n",
/* Zero the response, then build the request in the req buffer. */
638 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
639 memset(ct_sns, 0, sizeof(*ct_sns));
640 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
642 /* Prepare CT request */
643 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
645 /* Prepare CT arguments -- port_id, FC-4 types */
646 ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
647 ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
648 ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
649 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
651 if (vha->flags.nvme_enabled)
652 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
654 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
655 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
656 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
657 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
658 sp->done = qla2x00_async_sns_sp_done;
660 rval = qla2x00_start_sp(sp);
661 if (rval != QLA_SUCCESS) {
662 ql_dbg(ql_dbg_disc, vha, 0x2043,
663     "RFT_ID issue IOCB failed (%d).\n", rval);
666 ql_dbg(ql_dbg_disc, vha, 0xffff,
667     "Async-%s - hdl=%x portid %06x.\n",
668     sp->name, sp->handle, d_id->b24);
677 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
681 * Returns 0 on success.
684 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
/*
 * Register the HBA's FC-4 Features (RFF_ID) with the name server.
 * There is no SNS-mailbox equivalent, so ISP2100/2200 simply succeed
 * without registering.  The feature bits come from qlt_rff_id() (target
 * mode aware).  Returns QLA_SUCCESS on success.
 */
686 struct qla_hw_data *ha = vha->hw;
688 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
689 ql_dbg(ql_dbg_disc, vha, 0x2046,
690     "RFF_ID call not supported on ISP2100/ISP2200.\n");
691 return (QLA_SUCCESS);
694 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
698 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
699     u8 fc4feature, u8 fc4type)
/*
 * Asynchronously register FC-4 Features (RFF_ID) for the given port id
 * with the fabric name server via a CT passthrough srb.  DMA buffers are
 * owned by the srb and released by the completion handler.  Returns
 * QLA_SUCCESS when the IOCB was started, otherwise an error code.
 * NOTE(review): the srb_t *sp declaration, sp->name assignment, GFP
 * flags, error-path labels and final return are not visible in this
 * extract.
 */
701 int rval = QLA_MEMORY_ALLOC_FAILED;
702 struct ct_sns_req *ct_req;
704 struct ct_sns_pkt *ct_sns;
706 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
710 sp->type = SRB_CT_PTHRU_CMD;
712 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
714 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
715     sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
717 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
718 if (!sp->u.iocb_cmd.u.ctarg.req) {
719 ql_log(ql_log_warn, vha, 0xd041,
720     "%s: Failed to allocate ct_sns request.\n",
725 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
726     sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
728 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
729 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
730 ql_log(ql_log_warn, vha, 0xd042,
731     "%s: Failed to allocate ct_sns request.\n",
/* Zero the response, then build the request in the req buffer. */
735 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
736 memset(ct_sns, 0, sizeof(*ct_sns));
737 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
739 /* Prepare CT request */
740 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
742 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
743 ct_req->req.rff_id.port_id[0] = d_id->b.domain;
744 ct_req->req.rff_id.port_id[1] = d_id->b.area;
745 ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
746 ct_req->req.rff_id.fc4_feature = fc4feature;
747 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
749 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
750 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
751 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
752 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
753 sp->done = qla2x00_async_sns_sp_done;
755 rval = qla2x00_start_sp(sp);
756 if (rval != QLA_SUCCESS) {
757 ql_dbg(ql_dbg_disc, vha, 0x2047,
758     "RFF_ID issue IOCB failed (%d).\n", rval);
762 ql_dbg(ql_dbg_disc, vha, 0xffff,
763     "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
764     sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
774 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
777 * Returns 0 on success.
780 qla2x00_rnn_id(scsi_qla_host_t *vha)
/*
 * Register the HBA's Node Name (RNN_ID) with the name server.
 * ISP2100/2200 use the mailbox SNS path; otherwise the async RNN_ID
 * implementation is used.  Returns QLA_SUCCESS on success.
 */
782 struct qla_hw_data *ha = vha->hw;
784 if (IS_QLA2100(ha) || IS_QLA2200(ha))
785 return qla2x00_sns_rnn_id(vha);
787 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
790 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
/*
 * Asynchronously register the Node Name (RNN_ID) for the given port id
 * with the fabric name server via a CT passthrough srb.  DMA buffers are
 * owned by the srb and released by the completion handler.  Returns
 * QLA_SUCCESS when the IOCB was started, otherwise an error code.
 * NOTE(review): the node-name parameter line, srb_t *sp declaration,
 * sp->name assignment, GFP flags, error-path labels and final return are
 * not visible in this extract.  Note the registration uses vha->d_id /
 * vha->node_name directly rather than the passed arguments.
 */
793 int rval = QLA_MEMORY_ALLOC_FAILED;
794 struct ct_sns_req *ct_req;
796 struct ct_sns_pkt *ct_sns;
798 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
802 sp->type = SRB_CT_PTHRU_CMD;
804 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
806 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
807     sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
809 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
810 if (!sp->u.iocb_cmd.u.ctarg.req) {
811 ql_log(ql_log_warn, vha, 0xd041,
812     "%s: Failed to allocate ct_sns request.\n",
817 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
818     sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
820 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
821 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
822 ql_log(ql_log_warn, vha, 0xd042,
823     "%s: Failed to allocate ct_sns request.\n",
/* Zero the response, then build the request in the req buffer. */
827 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
828 memset(ct_sns, 0, sizeof(*ct_sns));
829 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
831 /* Prepare CT request */
832 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
834 /* Prepare CT arguments -- port_id, node_name */
835 ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
836 ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
837 ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
838 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
840 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
841 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
842 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
844 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
845 sp->done = qla2x00_async_sns_sp_done;
847 rval = qla2x00_start_sp(sp);
848 if (rval != QLA_SUCCESS) {
849 ql_dbg(ql_dbg_disc, vha, 0x204d,
850     "RNN_ID issue IOCB failed (%d).\n", rval);
853 ql_dbg(ql_dbg_disc, vha, 0xffff,
854     "Async-%s - hdl=%x portid %06x\n",
855     sp->name, sp->handle, d_id->b24);
866 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
/*
 * Format the symbolic node name "model FW:version DVR:version" into snn
 * (at most size bytes, NUL-terminated by snprintf).  The first branch —
 * presumably an ISPFx00 check, its condition is not visible in this
 * extract — uses the string firmware version from ha->mr; other adapters
 * format the numeric major/minor/subminor firmware version.
 */
868 struct qla_hw_data *ha = vha->hw;
871 snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
872     ha->mr.fw_version, qla2x00_version_str);
875     "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
876     ha->fw_major_version, ha->fw_minor_version,
877     ha->fw_subminor_version, qla2x00_version_str);
881 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
884 * Returns 0 on success.
887 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
/*
 * Register the HBA's Symbolic Node Name (RSNN_NN) with the name server.
 * There is no SNS-mailbox equivalent, so ISP2100/2200 simply succeed
 * without registering.  Returns QLA_SUCCESS on success.
 */
889 struct qla_hw_data *ha = vha->hw;
891 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
892 ql_dbg(ql_dbg_disc, vha, 0x2050,
893     "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
894 return (QLA_SUCCESS);
897 return qla_async_rsnn_nn(vha);
900 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
/*
 * Asynchronously register the Symbolic Node Name (RSNN_NN) with the
 * fabric name server via a CT passthrough srb.  The symbolic name is
 * "model FW:vX DVR:vY" built by qla2x00_get_sym_node_name(); the request
 * size is the 24-byte fixed RSNN_NN prefix + 1 length byte + the name.
 * DMA buffers are owned by the srb and released by the completion
 * handler.  Returns QLA_SUCCESS when the IOCB was started, otherwise an
 * error code (QLA_MEMORY_ALLOC_FAILED on allocation failure).
 * Fix: the failure log below said "RFT_ID" (copy-paste from
 * qla_async_rftid); it now correctly names RSNN_NN.
 * NOTE(review): the srb_t *sp declaration, GFP flags, error-path labels
 * and final return are not visible in this extract.
 */
902 int rval = QLA_MEMORY_ALLOC_FAILED;
903 struct ct_sns_req *ct_req;
905 struct ct_sns_pkt *ct_sns;
907 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
911 sp->type = SRB_CT_PTHRU_CMD;
912 sp->name = "rsnn_nn";
913 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
915 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
916     sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
918 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
919 if (!sp->u.iocb_cmd.u.ctarg.req) {
920 ql_log(ql_log_warn, vha, 0xd041,
921     "%s: Failed to allocate ct_sns request.\n",
926 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
927     sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
929 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
930 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
931 ql_log(ql_log_warn, vha, 0xd042,
932     "%s: Failed to allocate ct_sns request.\n",
/* Zero the response, then build the request in the req buffer. */
936 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
937 memset(ct_sns, 0, sizeof(*ct_sns));
938 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
940 /* Prepare CT request */
941 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
943 /* Prepare CT arguments -- node_name, symbolic node_name, size */
944 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
946 /* Prepare the Symbolic Node Name */
947 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
948     sizeof(ct_req->req.rsnn_nn.sym_node_name));
949 ct_req->req.rsnn_nn.name_len =
950     (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
/* Request size: 24-byte fixed portion + length byte + symbolic name. */
953 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
954 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
955 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
957 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
958 sp->done = qla2x00_async_sns_sp_done;
960 rval = qla2x00_start_sp(sp);
961 if (rval != QLA_SUCCESS) {
962 ql_dbg(ql_dbg_disc, vha, 0x2043,
963     "RSNN_NN issue IOCB failed (%d).\n", rval);
966 ql_dbg(ql_dbg_disc, vha, 0xffff,
967     "Async-%s - hdl=%x.\n",
968     sp->name, sp->handle);
979 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
982 * @scmd_len: Subcommand length
983 * @data_size: response size in bytes
985 * Returns a pointer to the @ha's sns_cmd.
987 static inline struct sns_cmd_pkt *
988 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
/*
 * Build the common header of a mailbox-era SNS command packet in
 * ha->sns_cmd: buffer length/address, subcommand code and length, and
 * the expected response size.  Used only by the ISP2100/2200 paths.
 * NOTE(review): the data_size parameter line, the uint16_t wc
 * declaration, the trailing return of sns_cmd and closing brace are not
 * visible in this extract.
 */
992 struct sns_cmd_pkt *sns_cmd;
993 struct qla_hw_data *ha = vha->hw;
995 sns_cmd = ha->sns_cmd;
996 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
997 wc = data_size / 2; /* Size in 16bit words. */
998 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
999 sns_cmd->p.cmd.buffer_address[0] = cpu_to_le32(LSD(ha->sns_cmd_dma));
1000 sns_cmd->p.cmd.buffer_address[1] = cpu_to_le32(MSD(ha->sns_cmd_dma));
1001 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
1002 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
1003 wc = (data_size - 16) / 4; /* Size in 32bit words. */
1004 sns_cmd->p.cmd.size = cpu_to_le16(wc);
1006 vha->qla_stats.control_requests++;
1012 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
1014 * @fcport: fcport entry to be updated
1016 * This command uses the old Execute SNS Command mailbox routine.
1018 * Returns 0 on success.
1021 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
/*
 * ISP2100/2200 GA_NXT via the Execute-SNS mailbox command.  Populates
 * *fcport from the raw SNS reply buffer at fixed byte offsets; bytes
 * 8/9 must be 0x80/0x02 (CT accept) for a valid response.  Returns
 * QLA_SUCCESS or an error code.
 * NOTE(review): blank lines and the final return are not visible in
 * this extract.
 */
1023 int rval = QLA_SUCCESS;
1024 struct qla_hw_data *ha = vha->hw;
1025 struct sns_cmd_pkt *sns_cmd;
1028 /* Prepare SNS command request. */
1029 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1030     GA_NXT_SNS_DATA_SIZE);
/* SNS mailbox parameters take the port id little-end first. */
1032 /* Prepare SNS command arguments -- port_id. */
1033 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1034 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1035 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1037 /* Execute SNS command. */
1038 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1039     sizeof(struct sns_cmd_pkt));
1040 if (rval != QLA_SUCCESS) {
1042 ql_dbg(ql_dbg_disc, vha, 0x205f,
1043     "GA_NXT Send SNS failed (%d).\n", rval);
1044 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1045     sns_cmd->p.gan_data[9] != 0x02) {
1046 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1047     "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1048 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1049     sns_cmd->p.gan_data, 16);
1050 rval = QLA_FUNCTION_FAILED;
1052 /* Populate fc_port_t entry. */
1053 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1054 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1055 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1057 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1058 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
/* Non-N/NL port types get an out-of-range domain so callers skip them. */
1060 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1061     sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1062 fcport->d_id.b.domain = 0xf0;
1064 ql_dbg(ql_dbg_disc, vha, 0x2061,
1065     "GA_NXT entry - nn %8phN pn %8phN "
1066     "port_id=%02x%02x%02x.\n",
1067     fcport->node_name, fcport->port_name,
1068     fcport->d_id.b.domain, fcport->d_id.b.area,
1069     fcport->d_id.b.al_pa);
1076 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1078 * @list: switch info entries to populate
1080 * This command uses the old Execute SNS Command mailbox routine.
1082 * NOTE: Non-Nx_Ports are not requested.
1084 * Returns 0 on success.
1087 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
/*
 * ISP2100/2200 GID_PT via the Execute-SNS mailbox command.  Parses the
 * raw reply into list[]: 4 bytes per entry starting at offset 16, with
 * BIT_7 of the first byte marking the last entry.  Returns QLA_SUCCESS,
 * or QLA_FUNCTION_FAILED on a bad reply or when the switch reported
 * more devices than list[] can hold.
 * NOTE(review): the declarations of rval/i/entry and the final return
 * are not visible in this extract.
 */
1090 struct qla_hw_data *ha = vha->hw;
1093 struct sns_cmd_pkt *sns_cmd;
1094 uint16_t gid_pt_sns_data_size;
1096 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1099 /* Prepare SNS command request. */
1100 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1101     gid_pt_sns_data_size);
1103 /* Prepare SNS command arguments -- port_type. */
1104 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1106 /* Execute SNS command. */
1107 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1108     sizeof(struct sns_cmd_pkt));
1109 if (rval != QLA_SUCCESS) {
1111 ql_dbg(ql_dbg_disc, vha, 0x206d,
1112     "GID_PT Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of a good reply are 0x80 0x02 (CT accept). */
1113 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1114     sns_cmd->p.gid_data[9] != 0x02) {
1115 ql_dbg(ql_dbg_disc, vha, 0x202f,
1116     "GID_PT failed, rejected request, gid_rsp:\n");
1117 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1118     sns_cmd->p.gid_data, 16);
1119 rval = QLA_FUNCTION_FAILED;
1121 /* Set port IDs in switch info list. */
1122 for (i = 0; i < ha->max_fibre_devices; i++) {
1123 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1124 list[i].d_id.b.domain = entry[1];
1125 list[i].d_id.b.area = entry[2];
1126 list[i].d_id.b.al_pa = entry[3];
1128 /* Last one exit. */
1129 if (entry[0] & BIT_7) {
1130 list[i].d_id.b.rsvd_1 = entry[0];
1136 /*
1137  * If we've used all available slots, then the switch is
1138  * reporting back more devices than we can handle with this
1139  * single call. Return a failed status, and let GA_NXT handle
1140  * the overload.
1141  */
1141 if (i == ha->max_fibre_devices)
1142 rval = QLA_FUNCTION_FAILED;
1149 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1151 * @list: switch info entries to populate
1153 * This command uses the old Execute SNS Command mailbox routine.
1155 * Returns 0 on success.
1158 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*
 * ISP2100/2200 GPN_ID via the Execute-SNS mailbox command, one query
 * per list[] entry; the WWPN lands at offset 16 of the reply.  Stops at
 * the entry flagged as last (rsvd_1 != 0).  Returns QLA_SUCCESS or the
 * last failing status.
 * NOTE(review): the declaration of i, loop braces and the final return
 * are not visible in this extract.
 */
1160 int rval = QLA_SUCCESS;
1161 struct qla_hw_data *ha = vha->hw;
1163 struct sns_cmd_pkt *sns_cmd;
1165 for (i = 0; i < ha->max_fibre_devices; i++) {
1167 /* Prepare SNS command request. */
1168 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1169     GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1171 /* Prepare SNS command arguments -- port_id. */
1172 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1173 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1174 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1176 /* Execute SNS command. */
1177 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1178     GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1179 if (rval != QLA_SUCCESS) {
1181 ql_dbg(ql_dbg_disc, vha, 0x2032,
1182     "GPN_ID Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of a good reply are 0x80 0x02 (CT accept). */
1183 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1184     sns_cmd->p.gpn_data[9] != 0x02) {
1185 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1186     "GPN_ID failed, rejected request, gpn_rsp:\n");
1187 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1188     sns_cmd->p.gpn_data, 16);
1189 rval = QLA_FUNCTION_FAILED;
1192 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1196 /* Last device exit. */
1197 if (list[i].d_id.b.rsvd_1 != 0)
1205 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1207 * @list: switch info entries to populate
1209 * This command uses the old Execute SNS Command mailbox routine.
1211 * Returns 0 on success.
1214 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*
 * ISP2100/2200 GNN_ID via the Execute-SNS mailbox command, one query
 * per list[] entry; the WWNN lands at offset 16 of the reply.  Logs the
 * completed GID_PT entry and stops at the entry flagged as last
 * (rsvd_1 != 0).  Returns QLA_SUCCESS or the last failing status.
 * NOTE(review): the declaration of i, loop braces and the final return
 * are not visible in this extract.
 */
1216 int rval = QLA_SUCCESS;
1217 struct qla_hw_data *ha = vha->hw;
1219 struct sns_cmd_pkt *sns_cmd;
1221 for (i = 0; i < ha->max_fibre_devices; i++) {
1223 /* Prepare SNS command request. */
1224 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1225     GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1227 /* Prepare SNS command arguments -- port_id. */
1228 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1229 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1230 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1232 /* Execute SNS command. */
1233 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1234     GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1235 if (rval != QLA_SUCCESS) {
1237 ql_dbg(ql_dbg_disc, vha, 0x203f,
1238     "GNN_ID Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of a good reply are 0x80 0x02 (CT accept). */
1239 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1240     sns_cmd->p.gnn_data[9] != 0x02) {
1241 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1242     "GNN_ID failed, rejected request, gnn_rsp:\n");
1243 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1244     sns_cmd->p.gnn_data, 16);
1245 rval = QLA_FUNCTION_FAILED;
1248 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1251 ql_dbg(ql_dbg_disc, vha, 0x206e,
1252     "GID_PT entry - nn %8phN pn %8phN "
1253     "port_id=%02x%02x%02x.\n",
1254     list[i].node_name, list[i].port_name,
1255     list[i].d_id.b.domain, list[i].d_id.b.area,
1256     list[i].d_id.b.al_pa);
1259 /* Last device exit. */
1260 if (list[i].d_id.b.rsvd_1 != 0)
1268 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1271 * This command uses the old Execute SNS Command mailbox routine.
1273 * Returns 0 on success.
1276 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1279 struct qla_hw_data *ha = vha->hw;
1280 struct sns_cmd_pkt *sns_cmd;
1283 /* Prepare SNS command request. */
1284 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1285 RFT_ID_SNS_DATA_SIZE);
1287 /* Prepare SNS command arguments -- port_id, FC-4 types */
/* Our own port ID, least-significant byte first. */
1288 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1289 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1290 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1292 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1294 /* Execute SNS command. */
1295 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1296 sizeof(struct sns_cmd_pkt));
1297 if (rval != QLA_SUCCESS) {
1299 ql_dbg(ql_dbg_disc, vha, 0x2060,
1300 "RFT_ID Send SNS failed (%d).\n", rval);
/* CT response bytes 8-9 must be 0x80 0x02 (CT Accept); else reject. */
1301 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1302 sns_cmd->p.rft_data[9] != 0x02) {
1303 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1304 "RFT_ID failed, rejected request rft_rsp:\n");
1305 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1306 sns_cmd->p.rft_data, 16);
1307 rval = QLA_FUNCTION_FAILED;
1309 ql_dbg(ql_dbg_disc, vha, 0x2073,
1310 "RFT_ID exiting normally.\n");
1317 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1320 * This command uses the old Execute SNS Command mailbox routine.
1322 * Returns 0 on success.
1325 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1328 struct qla_hw_data *ha = vha->hw;
1329 struct sns_cmd_pkt *sns_cmd;
1332 /* Prepare SNS command request. */
1333 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1334 RNN_ID_SNS_DATA_SIZE);
1336 /* Prepare SNS command arguments -- port_id, nodename. */
1337 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1338 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1339 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
/* Node name WWN is loaded byte-reversed into params 4..11. */
1341 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1342 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1343 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1344 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1345 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1346 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1347 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1348 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1350 /* Execute SNS command. */
1351 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1352 sizeof(struct sns_cmd_pkt));
1353 if (rval != QLA_SUCCESS) {
1355 ql_dbg(ql_dbg_disc, vha, 0x204a,
1356 "RNN_ID Send SNS failed (%d).\n", rval);
/* CT response bytes 8-9 must be 0x80 0x02 (CT Accept); else reject. */
1357 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1358 sns_cmd->p.rnn_data[9] != 0x02) {
1359 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1360 "RNN_ID failed, rejected request, rnn_rsp:\n");
1361 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1362 sns_cmd->p.rnn_data, 16);
1363 rval = QLA_FUNCTION_FAILED;
1365 ql_dbg(ql_dbg_disc, vha, 0x204c,
1366 "RNN_ID exiting normally.\n");
1373 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1376 * Returns 0 on success.
1379 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1382 uint16_t mb[MAILBOX_REGISTER_COUNT];
1383 struct qla_hw_data *ha = vha->hw;
/* Idempotent: skip the fabric login if we are already logged in. */
1385 if (vha->flags.management_server_logged_in)
/* 0xff/0xff/0xfa is the well-known management-server address. */
1388 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1390 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1391 if (rval == QLA_MEMORY_ALLOC_FAILED)
1392 ql_dbg(ql_dbg_disc, vha, 0x2085,
1393 "Failed management_server login: loopid=%x "
1394 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1396 ql_dbg(ql_dbg_disc, vha, 0x2024,
1397 "Failed management_server login: loopid=%x "
1398 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1399 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
/* NOTE(review): 'ret' (not 'rval') carries the return status here --
 * confirm both locals are declared above and 'ret' is what is returned. */
1401 ret = QLA_FUNCTION_FAILED;
1403 vha->flags.management_server_logged_in = 1;
1409 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1411 * @req_size: request size in bytes
1412 * @rsp_size: response size in bytes
1414 * Returns a pointer to the @ha's ms_iocb.
1417 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1420 ms_iocb_entry_t *ms_pkt;
1421 struct qla_hw_data *ha = vha->hw;
1422 ms_pkt = ha->ms_iocb;
1423 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1425 ms_pkt->entry_type = MS_IOCB_TYPE;
1426 ms_pkt->entry_count = 1;
/* Addressed to the management server, not the simple name server. */
1427 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1428 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* Timeout is twice R_A_TOV; r_a_tov appears to be stored in 100ms
 * units (same formula as qla2x00_prep_ms_iocb) -- confirm. */
1429 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1430 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1431 ms_pkt->total_dsd_count = cpu_to_le16(2);
1432 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1433 ms_pkt->req_bytecount = cpu_to_le32(req_size);
/* Request and response share the single ct_sns DMA buffer. */
1435 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1436 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1437 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1439 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1440 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1441 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
1447 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1449 * @req_size: request size in bytes
1450 * @rsp_size: response size in bytes
1452 * Returns a pointer to the @ha's ms_iocb.
1455 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1458 struct ct_entry_24xx *ct_pkt;
1459 struct qla_hw_data *ha = vha->hw;
/* FWI2 (24xx+) parts use a CT pass-through IOCB laid over ms_iocb. */
1461 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1462 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1464 ct_pkt->entry_type = CT_IOCB_TYPE;
1465 ct_pkt->entry_count = 1;
1466 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1467 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1468 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1469 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1470 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1471 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
/* Command and response descriptors both point at the ct_sns buffer. */
1473 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1474 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1475 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1477 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1478 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1479 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
/* Tag the IOCB with this vport's index for NPIV routing. */
1480 ct_pkt->vp_index = vha->vp_idx;
/*
 * qla2x00_update_ms_fdmi_iocb() - Patch the final request size into a
 * previously prepared FDMI MS/CT IOCB (the size is only known after the
 * CT attribute block has been built).
 * @req_size: final request size in bytes
 */
1485 static inline ms_iocb_entry_t *
1486 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1488 struct qla_hw_data *ha = vha->hw;
/* Same buffer viewed as either IOCB flavor; pick by firmware type. */
1489 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1490 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1492 if (IS_FWI2_CAPABLE(ha)) {
1493 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1494 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1496 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1497 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1504 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
1505 * @p: CT request buffer
1507 * @rsp_size: response size in bytes
1509 * Returns a pointer to the initialized @ct_req.
1511 static inline struct ct_sns_req *
1512 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1515 memset(p, 0, sizeof(struct ct_sns_pkt));
/* CT header: gs_type 0xFA / gs_subtype 0x10 select the management
 * service FDMI server (per FC-GS); revision is fixed at 1. */
1517 p->p.req.header.revision = 0x01;
1518 p->p.req.header.gs_type = 0xFA;
1519 p->p.req.header.gs_subtype = 0x10;
1520 p->p.req.command = cpu_to_be16(cmd);
/* max_rsp_size is expressed in 4-byte words, excluding the 16-byte
 * CT header. */
1521 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1527 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
1530 * Returns 0 on success, QLA_ALREADY_REGISTERED if the switch reports the
 * HBA is already registered, QLA_FUNCTION_FAILED otherwise.
1533 qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1538 ms_iocb_entry_t *ms_pkt;
1539 struct ct_sns_req *ct_req;
1540 struct ct_sns_rsp *ct_rsp;
1542 struct ct_fdmi_hba_attr *eiter;
1543 struct qla_hw_data *ha = vha->hw;
1546 /* Prepare common MS IOCB */
1547 /* Request size adjusted after CT preparation */
1548 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1550 /* Prepare CT request */
1551 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
1552 ct_rsp = &ha->ct_sns->p.rsp;
1554 /* Prepare FDMI command arguments -- attribute block, attributes. */
1555 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1556 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1557 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
/* Running payload size: identifier + port list + two length words;
 * each attribute below advances 'size' by 4 (hdr) + value length. */
1558 size = 2 * WWN_SIZE + 4 + 4;
1561 ct_req->req.rhba.attrs.count =
1562 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1563 entries = ct_req->req.rhba.hba_identifier;
/* Attribute: Node name. */
1566 eiter = entries + size;
1567 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1568 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1569 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1570 size += 4 + WWN_SIZE;
1572 ql_dbg(ql_dbg_disc, vha, 0x2025,
1573 "NodeName = %8phN.\n", eiter->a.node_name);
/* Attribute: Manufacturer. */
1576 eiter = entries + size;
1577 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
/* NOTE(review): alen is taken from QLA2XXX_MANUFACTURER but the string
 * actually written is the literal "QLogic Corporation" -- if the macro
 * ever diverges from the literal, the attribute length is wrong. */
1578 alen = strlen(QLA2XXX_MANUFACTURER);
1579 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1580 "%s", "QLogic Corporation");
/* Round the attribute value length up to a 4-byte boundary. */
1581 alen += 4 - (alen & 3);
1582 eiter->len = cpu_to_be16(4 + alen);
1585 ql_dbg(ql_dbg_disc, vha, 0x2026,
1586 "Manufacturer = %s.\n", eiter->a.manufacturer);
1588 /* Serial number. */
1589 eiter = entries + size;
1590 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
/* FWI2 parts carry the serial in VPD; older parts encode it from the
 * serial0/1/2 NVRAM bytes. */
1591 if (IS_FWI2_CAPABLE(ha))
1592 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1593 sizeof(eiter->a.serial_num));
1595 sn = ((ha->serial0 & 0x1f) << 16) |
1596 (ha->serial2 << 8) | ha->serial1;
1597 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1598 "%c%05d", 'A' + sn / 100000, sn % 100000);
1600 alen = strlen(eiter->a.serial_num);
1601 alen += 4 - (alen & 3);
1602 eiter->len = cpu_to_be16(4 + alen);
1605 ql_dbg(ql_dbg_disc, vha, 0x2027,
1606 "Serial no. = %s.\n", eiter->a.serial_num);
/* Attribute: Model name. */
1609 eiter = entries + size;
1610 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1611 snprintf(eiter->a.model, sizeof(eiter->a.model),
1612 "%s", ha->model_number);
1613 alen = strlen(eiter->a.model);
1614 alen += 4 - (alen & 3);
1615 eiter->len = cpu_to_be16(4 + alen);
1618 ql_dbg(ql_dbg_disc, vha, 0x2028,
1619 "Model Name = %s.\n", eiter->a.model);
1621 /* Model description. */
1622 eiter = entries + size;
1623 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1624 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1625 "%s", ha->model_desc);
1626 alen = strlen(eiter->a.model_desc);
1627 alen += 4 - (alen & 3);
1628 eiter->len = cpu_to_be16(4 + alen);
1631 ql_dbg(ql_dbg_disc, vha, 0x2029,
1632 "Model Desc = %s.\n", eiter->a.model_desc);
1634 /* Hardware version. */
1635 eiter = entries + size;
1636 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
/* Preference order: adapter_id (pre-FWI2), then VPD "MN", then VPD
 * "EC", falling back to adapter_id. */
1637 if (!IS_FWI2_CAPABLE(ha)) {
1638 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1639 "HW:%s", ha->adapter_id);
1640 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1641 sizeof(eiter->a.hw_version))) {
1643 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1644 sizeof(eiter->a.hw_version))) {
1647 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1648 "HW:%s", ha->adapter_id);
1650 alen = strlen(eiter->a.hw_version);
1651 alen += 4 - (alen & 3);
1652 eiter->len = cpu_to_be16(4 + alen);
1655 ql_dbg(ql_dbg_disc, vha, 0x202a,
1656 "Hardware ver = %s.\n", eiter->a.hw_version);
1658 /* Driver version. */
1659 eiter = entries + size;
1660 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1661 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1662 "%s", qla2x00_version_str);
1663 alen = strlen(eiter->a.driver_version);
1664 alen += 4 - (alen & 3);
1665 eiter->len = cpu_to_be16(4 + alen);
1668 ql_dbg(ql_dbg_disc, vha, 0x202b,
1669 "Driver ver = %s.\n", eiter->a.driver_version);
1671 /* Option ROM version. */
1672 eiter = entries + size;
1673 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1674 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1675 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1676 alen = strlen(eiter->a.orom_version);
1677 alen += 4 - (alen & 3);
1678 eiter->len = cpu_to_be16(4 + alen);
1681 ql_dbg(ql_dbg_disc, vha , 0x202c,
1682 "Optrom vers = %s.\n", eiter->a.orom_version);
1684 /* Firmware version */
1685 eiter = entries + size;
1686 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1687 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1688 sizeof(eiter->a.fw_version));
1689 alen = strlen(eiter->a.fw_version);
1690 alen += 4 - (alen & 3);
1691 eiter->len = cpu_to_be16(4 + alen);
1694 ql_dbg(ql_dbg_disc, vha, 0x202d,
1695 "Firmware vers = %s.\n", eiter->a.fw_version);
1697 /* Update MS request size. */
/* +16 accounts for the CT header prepended to the payload. */
1698 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1700 ql_dbg(ql_dbg_disc, vha, 0x202e,
1701 "RHBA identifier = %8phN size=%d.\n",
1702 ct_req->req.rhba.hba_identifier, size);
1703 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1706 /* Execute MS IOCB */
1707 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1708 sizeof(ms_iocb_entry_t));
1709 if (rval != QLA_SUCCESS) {
1711 ql_dbg(ql_dbg_disc, vha, 0x2030,
1712 "RHBA issue IOCB failed (%d).\n", rval);
1713 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1715 rval = QLA_FUNCTION_FAILED;
/* "Already registered" is reported as a distinct, non-fatal status
 * so the caller can skip re-registration. */
1716 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1717 ct_rsp->header.explanation_code ==
1718 CT_EXPL_ALREADY_REGISTERED) {
1719 ql_dbg(ql_dbg_disc, vha, 0x2034,
1720 "HBA already registered.\n");
1721 rval = QLA_ALREADY_REGISTERED;
1723 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1724 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1725 ct_rsp->header.reason_code,
1726 ct_rsp->header.explanation_code);
1729 ql_dbg(ql_dbg_disc, vha, 0x2035,
1730 "RHBA exiting normally.\n");
1737 * qla2x00_fdmi_rpa() - perform RPA registration
1740 * Returns 0 on success, QLA_ALREADY_REGISTERED if the switch reports the
 * port is already registered, QLA_FUNCTION_FAILED otherwise.
1743 qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1747 struct qla_hw_data *ha = vha->hw;
1748 ms_iocb_entry_t *ms_pkt;
1749 struct ct_sns_req *ct_req;
1750 struct ct_sns_rsp *ct_rsp;
1752 struct ct_fdmi_port_attr *eiter;
1753 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1754 struct new_utsname *p_sysid = NULL;
1757 /* Prepare common MS IOCB */
1758 /* Request size adjusted after CT preparation */
1759 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1761 /* Prepare CT request */
1762 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1764 ct_rsp = &ha->ct_sns->p.rsp;
1766 /* Prepare FDMI command arguments -- attribute block, attributes. */
1767 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
/* Running payload size; each attribute advances it by 4 + value len. */
1768 size = WWN_SIZE + 4;
1771 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1772 entries = ct_req->req.rpa.port_name;
/* Attribute: FC-4 types bitmap; byte 2 bit 0 advertises SCSI-FCP. */
1775 eiter = entries + size;
1776 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1777 eiter->len = cpu_to_be16(4 + 32);
1778 eiter->a.fc4_types[2] = 0x01;
1781 ql_dbg(ql_dbg_disc, vha, 0x2039,
1782 "FC4_TYPES=%02x %02x.\n",
1783 eiter->a.fc4_types[2],
1784 eiter->a.fc4_types[1]);
1786 /* Supported speed. */
/* Speed mask is chosen by adapter family, fastest families first. */
1787 eiter = entries + size;
1788 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1789 eiter->len = cpu_to_be16(4 + 4);
1790 if (IS_CNA_CAPABLE(ha))
1791 eiter->a.sup_speed = cpu_to_be32(
1792 FDMI_PORT_SPEED_10GB);
1793 else if (IS_QLA27XX(ha))
1794 eiter->a.sup_speed = cpu_to_be32(
1795 FDMI_PORT_SPEED_32GB|
1796 FDMI_PORT_SPEED_16GB|
1797 FDMI_PORT_SPEED_8GB);
1798 else if (IS_QLA2031(ha))
1799 eiter->a.sup_speed = cpu_to_be32(
1800 FDMI_PORT_SPEED_16GB|
1801 FDMI_PORT_SPEED_8GB|
1802 FDMI_PORT_SPEED_4GB);
1803 else if (IS_QLA25XX(ha))
1804 eiter->a.sup_speed = cpu_to_be32(
1805 FDMI_PORT_SPEED_8GB|
1806 FDMI_PORT_SPEED_4GB|
1807 FDMI_PORT_SPEED_2GB|
1808 FDMI_PORT_SPEED_1GB);
1809 else if (IS_QLA24XX_TYPE(ha))
1810 eiter->a.sup_speed = cpu_to_be32(
1811 FDMI_PORT_SPEED_4GB|
1812 FDMI_PORT_SPEED_2GB|
1813 FDMI_PORT_SPEED_1GB);
1814 else if (IS_QLA23XX(ha))
1815 eiter->a.sup_speed = cpu_to_be32(
1816 FDMI_PORT_SPEED_2GB|
1817 FDMI_PORT_SPEED_1GB);
1819 eiter->a.sup_speed = cpu_to_be32(
1820 FDMI_PORT_SPEED_1GB);
1823 ql_dbg(ql_dbg_disc, vha, 0x203a,
1824 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1826 /* Current speed. */
/* Map the firmware-reported link rate to the FDMI speed code. */
1827 eiter = entries + size;
1828 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1829 eiter->len = cpu_to_be16(4 + 4);
1830 switch (ha->link_data_rate) {
1831 case PORT_SPEED_1GB:
1832 eiter->a.cur_speed =
1833 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1835 case PORT_SPEED_2GB:
1836 eiter->a.cur_speed =
1837 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1839 case PORT_SPEED_4GB:
1840 eiter->a.cur_speed =
1841 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1843 case PORT_SPEED_8GB:
1844 eiter->a.cur_speed =
1845 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1847 case PORT_SPEED_10GB:
1848 eiter->a.cur_speed =
1849 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1851 case PORT_SPEED_16GB:
1852 eiter->a.cur_speed =
1853 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1855 case PORT_SPEED_32GB:
1856 eiter->a.cur_speed =
1857 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1860 eiter->a.cur_speed =
1861 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1866 ql_dbg(ql_dbg_disc, vha, 0x203b,
1867 "Current_Speed=%x.\n", eiter->a.cur_speed);
1869 /* Max frame size. */
1870 eiter = entries + size;
1871 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1872 eiter->len = cpu_to_be16(4 + 4);
/* Frame payload size lives in different init-cb layouts per family. */
1873 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1874 le16_to_cpu(icb24->frame_payload_size) :
1875 le16_to_cpu(ha->init_cb->frame_payload_size);
1876 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1879 ql_dbg(ql_dbg_disc, vha, 0x203c,
1880 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1882 /* OS device name. */
1883 eiter = entries + size;
1884 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1885 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1886 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1887 alen = strlen(eiter->a.os_dev_name);
1888 alen += 4 - (alen & 3);
1889 eiter->len = cpu_to_be16(4 + alen);
1892 ql_dbg(ql_dbg_disc, vha, 0x204b,
1893 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
/* Attribute: Host name -- prefer the kernel nodename, else the
 * fc_host system hostname. */
1896 eiter = entries + size;
1897 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1898 p_sysid = utsname();
1900 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1901 "%s", p_sysid->nodename);
1903 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1904 "%s", fc_host_system_hostname(vha->host));
1906 alen = strlen(eiter->a.host_name);
1907 alen += 4 - (alen & 3);
1908 eiter->len = cpu_to_be16(4 + alen);
1911 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
1913 /* Update MS request size. */
/* +16 accounts for the CT header prepended to the payload. */
1914 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1916 ql_dbg(ql_dbg_disc, vha, 0x203e,
1917 "RPA portname %016llx, size = %d.\n",
1918 wwn_to_u64(ct_req->req.rpa.port_name), size);
1919 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1922 /* Execute MS IOCB */
1923 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1924 sizeof(ms_iocb_entry_t));
1925 if (rval != QLA_SUCCESS) {
1927 ql_dbg(ql_dbg_disc, vha, 0x2040,
1928 "RPA issue IOCB failed (%d).\n", rval);
1929 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1931 rval = QLA_FUNCTION_FAILED;
/* "Already registered" is non-fatal; surface it distinctly. */
1932 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1933 ct_rsp->header.explanation_code ==
1934 CT_EXPL_ALREADY_REGISTERED) {
1935 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1936 "RPA already registered.\n");
1937 rval = QLA_ALREADY_REGISTERED;
1941 ql_dbg(ql_dbg_disc, vha, 0x2041,
1942 "RPA exiting normally.\n");
1949 * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
1952 * Returns 0 on success, QLA_ALREADY_REGISTERED if the switch reports the
 * HBA is already registered, QLA_FUNCTION_FAILED otherwise.
1955 qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1959 ms_iocb_entry_t *ms_pkt;
1960 struct ct_sns_req *ct_req;
1961 struct ct_sns_rsp *ct_rsp;
1963 struct ct_fdmiv2_hba_attr *eiter;
1964 struct qla_hw_data *ha = vha->hw;
1965 struct new_utsname *p_sysid = NULL;
1968 /* Prepare common MS IOCB */
1969 /* Request size adjusted after CT preparation */
1970 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1972 /* Prepare CT request */
1973 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1975 ct_rsp = &ha->ct_sns->p.rsp;
1977 /* Prepare FDMI command arguments -- attribute block, attributes. */
1978 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1979 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1980 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
/* Running payload size; each attribute advances it by 4 + value len. */
1981 size = 2 * WWN_SIZE + 4 + 4;
1984 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1985 entries = ct_req->req.rhba2.hba_identifier;
/* Attribute: Node name. */
1988 eiter = entries + size;
1989 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1990 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1991 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1992 size += 4 + WWN_SIZE;
1994 ql_dbg(ql_dbg_disc, vha, 0x207d,
1995 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Attribute: Manufacturer. */
1998 eiter = entries + size;
1999 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
2000 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
2001 "%s", "QLogic Corporation");
/* NOTE(review): redundant -- snprintf already NUL-terminated here. */
2002 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
2003 alen = strlen(eiter->a.manufacturer);
2004 alen += 4 - (alen & 3);
2005 eiter->len = cpu_to_be16(4 + alen);
2008 ql_dbg(ql_dbg_disc, vha, 0x20a5,
2009 "Manufacturer = %s.\n", eiter->a.manufacturer);
2011 /* Serial number. */
/* FWI2 parts carry the serial in VPD; older parts encode it from the
 * serial0/1/2 NVRAM bytes. */
2012 eiter = entries + size;
2013 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
2014 if (IS_FWI2_CAPABLE(ha))
2015 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
2016 sizeof(eiter->a.serial_num));
2018 sn = ((ha->serial0 & 0x1f) << 16) |
2019 (ha->serial2 << 8) | ha->serial1;
2020 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
2021 "%c%05d", 'A' + sn / 100000, sn % 100000);
2023 alen = strlen(eiter->a.serial_num);
2024 alen += 4 - (alen & 3);
2025 eiter->len = cpu_to_be16(4 + alen);
2028 ql_dbg(ql_dbg_disc, vha, 0x20a6,
2029 "Serial no. = %s.\n", eiter->a.serial_num);
/* Attribute: Model name. */
2032 eiter = entries + size;
2033 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
2034 snprintf(eiter->a.model, sizeof(eiter->a.model),
2035 "%s", ha->model_number);
2036 alen = strlen(eiter->a.model);
2037 alen += 4 - (alen & 3);
2038 eiter->len = cpu_to_be16(4 + alen);
2041 ql_dbg(ql_dbg_disc, vha, 0x20a7,
2042 "Model Name = %s.\n", eiter->a.model);
2044 /* Model description. */
2045 eiter = entries + size;
2046 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
2047 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
2048 "%s", ha->model_desc);
2049 alen = strlen(eiter->a.model_desc);
2050 alen += 4 - (alen & 3);
2051 eiter->len = cpu_to_be16(4 + alen);
2054 ql_dbg(ql_dbg_disc, vha, 0x20a8,
2055 "Model Desc = %s.\n", eiter->a.model_desc);
2057 /* Hardware version. */
/* Preference order: adapter_id (pre-FWI2), then VPD "MN", then VPD
 * "EC", falling back to adapter_id. */
2058 eiter = entries + size;
2059 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
2060 if (!IS_FWI2_CAPABLE(ha)) {
2061 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2062 "HW:%s", ha->adapter_id);
2063 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
2064 sizeof(eiter->a.hw_version))) {
2066 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
2067 sizeof(eiter->a.hw_version))) {
2070 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2071 "HW:%s", ha->adapter_id);
2073 alen = strlen(eiter->a.hw_version);
2074 alen += 4 - (alen & 3);
2075 eiter->len = cpu_to_be16(4 + alen);
2078 ql_dbg(ql_dbg_disc, vha, 0x20a9,
2079 "Hardware ver = %s.\n", eiter->a.hw_version);
2081 /* Driver version. */
2082 eiter = entries + size;
2083 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
2084 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
2085 "%s", qla2x00_version_str);
2086 alen = strlen(eiter->a.driver_version);
2087 alen += 4 - (alen & 3);
2088 eiter->len = cpu_to_be16(4 + alen);
2091 ql_dbg(ql_dbg_disc, vha, 0x20aa,
2092 "Driver ver = %s.\n", eiter->a.driver_version);
2094 /* Option ROM version. */
2095 eiter = entries + size;
2096 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
2097 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
2098 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2099 alen = strlen(eiter->a.orom_version);
2100 alen += 4 - (alen & 3);
2101 eiter->len = cpu_to_be16(4 + alen);
/* NOTE(review): this debug print indexes orom_version as raw bytes and
 * formats them with %d, but the field was just written as a string by
 * snprintf -- the printed values are character codes, not the version. */
2104 ql_dbg(ql_dbg_disc, vha , 0x20ab,
2105 "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
2106 eiter->a.orom_version[0]);
2108 /* Firmware version */
2109 eiter = entries + size;
2110 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
2111 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
2112 sizeof(eiter->a.fw_version));
2113 alen = strlen(eiter->a.fw_version);
2114 alen += 4 - (alen & 3);
2115 eiter->len = cpu_to_be16(4 + alen);
2118 ql_dbg(ql_dbg_disc, vha, 0x20ac,
2119 "Firmware vers = %s.\n", eiter->a.fw_version);
2121 /* OS Name and Version */
/* Prefer the kernel's utsname triple, else the fc_host hostname. */
2122 eiter = entries + size;
2123 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
2124 p_sysid = utsname();
2126 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2128 p_sysid->sysname, p_sysid->release, p_sysid->version);
2130 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2131 "%s %s", "Linux", fc_host_system_hostname(vha->host));
2133 alen = strlen(eiter->a.os_version);
2134 alen += 4 - (alen & 3);
2135 eiter->len = cpu_to_be16(4 + alen);
2138 ql_dbg(ql_dbg_disc, vha, 0x20ae,
2139 "OS Name and Version = %s.\n", eiter->a.os_version);
2141 /* MAX CT Payload Length */
2142 eiter = entries + size;
2143 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
/* NOTE(review): cpu_to_be32() is applied twice to max_ct_len; on a
 * little-endian host the two swaps cancel, leaving the value in host
 * order on the wire. Looks like a genuine endian bug -- confirm against
 * the upstream fix for this attribute. */
2144 eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
2145 eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
2146 eiter->len = cpu_to_be16(4 + 4);
2149 ql_dbg(ql_dbg_disc, vha, 0x20af,
2150 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
2152 /* Node Symbolic Name */
2153 eiter = entries + size;
2154 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
2155 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
2156 sizeof(eiter->a.sym_name));
2157 alen = strlen(eiter->a.sym_name);
2158 alen += 4 - (alen & 3);
2159 eiter->len = cpu_to_be16(4 + alen);
2162 ql_dbg(ql_dbg_disc, vha, 0x20b0,
2163 "Symbolic Name = %s.\n", eiter->a.sym_name);
/* Attribute: Vendor ID (0x1077 is QLogic's PCI vendor ID). */
2166 eiter = entries + size;
2167 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
2168 eiter->a.vendor_id = cpu_to_be32(0x1077);
2169 eiter->len = cpu_to_be16(4 + 4);
2172 ql_dbg(ql_dbg_disc, vha, 0x20b1,
2173 "Vendor Id = %x.\n", eiter->a.vendor_id);
/* Attribute: Number of ports (always 1 per registered HBA entity). */
2176 eiter = entries + size;
2177 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
2178 eiter->a.num_ports = cpu_to_be32(1);
2179 eiter->len = cpu_to_be16(4 + 4);
2182 ql_dbg(ql_dbg_disc, vha, 0x20b2,
2183 "Port Num = %x.\n", eiter->a.num_ports);
/* Attribute: Fabric name (fabric node WWN learned at login). */
2186 eiter = entries + size;
2187 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
2188 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2189 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2190 size += 4 + WWN_SIZE;
2192 ql_dbg(ql_dbg_disc, vha, 0x20b3,
2193 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
/* Attribute: Boot BIOS name/version string. */
2196 eiter = entries + size;
2197 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
2198 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
2199 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2200 alen = strlen(eiter->a.bios_name);
2201 alen += 4 - (alen & 3);
2202 eiter->len = cpu_to_be16(4 + alen);
2205 ql_dbg(ql_dbg_disc, vha, 0x20b4,
2206 "BIOS Name = %s\n", eiter->a.bios_name);
2208 /* Vendor Identifier */
2209 eiter = entries + size;
2210 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
2211 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
2213 alen = strlen(eiter->a.vendor_identifier);
2214 alen += 4 - (alen & 3);
2215 eiter->len = cpu_to_be16(4 + alen);
2218 ql_dbg(ql_dbg_disc, vha, 0x201b,
2219 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
2221 /* Update MS request size. */
/* +16 accounts for the CT header prepended to the payload. */
2222 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2224 ql_dbg(ql_dbg_disc, vha, 0x20b5,
2225 "RHBA identifier = %016llx.\n",
2226 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
2227 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
2230 /* Execute MS IOCB */
2231 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2232 sizeof(ms_iocb_entry_t));
2233 if (rval != QLA_SUCCESS) {
2235 ql_dbg(ql_dbg_disc, vha, 0x20b7,
2236 "RHBA issue IOCB failed (%d).\n", rval);
2237 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
2239 rval = QLA_FUNCTION_FAILED;
/* "Already registered" is non-fatal; surface it distinctly. */
2241 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2242 ct_rsp->header.explanation_code ==
2243 CT_EXPL_ALREADY_REGISTERED) {
2244 ql_dbg(ql_dbg_disc, vha, 0x20b8,
2245 "HBA already registered.\n");
2246 rval = QLA_ALREADY_REGISTERED;
2248 ql_dbg(ql_dbg_disc, vha, 0x2016,
2249 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2250 ct_rsp->header.reason_code,
2251 ct_rsp->header.explanation_code);
2254 ql_dbg(ql_dbg_disc, vha, 0x20b9,
2255 "RHBA FDMI V2 exiting normally.\n");
2262 * qla2x00_fdmi_dhba() - Deregister this HBA (DHBA) from the fabric FDMI server.
2265 * Returns 0 on success.
2268 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2271 struct qla_hw_data *ha = vha->hw;
2272 ms_iocb_entry_t *ms_pkt;
2273 struct ct_sns_req *ct_req;
2274 struct ct_sns_rsp *ct_rsp;
2277 /* Prepare common MS IOCB */
2278 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2281 /* Prepare CT request */
2282 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2283 ct_rsp = &ha->ct_sns->p.rsp;
2285 /* Prepare FDMI command arguments -- portname. */
/* DHBA identifies the registration to remove by this port's WWPN. */
2286 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2288 ql_dbg(ql_dbg_disc, vha, 0x2036,
2289 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2291 /* Execute MS IOCB */
2292 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2293 sizeof(ms_iocb_entry_t));
2294 if (rval != QLA_SUCCESS) {
2296 ql_dbg(ql_dbg_disc, vha, 0x2037,
2297 "DHBA issue IOCB failed (%d).\n", rval);
2298 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2300 rval = QLA_FUNCTION_FAILED;
2302 ql_dbg(ql_dbg_disc, vha, 0x2038,
2303 "DHBA exiting normally.\n");
2310 * qla2x00_fdmiv2_rpa() -
2313 * Returns 0 on success.
2316 qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2320 struct qla_hw_data *ha = vha->hw;
2321 ms_iocb_entry_t *ms_pkt;
2322 struct ct_sns_req *ct_req;
2323 struct ct_sns_rsp *ct_rsp;
2325 struct ct_fdmiv2_port_attr *eiter;
2326 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2327 struct new_utsname *p_sysid = NULL;
2330 /* Prepare common MS IOCB */
2331 /* Request size adjusted after CT preparation */
2332 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
2334 /* Prepare CT request */
2335 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
2336 ct_rsp = &ha->ct_sns->p.rsp;
2338 /* Prepare FDMI command arguments -- attribute block, attributes. */
2339 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
2340 size = WWN_SIZE + 4;
2343 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2344 entries = ct_req->req.rpa2.port_name;
2347 eiter = entries + size;
2348 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
2349 eiter->len = cpu_to_be16(4 + 32);
2350 eiter->a.fc4_types[2] = 0x01;
2353 ql_dbg(ql_dbg_disc, vha, 0x20ba,
2354 "FC4_TYPES=%02x %02x.\n",
2355 eiter->a.fc4_types[2],
2356 eiter->a.fc4_types[1]);
2358 if (vha->flags.nvme_enabled) {
2359 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
2360 ql_dbg(ql_dbg_disc, vha, 0x211f,
2361 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2362 eiter->a.fc4_types[6]);
2365 /* Supported speed. */
2366 eiter = entries + size;
2367 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
2368 eiter->len = cpu_to_be16(4 + 4);
2369 if (IS_CNA_CAPABLE(ha))
2370 eiter->a.sup_speed = cpu_to_be32(
2371 FDMI_PORT_SPEED_10GB);
2372 else if (IS_QLA27XX(ha))
2373 eiter->a.sup_speed = cpu_to_be32(
2374 FDMI_PORT_SPEED_32GB|
2375 FDMI_PORT_SPEED_16GB|
2376 FDMI_PORT_SPEED_8GB);
2377 else if (IS_QLA2031(ha))
2378 eiter->a.sup_speed = cpu_to_be32(
2379 FDMI_PORT_SPEED_16GB|
2380 FDMI_PORT_SPEED_8GB|
2381 FDMI_PORT_SPEED_4GB);
2382 else if (IS_QLA25XX(ha))
2383 eiter->a.sup_speed = cpu_to_be32(
2384 FDMI_PORT_SPEED_8GB|
2385 FDMI_PORT_SPEED_4GB|
2386 FDMI_PORT_SPEED_2GB|
2387 FDMI_PORT_SPEED_1GB);
2388 else if (IS_QLA24XX_TYPE(ha))
2389 eiter->a.sup_speed = cpu_to_be32(
2390 FDMI_PORT_SPEED_4GB|
2391 FDMI_PORT_SPEED_2GB|
2392 FDMI_PORT_SPEED_1GB);
2393 else if (IS_QLA23XX(ha))
2394 eiter->a.sup_speed = cpu_to_be32(
2395 FDMI_PORT_SPEED_2GB|
2396 FDMI_PORT_SPEED_1GB);
2398 eiter->a.sup_speed = cpu_to_be32(
2399 FDMI_PORT_SPEED_1GB);
2402 ql_dbg(ql_dbg_disc, vha, 0x20bb,
2403 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
2405 /* Current speed. */
2406 eiter = entries + size;
2407 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
2408 eiter->len = cpu_to_be16(4 + 4);
2409 switch (ha->link_data_rate) {
2410 case PORT_SPEED_1GB:
2411 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
2413 case PORT_SPEED_2GB:
2414 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
2416 case PORT_SPEED_4GB:
2417 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
2419 case PORT_SPEED_8GB:
2420 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
2422 case PORT_SPEED_10GB:
2423 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
2425 case PORT_SPEED_16GB:
2426 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
2428 case PORT_SPEED_32GB:
2429 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
2432 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
2437 ql_dbg(ql_dbg_disc, vha, 0x2017,
2438 "Current_Speed = %x.\n", eiter->a.cur_speed);
2440 /* Max frame size. */
2441 eiter = entries + size;
2442 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
2443 eiter->len = cpu_to_be16(4 + 4);
2444 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
2445 le16_to_cpu(icb24->frame_payload_size):
2446 le16_to_cpu(ha->init_cb->frame_payload_size);
2447 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
2450 ql_dbg(ql_dbg_disc, vha, 0x20bc,
2451 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
2453 /* OS device name. */
2454 eiter = entries + size;
2455 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
2456 alen = strlen(QLA2XXX_DRIVER_NAME);
2457 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
2458 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
2459 alen += 4 - (alen & 3);
2460 eiter->len = cpu_to_be16(4 + alen);
2463 ql_dbg(ql_dbg_disc, vha, 0x20be,
2464 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
2467 eiter = entries + size;
2468 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
2469 p_sysid = utsname();
2471 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2472 "%s", p_sysid->nodename);
2474 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2475 "%s", fc_host_system_hostname(vha->host));
2477 alen = strlen(eiter->a.host_name);
2478 alen += 4 - (alen & 3);
2479 eiter->len = cpu_to_be16(4 + alen);
2482 ql_dbg(ql_dbg_disc, vha, 0x201a,
2483 "HostName=%s.\n", eiter->a.host_name);
2486 eiter = entries + size;
2487 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2488 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2489 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2490 size += 4 + WWN_SIZE;
2492 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2493 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
2496 eiter = entries + size;
2497 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2498 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2499 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2500 size += 4 + WWN_SIZE;
2502 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2503 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2505 /* Port Symbolic Name */
2506 eiter = entries + size;
2507 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2508 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2509 sizeof(eiter->a.port_sym_name));
2510 alen = strlen(eiter->a.port_sym_name);
2511 alen += 4 - (alen & 3);
2512 eiter->len = cpu_to_be16(4 + alen);
2515 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2516 "port symbolic name = %s\n", eiter->a.port_sym_name);
2519 eiter = entries + size;
2520 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2521 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2522 eiter->len = cpu_to_be16(4 + 4);
2525 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2526 "Port Type = %x.\n", eiter->a.port_type);
2528 /* Class of Service */
2529 eiter = entries + size;
2530 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2531 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2532 eiter->len = cpu_to_be16(4 + 4);
2535 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2536 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2538 /* Port Fabric Name */
2539 eiter = entries + size;
2540 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2541 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2542 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2543 size += 4 + WWN_SIZE;
2545 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2546 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2549 eiter = entries + size;
2550 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2551 eiter->a.port_fc4_type[0] = 0;
2552 eiter->a.port_fc4_type[1] = 0;
2553 eiter->a.port_fc4_type[2] = 1;
2554 eiter->a.port_fc4_type[3] = 0;
2555 eiter->len = cpu_to_be16(4 + 32);
2558 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2559 "Port Active FC4 Type = %02x %02x.\n",
2560 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2562 if (vha->flags.nvme_enabled) {
2563 eiter->a.port_fc4_type[4] = 0;
2564 eiter->a.port_fc4_type[5] = 0;
2565 eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
2566 ql_dbg(ql_dbg_disc, vha, 0x2120,
2567 "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2568 eiter->a.port_fc4_type[6]);
2572 eiter = entries + size;
2573 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2574 eiter->a.port_state = cpu_to_be32(1);
2575 eiter->len = cpu_to_be16(4 + 4);
2578 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2579 "Port State = %x.\n", eiter->a.port_state);
2581 /* Number of Ports */
2582 eiter = entries + size;
2583 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2584 eiter->a.num_ports = cpu_to_be32(1);
2585 eiter->len = cpu_to_be16(4 + 4);
2588 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2589 "Number of ports = %x.\n", eiter->a.num_ports);
2592 eiter = entries + size;
2593 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2594 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2595 eiter->len = cpu_to_be16(4 + 4);
2598 ql_dbg(ql_dbg_disc, vha, 0x201c,
2599 "Port Id = %x.\n", eiter->a.port_id);
2601 /* Update MS request size. */
2602 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2604 ql_dbg(ql_dbg_disc, vha, 0x2018,
2605 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
2606 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
2609 /* Execute MS IOCB */
2610 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2611 sizeof(ms_iocb_entry_t));
2612 if (rval != QLA_SUCCESS) {
2614 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2615 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
2616 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
2618 rval = QLA_FUNCTION_FAILED;
2619 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2620 ct_rsp->header.explanation_code ==
2621 CT_EXPL_ALREADY_REGISTERED) {
2622 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2623 "RPA FDMI v2 already registered\n");
2624 rval = QLA_ALREADY_REGISTERED;
2626 ql_dbg(ql_dbg_disc, vha, 0x2020,
2627 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2628 ct_rsp->header.reason_code,
2629 ct_rsp->header.explanation_code);
2632 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2633 "RPA FDMI V2 exiting normally.\n");
2640 * qla2x00_fdmi_register() -
2643 * Returns 0 on success.
2646 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2648 int rval = QLA_FUNCTION_FAILED;
2649 struct qla_hw_data *ha = vha->hw;
2651 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2653 return QLA_FUNCTION_FAILED;
2655 rval = qla2x00_mgmt_svr_login(vha);
2659 rval = qla2x00_fdmiv2_rhba(vha);
2661 if (rval != QLA_ALREADY_REGISTERED)
2664 rval = qla2x00_fdmi_dhba(vha);
2668 rval = qla2x00_fdmiv2_rhba(vha);
2672 rval = qla2x00_fdmiv2_rpa(vha);
2679 rval = qla2x00_fdmi_rhba(vha);
2681 if (rval != QLA_ALREADY_REGISTERED)
2684 rval = qla2x00_fdmi_dhba(vha);
2688 rval = qla2x00_fdmi_rhba(vha);
2692 rval = qla2x00_fdmi_rpa(vha);
2698 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2700 * @list: switch info entries to populate
2702 * Returns 0 on success.
2705 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2707 int rval = QLA_SUCCESS;
2709 struct qla_hw_data *ha = vha->hw;
2710 ms_iocb_entry_t *ms_pkt;
2711 struct ct_sns_req *ct_req;
2712 struct ct_sns_rsp *ct_rsp;
2715 if (!IS_IIDMA_CAPABLE(ha))
2716 return QLA_FUNCTION_FAILED;
2718 arg.iocb = ha->ms_iocb;
2719 arg.req_dma = ha->ct_sns_dma;
2720 arg.rsp_dma = ha->ct_sns_dma;
2721 arg.req_size = GFPN_ID_REQ_SIZE;
2722 arg.rsp_size = GFPN_ID_RSP_SIZE;
2723 arg.nport_handle = NPH_SNS;
2725 for (i = 0; i < ha->max_fibre_devices; i++) {
2727 /* Prepare common MS IOCB */
2728 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2730 /* Prepare CT request */
2731 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2733 ct_rsp = &ha->ct_sns->p.rsp;
2735 /* Prepare CT arguments -- port_id */
2736 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2737 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2738 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2740 /* Execute MS IOCB */
2741 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2742 sizeof(ms_iocb_entry_t));
2743 if (rval != QLA_SUCCESS) {
2745 ql_dbg(ql_dbg_disc, vha, 0x2023,
2746 "GFPN_ID issue IOCB failed (%d).\n", rval);
2748 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2749 "GFPN_ID") != QLA_SUCCESS) {
2750 rval = QLA_FUNCTION_FAILED;
2753 /* Save fabric portname */
2754 memcpy(list[i].fabric_port_name,
2755 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2758 /* Last device exit. */
2759 if (list[i].d_id.b.rsvd_1 != 0)
2767 static inline struct ct_sns_req *
2768 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2771 memset(p, 0, sizeof(struct ct_sns_pkt));
2773 p->p.req.header.revision = 0x01;
2774 p->p.req.header.gs_type = 0xFA;
2775 p->p.req.header.gs_subtype = 0x01;
2776 p->p.req.command = cpu_to_be16(cmd);
2777 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
2783 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2785 * @list: switch info entries to populate
2787 * Returns 0 on success.
2790 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2794 struct qla_hw_data *ha = vha->hw;
2795 ms_iocb_entry_t *ms_pkt;
2796 struct ct_sns_req *ct_req;
2797 struct ct_sns_rsp *ct_rsp;
2800 if (!IS_IIDMA_CAPABLE(ha))
2801 return QLA_FUNCTION_FAILED;
2802 if (!ha->flags.gpsc_supported)
2803 return QLA_FUNCTION_FAILED;
2805 rval = qla2x00_mgmt_svr_login(vha);
2809 arg.iocb = ha->ms_iocb;
2810 arg.req_dma = ha->ct_sns_dma;
2811 arg.rsp_dma = ha->ct_sns_dma;
2812 arg.req_size = GPSC_REQ_SIZE;
2813 arg.rsp_size = GPSC_RSP_SIZE;
2814 arg.nport_handle = vha->mgmt_svr_loop_id;
2816 for (i = 0; i < ha->max_fibre_devices; i++) {
2818 /* Prepare common MS IOCB */
2819 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2821 /* Prepare CT request */
2822 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2824 ct_rsp = &ha->ct_sns->p.rsp;
2826 /* Prepare CT arguments -- port_name */
2827 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2830 /* Execute MS IOCB */
2831 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2832 sizeof(ms_iocb_entry_t));
2833 if (rval != QLA_SUCCESS) {
2835 ql_dbg(ql_dbg_disc, vha, 0x2059,
2836 "GPSC issue IOCB failed (%d).\n", rval);
2837 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2838 "GPSC")) != QLA_SUCCESS) {
2839 /* FM command unsupported? */
2840 if (rval == QLA_INVALID_COMMAND &&
2841 (ct_rsp->header.reason_code ==
2842 CT_REASON_INVALID_COMMAND_CODE ||
2843 ct_rsp->header.reason_code ==
2844 CT_REASON_COMMAND_UNSUPPORTED)) {
2845 ql_dbg(ql_dbg_disc, vha, 0x205a,
2846 "GPSC command unsupported, disabling "
2848 ha->flags.gpsc_supported = 0;
2849 rval = QLA_FUNCTION_FAILED;
2852 rval = QLA_FUNCTION_FAILED;
2854 /* Save port-speed */
2855 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
2857 list[i].fp_speed = PORT_SPEED_1GB;
2860 list[i].fp_speed = PORT_SPEED_2GB;
2863 list[i].fp_speed = PORT_SPEED_4GB;
2866 list[i].fp_speed = PORT_SPEED_10GB;
2869 list[i].fp_speed = PORT_SPEED_8GB;
2872 list[i].fp_speed = PORT_SPEED_16GB;
2875 list[i].fp_speed = PORT_SPEED_32GB;
2879 ql_dbg(ql_dbg_disc, vha, 0x205b,
2880 "GPSC ext entry - fpn "
2881 "%8phN speeds=%04x speed=%04x.\n",
2882 list[i].fabric_port_name,
2883 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2884 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2887 /* Last device exit. */
2888 if (list[i].d_id.b.rsvd_1 != 0)
2896 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2899 * @list: switch info entries to populate
2903 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2908 ms_iocb_entry_t *ms_pkt;
2909 struct ct_sns_req *ct_req;
2910 struct ct_sns_rsp *ct_rsp;
2911 struct qla_hw_data *ha = vha->hw;
2912 uint8_t fcp_scsi_features = 0;
2915 for (i = 0; i < ha->max_fibre_devices; i++) {
2916 /* Set default FC4 Type as UNKNOWN so the default is to
2917 * Process this port */
2918 list[i].fc4_type = FC4_TYPE_UNKNOWN;
2920 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2921 if (!IS_FWI2_CAPABLE(ha))
2924 arg.iocb = ha->ms_iocb;
2925 arg.req_dma = ha->ct_sns_dma;
2926 arg.rsp_dma = ha->ct_sns_dma;
2927 arg.req_size = GFF_ID_REQ_SIZE;
2928 arg.rsp_size = GFF_ID_RSP_SIZE;
2929 arg.nport_handle = NPH_SNS;
2931 /* Prepare common MS IOCB */
2932 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2934 /* Prepare CT request */
2935 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2937 ct_rsp = &ha->ct_sns->p.rsp;
2939 /* Prepare CT arguments -- port_id */
2940 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2941 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2942 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2944 /* Execute MS IOCB */
2945 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2946 sizeof(ms_iocb_entry_t));
2948 if (rval != QLA_SUCCESS) {
2949 ql_dbg(ql_dbg_disc, vha, 0x205c,
2950 "GFF_ID issue IOCB failed (%d).\n", rval);
2951 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2952 "GFF_ID") != QLA_SUCCESS) {
2953 ql_dbg(ql_dbg_disc, vha, 0x205d,
2954 "GFF_ID IOCB status had a failure status code.\n");
2957 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2958 fcp_scsi_features &= 0x0f;
2960 if (fcp_scsi_features)
2961 list[i].fc4_type = FC4_TYPE_FCP_SCSI;
2963 list[i].fc4_type = FC4_TYPE_OTHER;
2966 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2967 list[i].fc4f_nvme &= 0xf;
2970 /* Last device exit. */
2971 if (list[i].d_id.b.rsvd_1 != 0)
2976 /* GID_PN completion processing. */
2977 void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
2979 fc_port_t *fcport = ea->fcport;
2981 ql_dbg(ql_dbg_disc, vha, 0x201d,
2982 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2983 __func__, fcport->port_name, fcport->disc_state,
2984 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
2985 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
2987 if (fcport->disc_state == DSC_DELETE_PEND)
2990 if (ea->sp->gen2 != fcport->login_gen) {
2991 /* PLOGI/PRLI/LOGO came in while cmd was out.*/
2992 ql_dbg(ql_dbg_disc, vha, 0x201e,
2993 "%s %8phC generation changed rscn %d|%d n",
2994 __func__, fcport->port_name, fcport->last_rscn_gen,
3000 if (ea->sp->gen1 == fcport->rscn_gen) {
3001 fcport->scan_state = QLA_FCPORT_FOUND;
3002 fcport->flags |= FCF_FABRIC_DEVICE;
3004 if (fcport->d_id.b24 == ea->id.b24) {
3005 /* cable plugged into the same place */
3006 switch (vha->host->active_mode) {
3008 if (fcport->fw_login_state ==
3012 * Late RSCN was delivered.
3013 * Remote port already login'ed.
3015 ql_dbg(ql_dbg_disc, vha, 0x201f,
3016 "%s %d %8phC post adisc\n",
3019 data[0] = data[1] = 0;
3020 qla2x00_post_async_adisc_work(
3024 case MODE_INITIATOR:
3027 ql_dbg(ql_dbg_disc, vha, 0x201f,
3028 "%s %d %8phC post %s\n", __func__,
3029 __LINE__, fcport->port_name,
3030 (atomic_read(&fcport->state) ==
3031 FCS_ONLINE) ? "adisc" : "gnl");
3033 if (atomic_read(&fcport->state) ==
3037 data[0] = data[1] = 0;
3038 qla2x00_post_async_adisc_work(
3041 qla24xx_post_gnl_work(vha,
3046 } else { /* fcport->d_id.b24 != ea->id.b24 */
3047 fcport->d_id.b24 = ea->id.b24;
3048 fcport->id_changed = 1;
3049 if (fcport->deleted != QLA_SESS_DELETED) {
3050 ql_dbg(ql_dbg_disc, vha, 0x2021,
3051 "%s %d %8phC post del sess\n",
3052 __func__, __LINE__, fcport->port_name);
3053 qlt_schedule_sess_for_deletion(fcport);
3056 } else { /* ea->sp->gen1 != fcport->rscn_gen */
3057 ql_dbg(ql_dbg_disc, vha, 0x2022,
3058 "%s %d %8phC post gidpn\n",
3059 __func__, __LINE__, fcport->port_name);
3060 /* rscn came in while cmd was out */
3061 qla24xx_post_gidpn_work(vha, fcport);
3063 } else { /* ea->rc */
3065 if (ea->sp->gen1 == fcport->rscn_gen) {
3066 if (ea->sp->gen2 == fcport->login_gen) {
3067 ql_dbg(ql_dbg_disc, vha, 0x2042,
3068 "%s %d %8phC post del sess\n", __func__,
3069 __LINE__, fcport->port_name);
3070 qlt_schedule_sess_for_deletion(fcport);
3072 ql_dbg(ql_dbg_disc, vha, 0x2045,
3073 "%s %d %8phC login\n", __func__, __LINE__,
3075 qla24xx_fcport_handle_login(vha, fcport);
3078 ql_dbg(ql_dbg_disc, vha, 0x2049,
3079 "%s %d %8phC post gidpn\n", __func__, __LINE__,
3081 qla24xx_post_gidpn_work(vha, fcport);
3086 static void qla2x00_async_gidpn_sp_done(void *s, int res)
3089 struct scsi_qla_host *vha = sp->vha;
3090 fc_port_t *fcport = sp->fcport;
3091 u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
3092 struct event_arg ea;
3094 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3096 memset(&ea, 0, sizeof(ea));
3098 ea.id.b.domain = id[0];
3099 ea.id.b.area = id[1];
3100 ea.id.b.al_pa = id[2];
3103 ea.event = FCME_GIDPN_DONE;
3105 if (res == QLA_FUNCTION_TIMEOUT) {
3106 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3107 "Async done-%s WWPN %8phC timed out.\n",
3108 sp->name, fcport->port_name);
3109 qla24xx_post_gidpn_work(sp->vha, fcport);
3113 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3114 "Async done-%s fail res %x, WWPN %8phC\n",
3115 sp->name, res, fcport->port_name);
3117 ql_dbg(ql_dbg_disc, vha, 0x204f,
3118 "Async done-%s good WWPN %8phC ID %3phC\n",
3119 sp->name, fcport->port_name, id);
3122 qla2x00_fcport_event_handler(vha, &ea);
3127 int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
3129 int rval = QLA_FUNCTION_FAILED;
3130 struct ct_sns_req *ct_req;
3133 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3136 fcport->disc_state = DSC_GID_PN;
3137 fcport->scan_state = QLA_FCPORT_SCAN;
3138 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
3142 fcport->flags |= FCF_ASYNC_SENT;
3143 sp->type = SRB_CT_PTHRU_CMD;
3145 sp->gen1 = fcport->rscn_gen;
3146 sp->gen2 = fcport->login_gen;
3148 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3150 /* CT_IU preamble */
3151 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
3155 memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
3158 /* req & rsp use the same buffer */
3159 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3160 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3161 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3162 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3163 sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
3164 sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
3165 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3167 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3168 sp->done = qla2x00_async_gidpn_sp_done;
3170 rval = qla2x00_start_sp(sp);
3171 if (rval != QLA_SUCCESS)
3174 ql_dbg(ql_dbg_disc, vha, 0x20a4,
3175 "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
3176 sp->name, fcport->port_name,
3177 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
3178 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3184 fcport->flags &= ~FCF_ASYNC_ACTIVE;
3188 int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3190 struct qla_work_evt *e;
3193 ls = atomic_read(&vha->loop_state);
3194 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
3195 test_bit(UNLOADING, &vha->dpc_flags))
3198 e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
3200 return QLA_FUNCTION_FAILED;
3202 e->u.fcport.fcport = fcport;
3203 fcport->flags |= FCF_ASYNC_ACTIVE;
3204 return qla2x00_post_work(vha, e);
3207 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3209 struct qla_work_evt *e;
3211 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
3213 return QLA_FUNCTION_FAILED;
3215 e->u.fcport.fcport = fcport;
3216 fcport->flags |= FCF_ASYNC_ACTIVE;
3217 return qla2x00_post_work(vha, e);
3220 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
3222 struct fc_port *fcport = ea->fcport;
3224 ql_dbg(ql_dbg_disc, vha, 0x20d8,
3225 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
3226 __func__, fcport->port_name, fcport->disc_state,
3227 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
3228 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
3230 if (fcport->disc_state == DSC_DELETE_PEND)
3233 if (ea->sp->gen2 != fcport->login_gen) {
3234 /* target side must have changed it. */
3235 ql_dbg(ql_dbg_disc, vha, 0x20d3,
3236 "%s %8phC generation changed\n",
3237 __func__, fcport->port_name);
3239 } else if (ea->sp->gen1 != fcport->rscn_gen) {
3240 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
3241 __func__, __LINE__, fcport->port_name);
3242 qla24xx_post_gidpn_work(vha, fcport);
3246 qla_post_iidma_work(vha, fcport);
3249 static void qla24xx_async_gpsc_sp_done(void *s, int res)
3252 struct scsi_qla_host *vha = sp->vha;
3253 struct qla_hw_data *ha = vha->hw;
3254 fc_port_t *fcport = sp->fcport;
3255 struct ct_sns_rsp *ct_rsp;
3256 struct event_arg ea;
3258 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3260 ql_dbg(ql_dbg_disc, vha, 0x2053,
3261 "Async done-%s res %x, WWPN %8phC \n",
3262 sp->name, res, fcport->port_name);
3264 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3266 if (res == QLA_FUNCTION_TIMEOUT)
3269 if (res == (DID_ERROR << 16)) {
3270 /* entry status error */
3273 if ((ct_rsp->header.reason_code ==
3274 CT_REASON_INVALID_COMMAND_CODE) ||
3275 (ct_rsp->header.reason_code ==
3276 CT_REASON_COMMAND_UNSUPPORTED)) {
3277 ql_dbg(ql_dbg_disc, vha, 0x2019,
3278 "GPSC command unsupported, disabling query.\n");
3279 ha->flags.gpsc_supported = 0;
3283 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
3285 fcport->fp_speed = PORT_SPEED_1GB;
3288 fcport->fp_speed = PORT_SPEED_2GB;
3291 fcport->fp_speed = PORT_SPEED_4GB;
3294 fcport->fp_speed = PORT_SPEED_10GB;
3297 fcport->fp_speed = PORT_SPEED_8GB;
3300 fcport->fp_speed = PORT_SPEED_16GB;
3303 fcport->fp_speed = PORT_SPEED_32GB;
3307 ql_dbg(ql_dbg_disc, vha, 0x2054,
3308 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
3309 sp->name, fcport->fabric_port_name,
3310 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3311 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3314 memset(&ea, 0, sizeof(ea));
3315 ea.event = FCME_GPSC_DONE;
3319 qla2x00_fcport_event_handler(vha, &ea);
3324 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3326 int rval = QLA_FUNCTION_FAILED;
3327 struct ct_sns_req *ct_req;
3330 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3333 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3337 sp->type = SRB_CT_PTHRU_CMD;
3339 sp->gen1 = fcport->rscn_gen;
3340 sp->gen2 = fcport->login_gen;
3342 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3344 /* CT_IU preamble */
3345 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
3349 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
3352 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3353 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3354 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3355 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3356 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
3357 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
3358 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
3360 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3361 sp->done = qla24xx_async_gpsc_sp_done;
3363 ql_dbg(ql_dbg_disc, vha, 0x205e,
3364 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3365 sp->name, fcport->port_name, sp->handle,
3366 fcport->loop_id, fcport->d_id.b.domain,
3367 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3369 rval = qla2x00_start_sp(sp);
3370 if (rval != QLA_SUCCESS)
3376 fcport->flags &= ~FCF_ASYNC_SENT;
3378 fcport->flags &= ~FCF_ASYNC_ACTIVE;
3382 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3384 struct qla_work_evt *e;
3386 if (test_bit(UNLOADING, &vha->dpc_flags))
3389 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
3391 return QLA_FUNCTION_FAILED;
3393 e->u.gpnid.id = *id;
3394 return qla2x00_post_work(vha, e);
3397 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3399 struct srb_iocb *c = &sp->u.iocb_cmd;
3403 if (c->u.els_plogi.els_plogi_pyld)
3404 dma_free_coherent(&vha->hw->pdev->dev,
3405 c->u.els_plogi.tx_size,
3406 c->u.els_plogi.els_plogi_pyld,
3407 c->u.els_plogi.els_plogi_pyld_dma);
3409 if (c->u.els_plogi.els_resp_pyld)
3410 dma_free_coherent(&vha->hw->pdev->dev,
3411 c->u.els_plogi.rx_size,
3412 c->u.els_plogi.els_resp_pyld,
3413 c->u.els_plogi.els_resp_pyld_dma);
3415 case SRB_CT_PTHRU_CMD:
3417 if (sp->u.iocb_cmd.u.ctarg.req) {
3418 dma_free_coherent(&vha->hw->pdev->dev,
3419 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3420 sp->u.iocb_cmd.u.ctarg.req,
3421 sp->u.iocb_cmd.u.ctarg.req_dma);
3422 sp->u.iocb_cmd.u.ctarg.req = NULL;
3425 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3426 dma_free_coherent(&vha->hw->pdev->dev,
3427 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3428 sp->u.iocb_cmd.u.ctarg.rsp,
3429 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3430 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3438 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3440 fc_port_t *fcport, *conflict, *t;
3443 ql_dbg(ql_dbg_disc, vha, 0xffff,
3444 "%s %d port_id: %06x\n",
3445 __func__, __LINE__, ea->id.b24);
3448 /* cable is disconnected */
3449 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3450 if (fcport->d_id.b24 == ea->id.b24) {
3451 ql_dbg(ql_dbg_disc, vha, 0xffff,
3452 "%s %d %8phC DS %d\n",
3455 fcport->disc_state);
3456 fcport->scan_state = QLA_FCPORT_SCAN;
3457 switch (fcport->disc_state) {
3459 case DSC_DELETE_PEND:
3462 ql_dbg(ql_dbg_disc, vha, 0xffff,
3463 "%s %d %8phC post del sess\n",
3466 qlt_schedule_sess_for_deletion(fcport);
3472 /* cable is connected */
3473 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3475 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3477 if ((conflict->d_id.b24 == ea->id.b24) &&
3478 (fcport != conflict)) {
3479 /* 2 fcports with conflict Nport ID or
3480 * an existing fcport is having nport ID
3481 * conflict with new fcport.
3484 ql_dbg(ql_dbg_disc, vha, 0xffff,
3485 "%s %d %8phC DS %d\n",
3487 conflict->port_name,
3488 conflict->disc_state);
3489 conflict->scan_state = QLA_FCPORT_SCAN;
3490 switch (conflict->disc_state) {
3492 case DSC_DELETE_PEND:
3495 ql_dbg(ql_dbg_disc, vha, 0xffff,
3496 "%s %d %8phC post del sess\n",
3498 conflict->port_name);
3499 qlt_schedule_sess_for_deletion
3507 fcport->scan_state = QLA_FCPORT_FOUND;
3508 fcport->flags |= FCF_FABRIC_DEVICE;
3509 if (fcport->login_retry == 0) {
3510 fcport->login_retry =
3511 vha->hw->login_retry_count;
3512 ql_dbg(ql_dbg_disc, vha, 0xffff,
3513 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3514 fcport->port_name, fcport->loop_id,
3515 fcport->login_retry);
3517 switch (fcport->disc_state) {
3518 case DSC_LOGIN_COMPLETE:
3519 /* recheck session is still intact. */
3520 ql_dbg(ql_dbg_disc, vha, 0x210d,
3521 "%s %d %8phC revalidate session with ADISC\n",
3522 __func__, __LINE__, fcport->port_name);
3523 data[0] = data[1] = 0;
3524 qla2x00_post_async_adisc_work(vha, fcport,
3528 ql_dbg(ql_dbg_disc, vha, 0x210d,
3529 "%s %d %8phC login\n", __func__, __LINE__,
3531 fcport->d_id = ea->id;
3532 qla24xx_fcport_handle_login(vha, fcport);
3534 case DSC_DELETE_PEND:
3535 fcport->d_id = ea->id;
3538 fcport->d_id = ea->id;
3542 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3544 if (conflict->d_id.b24 == ea->id.b24) {
3545 /* 2 fcports with conflict Nport ID or
3546 * an existing fcport is having nport ID
3547 * conflict with new fcport.
3549 ql_dbg(ql_dbg_disc, vha, 0xffff,
3550 "%s %d %8phC DS %d\n",
3552 conflict->port_name,
3553 conflict->disc_state);
3555 conflict->scan_state = QLA_FCPORT_SCAN;
3556 switch (conflict->disc_state) {
3558 case DSC_DELETE_PEND:
3561 ql_dbg(ql_dbg_disc, vha, 0xffff,
3562 "%s %d %8phC post del sess\n",
3564 conflict->port_name);
3565 qlt_schedule_sess_for_deletion
3572 /* create new fcport */
3573 ql_dbg(ql_dbg_disc, vha, 0x2065,
3574 "%s %d %8phC post new sess\n",
3575 __func__, __LINE__, ea->port_name);
3576 qla24xx_post_newsess_work(vha, &ea->id,
3577 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
/*
 * qla2x00_async_gpnid_sp_done() - completion callback for the async
 * GPN_ID (Get Port Name by N_Port ID) CT pass-through issued by
 * qla24xx_async_gpnid().
 *
 * Builds an FCME_GPNID_DONE event from the CT request/response pair,
 * removes the sp from vha->gpnid_list under tgt.sess_lock, re-posts the
 * query on timeout or when a newer RSCN arrived (sp->gen1), hands the
 * event to qla2x00_fcport_event_handler(), then frees the CT DMA
 * buffers and posts QLA_EVT_UNMAP work to release the sp.
 *
 * NOTE(review): this extract has dropped lines (embedded line numbers
 * jump), so some statements and braces are missing from view.
 */
3582 static void qla2x00_async_gpnid_sp_done(void *s, int res)
3585 struct scsi_qla_host *vha = sp->vha;
3586 struct ct_sns_req *ct_req =
3587 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3588 struct ct_sns_rsp *ct_rsp =
3589 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3590 struct event_arg ea;
3591 struct qla_work_evt *e;
3592 unsigned long flags;
3595 ql_dbg(ql_dbg_disc, vha, 0x2066,
3596 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3597 sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
3598 ct_rsp->rsp.gpn_id.port_name)
3600 ql_dbg(ql_dbg_disc, vha, 0x2066,
3601 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3602 sp->name, sp->gen1, ct_req->req.port_id.port_id,
3603 ct_rsp->rsp.gpn_id.port_name);
/* Event carries the WWPN returned by the name server ... */
3605 memset(&ea, 0, sizeof(ea));
3606 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
/* ... and echoes back the N_Port ID that was queried. */
3608 ea.id.b.domain = ct_req->req.port_id.port_id[0];
3609 ea.id.b.area = ct_req->req.port_id.port_id[1];
3610 ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
3612 ea.event = FCME_GPNID_DONE;
/* Unlink this request from the per-vha gpnid_list (added under the
 * same tgt.sess_lock in qla24xx_async_gpnid()). */
3614 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3615 list_del(&sp->elem);
3616 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/* Retry the query on timeout, or when another RSCN for this
 * N_Port ID raced with us (gen1 bumped). */
3619 if (res == QLA_FUNCTION_TIMEOUT) {
3620 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3624 } else if (sp->gen1) {
3625 /* There was another RSCN for this Nport ID */
3626 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3631 qla2x00_fcport_event_handler(vha, &ea);
3633 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3635 /* please ignore kernel warning. otherwise, we have mem leak. */
3636 if (sp->u.iocb_cmd.u.ctarg.req) {
3637 dma_free_coherent(&vha->hw->pdev->dev,
3638 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3639 sp->u.iocb_cmd.u.ctarg.req,
3640 sp->u.iocb_cmd.u.ctarg.req_dma);
3641 sp->u.iocb_cmd.u.ctarg.req = NULL;
3643 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3644 dma_free_coherent(&vha->hw->pdev->dev,
3645 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3646 sp->u.iocb_cmd.u.ctarg.rsp,
3647 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3648 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/* Defer sp release to process context via QLA_EVT_UNMAP work. */
3656 qla2x00_post_work(vha, e);
3659 /* Get WWPN with Nport ID. */
/*
 * qla24xx_async_gpnid() - issue an async GPN_ID CT pass-through to the
 * fabric name server to resolve @id (N_Port ID) to a WWPN.
 *
 * Allocates separate coherent DMA buffers for the CT request and
 * response, dedups against in-flight queries on vha->gpnid_list
 * (under tgt.sess_lock), and starts the SRB; completion is handled by
 * qla2x00_async_gpnid_sp_done().
 *
 * Returns QLA_SUCCESS when the SRB was started, QLA_FUNCTION_FAILED
 * otherwise (error paths free whatever was allocated).
 *
 * NOTE(review): lines are missing from this extract (embedded line
 * numbers jump), so some error-path statements are not visible.
 */
3660 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3662 int rval = QLA_FUNCTION_FAILED;
3663 struct ct_sns_req *ct_req;
3665 struct ct_sns_pkt *ct_sns;
3666 unsigned long flags;
3668 if (!vha->flags.online)
3671 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3675 sp->type = SRB_CT_PTHRU_CMD;
3677 sp->u.iocb_cmd.u.ctarg.id = *id;
3679 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* If a GPN_ID for the same N_Port ID is already in flight, bail out
 * instead of issuing a duplicate query. */
3681 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3682 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3683 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3685 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3690 list_add_tail(&sp->elem, &vha->gpnid_list);
3691 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3693 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3694 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3696 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3697 if (!sp->u.iocb_cmd.u.ctarg.req) {
3698 ql_log(ql_log_warn, vha, 0xd041,
3699 "Failed to allocate ct_sns request.\n");
3703 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3704 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3706 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3707 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3708 ql_log(ql_log_warn, vha, 0xd042,
3709 "Failed to allocate ct_sns request.\n");
3713 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3714 memset(ct_sns, 0, sizeof(*ct_sns));
3716 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3717 /* CT_IU preamble */
3718 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
/* GPN_ID payload: the N_Port ID being resolved. */
3721 ct_req->req.port_id.port_id[0] = id->b.domain;
3722 ct_req->req.port_id.port_id[1] = id->b.area;
3723 ct_req->req.port_id.port_id[2] = id->b.al_pa;
3725 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3726 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3727 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3729 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3730 sp->done = qla2x00_async_gpnid_sp_done;
3732 ql_dbg(ql_dbg_disc, vha, 0x2067,
3733 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3734 sp->handle, ct_req->req.port_id.port_id);
3736 rval = qla2x00_start_sp(sp);
3737 if (rval != QLA_SUCCESS)
/*
 * NOTE(review): error path — sp->elem is removed here under
 * vha->hw->vport_slock, but it was added to gpnid_list above under
 * vha->hw->tgt.sess_lock. This looks like a lock mismatch; confirm
 * against upstream (later kernels take tgt.sess_lock here too).
 */
3743 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3744 list_del(&sp->elem);
3745 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3747 if (sp->u.iocb_cmd.u.ctarg.req) {
3748 dma_free_coherent(&vha->hw->pdev->dev,
3749 sizeof(struct ct_sns_pkt),
3750 sp->u.iocb_cmd.u.ctarg.req,
3751 sp->u.iocb_cmd.u.ctarg.req_dma);
3752 sp->u.iocb_cmd.u.ctarg.req = NULL;
3754 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3755 dma_free_coherent(&vha->hw->pdev->dev,
3756 sizeof(struct ct_sns_pkt),
3757 sp->u.iocb_cmd.u.ctarg.rsp,
3758 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3759 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gffid_event() - FCME_GFFID_DONE handler: after the
 * FC-4 features of @ea->fcport are known, continue discovery by
 * posting GNL (Get Name List) work for that port.
 */
3767 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3769 fc_port_t *fcport = ea->fcport;
3771 qla24xx_post_gnl_work(vha, fcport);
/*
 * qla24xx_async_gffid_sp_done() - completion callback for the async
 * GFF_ID (Get FC-4 Features by N_Port ID) query.
 *
 * Decodes the FC-4 Features array from the CT response into
 * fcport->fc4_type (FCP-SCSI) and fcport->fc4f_nvme (FC-NVMe), clears
 * FCF_ASYNC_SENT, and posts an FCME_GFFID_DONE event.
 *
 * NOTE(review): lines are missing from this extract (embedded line
 * numbers jump); some assignments are only partially visible.
 */
3774 void qla24xx_async_gffid_sp_done(void *s, int res)
3777 struct scsi_qla_host *vha = sp->vha;
3778 fc_port_t *fcport = sp->fcport;
3779 struct ct_sns_rsp *ct_rsp;
3780 struct event_arg ea;
3782 ql_dbg(ql_dbg_disc, vha, 0x2133,
3783 "Async done-%s res %x ID %x. %8phC\n",
3784 sp->name, res, fcport->d_id.b24, fcport->port_name);
3786 fcport->flags &= ~FCF_ASYNC_SENT;
3787 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3789 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3790 * The format of the FC-4 Features object, as defined by the FC-4,
3791 * Shall be an array of 4-bit values, one for each type code value
/* Low nibble at the FCP-SCSI word offset => port supports FCP. */
3794 if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
3797 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3798 fcport->fc4_type &= 0xf;
/* Low nibble at the NVMe word offset => port supports FC-NVMe. */
3801 if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
3802 /* w5 [00:03]/28h */
3804 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3805 fcport->fc4f_nvme &= 0xf;
3809 memset(&ea, 0, sizeof(ea));
3811 ea.fcport = sp->fcport;
3813 ea.event = FCME_GFFID_DONE;
3815 qla2x00_fcport_event_handler(vha, &ea);
3819 /* Get FC4 Feature with Nport ID. */
/*
 * qla24xx_async_gffid() - issue an async GFF_ID CT pass-through for
 * @fcport to learn which FC-4 protocols (FCP / FC-NVMe) it supports.
 *
 * Reuses the per-fcport preallocated ct_desc.ct_sns buffer for BOTH
 * the request and the response (same DMA address), so no allocation
 * happens here. Sets FCF_ASYNC_SENT while in flight; cleared on the
 * error path and in the completion callback.
 *
 * Returns QLA_SUCCESS when started, QLA_FUNCTION_FAILED otherwise.
 */
3820 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3822 int rval = QLA_FUNCTION_FAILED;
3823 struct ct_sns_req *ct_req;
3826 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3829 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3833 fcport->flags |= FCF_ASYNC_SENT;
3834 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot RSCN/login generations to detect races at completion. */
3836 sp->gen1 = fcport->rscn_gen;
3837 sp->gen2 = fcport->login_gen;
3839 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3840 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3842 /* CT_IU preamble */
3843 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
/* GFF_ID payload: the port's N_Port ID. */
3846 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3847 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3848 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
/* req and rsp share the fcport's preallocated CT buffer. */
3850 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3851 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3852 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3853 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3854 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3855 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3856 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3858 sp->done = qla24xx_async_gffid_sp_done;
3860 rval = qla2x00_start_sp(sp);
3861 if (rval != QLA_SUCCESS)
3864 ql_dbg(ql_dbg_disc, vha, 0x2132,
3865 "Async-%s hdl=%x %8phC.\n", sp->name,
3866 sp->handle, fcport->port_name);
/* Error path: allow a future retry. */
3871 fcport->flags &= ~FCF_ASYNC_SENT;
3875 /* GPN_FT + GNN_FT*/
/*
 * qla2x00_is_a_vp() - check whether @wwn belongs to one of this HBA's
 * own virtual ports, so the fabric scan can skip it.
 *
 * Walks ha->vp_list under vport_slock, comparing each vport's
 * port_name (as u64) against @wwn. Fast-path exit when no vhosts
 * exist. Return value lines are not visible in this extract.
 */
3876 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3878 struct qla_hw_data *ha = vha->hw;
3879 scsi_qla_host_t *vp;
3880 unsigned long flags;
3884 if (!ha->num_vhosts)
3887 spin_lock_irqsave(&ha->vport_slock, flags);
3888 list_for_each_entry(vp, &ha->vp_list, list) {
3889 twwn = wwn_to_u64(vp->port_name);
3895 spin_unlock_irqrestore(&ha->vport_slock, flags);
/*
 * qla24xx_async_gnnft_done() - reconcile the fabric-scan results
 * (vha->scan.l, filled by GPN_FT/GNN_FT) with the driver's fcport list.
 *
 * Aborts and reschedules the scan on chip reset or (up to
 * MAX_SCAN_RETRIES) on failure. Otherwise marks every fcport
 * QLA_FCPORT_SCAN, flips those found in the scan list back to
 * QLA_FCPORT_FOUND (handling N_Port ID changes and newly-fabric
 * devices), posts new-session work for unknown WWPNs, then logs out /
 * deletes fabric ports that were not found (sparing FCP2 devices) and
 * retries login where needed. Finally unmaps the scan sp, clears
 * SF_SCANNING, and re-arms a loop resync if any RSCN arrived meanwhile.
 *
 * NOTE(review): this extract has dropped lines (embedded line numbers
 * jump) — several conditions, braces and labels are missing from view.
 */
3900 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3905 struct fab_scan_rp *rp;
3906 unsigned long flags;
3909 ql_dbg(ql_dbg_disc, vha, 0xffff,
3910 "%s enter\n", __func__);
/* A chip reset since the scan started invalidates the results. */
3912 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3913 ql_dbg(ql_dbg_disc, vha, 0xffff,
3914 "%s scan stop due to chip reset %x/%x\n",
3915 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3921 vha->scan.scan_retry++;
3922 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3923 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3924 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3926 ql_dbg(ql_dbg_disc, vha, 0xffff,
3927 "Fabric scan failed on all retries.\n");
3931 vha->scan.scan_retry = 0;
/* Assume every known port is gone until the scan list proves
 * otherwise. */
3933 list_for_each_entry(fcport, &vha->vp_fcports, list)
3934 fcport->scan_state = QLA_FCPORT_SCAN;
3936 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3939 rp = &vha->scan.l[i];
3942 wwn = wwn_to_u64(rp->port_name);
/* Skip our own port. */
3946 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3949 /* Bypass reserved domain fields. */
3950 if ((rp->id.b.domain & 0xf0) == 0xf0)
3953 /* Bypass virtual ports of the same host. */
3954 if (qla2x00_is_a_vp(vha, wwn))
3957 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3958 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3960 fcport->rscn_rcvd = 0;
3961 fcport->scan_state = QLA_FCPORT_FOUND;
3964 * If device was not a fabric device before.
3966 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3967 qla2x00_clear_loop_id(fcport);
3968 fcport->flags |= FCF_FABRIC_DEVICE;
3969 } else if (fcport->d_id.b24 != rp->id.b24) {
/* Same WWPN, new N_Port ID: tear down the old session. */
3970 qlt_schedule_sess_for_deletion(fcport);
3972 fcport->d_id.b24 = rp->id.b24;
/* WWPN not in our fcport list: create a new session. */
3977 ql_dbg(ql_dbg_disc, vha, 0xffff,
3978 "%s %d %8phC post new sess\n",
3979 __func__, __LINE__, rp->port_name);
3980 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3981 rp->node_name, NULL, rp->fc4type);
3986 * Logout all previous fabric dev marked lost, except FCP2 devices.
3988 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3989 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3990 fcport->rscn_rcvd = 0;
3994 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3995 fcport->rscn_rcvd = 0;
3996 if ((qla_dual_mode_enabled(vha) ||
3997 qla_ini_mode_enabled(vha)) &&
3998 atomic_read(&fcport->state) == FCS_ONLINE) {
3999 qla2x00_mark_device_lost(vha, fcport,
4000 ql2xplogiabsentdevice, 0);
4002 if (fcport->loop_id != FC_NO_LOOP_ID &&
4003 (fcport->flags & FCF_FCP2_DEVICE) == 0) {
4004 ql_dbg(ql_dbg_disc, vha, 0x20f0,
4005 "%s %d %8phC post del sess\n",
4009 qlt_schedule_sess_for_deletion(fcport);
/* Port still present (or RSCN pending): retry login if needed. */
4014 if (fcport->rscn_rcvd ||
4015 fcport->disc_state != DSC_LOGIN_COMPLETE) {
4016 if (fcport->login_retry == 0) {
4017 fcport->login_retry =
4018 vha->hw->login_retry_count;
4019 ql_dbg(ql_dbg_disc, vha, 0x20a3,
4020 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
4021 fcport->port_name, fcport->loop_id,
4022 fcport->login_retry);
4024 fcport->rscn_rcvd = 0;
4025 qla24xx_fcport_handle_login(vha, fcport);
/* Release the scan sp and clear the scanning flag. */
4032 qla24xx_sp_unmap(vha, sp);
4033 spin_lock_irqsave(&vha->work_lock, flags);
4034 vha->scan.scan_flags &= ~SF_SCANNING;
4035 spin_unlock_irqrestore(&vha->work_lock, flags);
/* RSCNs that arrived mid-scan => schedule another resync. */
4038 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4039 if (fcport->rscn_rcvd) {
4040 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4041 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * qla2x00_find_free_fcp_nvme_slot() - merge one GPN_FT/GNN_FT response
 * (sp) into the scan list vha->scan.l.
 *
 * sp->gen2 carries the FC-4 type of the completed command. For
 * FCP-SCSI GPN_FT entries, WWPN/ID go into fresh slots tagged
 * FS_FC4TYPE_FCP; GNN_FT fills node names by matching N_Port ID. For
 * the NVMe pass, ports already present get FS_FC4TYPE_NVME OR'd in,
 * and NVMe-only ports take the first free slot.
 *
 * NOTE(review): lines are missing from this extract (embedded line
 * numbers jump) — slot-advance and break statements are not visible.
 */
4048 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
4051 struct qla_hw_data *ha = vha->hw;
4052 int num_fibre_dev = ha->max_fibre_devices;
4053 struct ct_sns_req *ct_req =
4054 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
4055 struct ct_sns_gpnft_rsp *ct_rsp =
4056 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
4057 struct ct_sns_gpn_ft_data *d;
4058 struct fab_scan_rp *rp;
4059 u16 cmd = be16_to_cpu(ct_req->command);
4060 u8 fc4_type = sp->gen2;
4067 for (i = 0; i < num_fibre_dev; i++) {
4068 d = &ct_rsp->entries[i];
4071 id.b.domain = d->port_id[0];
4072 id.b.area = d->port_id[1];
4073 id.b.al_pa = d->port_id[2];
4074 wwn = wwn_to_u64(d->port_name);
/* Skip empty response entries. */
4076 if (id.b24 == 0 || wwn == 0)
4079 if (fc4_type == FC4_TYPE_FCP_SCSI) {
4080 if (cmd == GPN_FT_CMD) {
/* FCP GPN_FT: record WWPN + ID in the next slot. */
4081 rp = &vha->scan.l[j];
4083 memcpy(rp->port_name, d->port_name, 8);
4085 rp->fc4type = FS_FC4TYPE_FCP;
/* FCP GNN_FT: match on ID, fill in the node name. */
4087 for (k = 0; k < num_fibre_dev; k++) {
4088 rp = &vha->scan.l[k];
4089 if (id.b24 == rp->id.b24) {
4090 memcpy(rp->node_name,
4097 /* Search if the fibre device supports FC4_TYPE_NVME */
4098 if (cmd == GPN_FT_CMD) {
4101 for (k = 0; k < num_fibre_dev; k++) {
4102 rp = &vha->scan.l[k];
4103 if (!memcmp(rp->port_name,
4106 * Supports FC-NVMe & FCP
4108 rp->fc4type |= FS_FC4TYPE_NVME;
4114 /* We found new FC-NVMe only port */
4116 for (k = 0; k < num_fibre_dev; k++) {
4117 rp = &vha->scan.l[k];
4118 if (wwn_to_u64(rp->port_name)) {
4122 memcpy(rp->port_name,
/* NVMe GNN_FT: match on ID, fill in the node name. */
4131 for (k = 0; k < num_fibre_dev; k++) {
4132 rp = &vha->scan.l[k];
4133 if (id.b24 == rp->id.b24) {
4134 memcpy(rp->node_name,
/*
 * qla2x00_async_gpnft_gnnft_sp_done() - shared completion callback for
 * the async GPN_FT and GNN_FT fabric-scan CT pass-throughs.
 *
 * On failure: clears SF_SCANNING, bumps scan_retry and either re-arms
 * DPC for a retry or gives up. On success: folds the response into
 * vha->scan.l, then drives the scan state machine — after the FCP
 * GNN_FT it posts QLA_EVT_GPNFT for the NVMe pass (when NVMe is
 * enabled); otherwise it posts GPNFT_DONE / GNNFT_DONE work. If no
 * work element can be allocated, the CT DMA buffers are freed here and
 * a loop resync is scheduled instead.
 *
 * NOTE(review): lines are missing from this extract (embedded line
 * numbers jump) — several if/else boundaries are not visible.
 */
4144 static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
4147 struct scsi_qla_host *vha = sp->vha;
4148 struct qla_work_evt *e;
4149 struct ct_sns_req *ct_req =
4150 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
4151 u16 cmd = be16_to_cpu(ct_req->command);
4152 u8 fc4_type = sp->gen2;
4153 unsigned long flags;
4155 /* gen2 field is holding the fc4type */
4156 ql_dbg(ql_dbg_disc, vha, 0xffff,
4157 "Async done-%s res %x FC4Type %x\n",
4158 sp->name, res, sp->gen2);
4161 unsigned long flags;
/* Failure: stop this scan and decide whether to retry. */
4164 spin_lock_irqsave(&vha->work_lock, flags);
4165 vha->scan.scan_flags &= ~SF_SCANNING;
4166 vha->scan.scan_retry++;
4167 spin_unlock_irqrestore(&vha->work_lock, flags);
4169 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
4170 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4171 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4172 qla2xxx_wake_dpc(vha);
4174 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
4175 "Async done-%s rescan failed on all retries\n",
/* Success: merge this response into the scan list. */
4182 qla2x00_find_free_fcp_nvme_slot(vha, sp);
/* FCP GNN_FT finished and NVMe is enabled: kick off the NVMe
 * GPN_FT pass next. */
4184 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
4185 cmd == GNN_FT_CMD) {
4186 del_timer(&sp->u.iocb_cmd.timer);
4187 spin_lock_irqsave(&vha->work_lock, flags);
4188 vha->scan.scan_flags &= ~SF_SCANNING;
4189 spin_unlock_irqrestore(&vha->work_lock, flags);
4191 e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
4194 * please ignore kernel warning. Otherwise,
4197 if (sp->u.iocb_cmd.u.ctarg.req) {
4198 dma_free_coherent(&vha->hw->pdev->dev,
4199 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4200 sp->u.iocb_cmd.u.ctarg.req,
4201 sp->u.iocb_cmd.u.ctarg.req_dma);
4202 sp->u.iocb_cmd.u.ctarg.req = NULL;
4204 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4205 dma_free_coherent(&vha->hw->pdev->dev,
4206 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4207 sp->u.iocb_cmd.u.ctarg.rsp,
4208 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4209 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4212 ql_dbg(ql_dbg_disc, vha, 0xffff,
4213 "Async done-%s unable to alloc work element\n",
4216 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4217 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4220 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
4224 qla2x00_post_work(vha, e);
/* Otherwise post the matching "done" work for this command. */
4228 if (cmd == GPN_FT_CMD) {
4229 del_timer(&sp->u.iocb_cmd.timer);
4230 e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
4232 e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
4236 /* please ignore kernel warning. Otherwise, we have mem leak. */
4237 if (sp->u.iocb_cmd.u.ctarg.req) {
4238 dma_free_coherent(&vha->hw->pdev->dev,
4239 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4240 sp->u.iocb_cmd.u.ctarg.req,
4241 sp->u.iocb_cmd.u.ctarg.req_dma);
4242 sp->u.iocb_cmd.u.ctarg.req = NULL;
4244 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4245 dma_free_coherent(&vha->hw->pdev->dev,
4246 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4247 sp->u.iocb_cmd.u.ctarg.rsp,
4248 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4249 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4252 ql_dbg(ql_dbg_disc, vha, 0xffff,
4253 "Async done-%s unable to alloc work element\n",
4256 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4257 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4264 qla2x00_post_work(vha, e);
4268 * Get WWNN list for fc4_type
4270 * It is assumed the same SRB is re-used from GPNFT to avoid
4271 * mem free & re-alloc
/*
 * qla24xx_async_gnnft() - issue an async GNN_FT (Get Node Names by
 * FC-4 type) CT pass-through, re-using the SRB and DMA buffers left
 * over from the preceding GPN_FT.
 *
 * Validates that the req/rsp buffers are still attached, zeroes them,
 * rebuilds the CT_IU for GNN_FT, and starts the SRB; completion goes
 * to qla2x00_async_gpnft_gnnft_sp_done(). All early-exit paths clear
 * SF_SCANNING; the error path frees the DMA buffers.
 *
 * NOTE(review): lines are missing from this extract (embedded line
 * numbers jump).
 */
4273 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
4276 int rval = QLA_FUNCTION_FAILED;
4277 struct ct_sns_req *ct_req;
4278 struct ct_sns_pkt *ct_sns;
4279 unsigned long flags;
4281 if (!vha->flags.online) {
4282 spin_lock_irqsave(&vha->work_lock, flags);
4283 vha->scan.scan_flags &= ~SF_SCANNING;
4284 spin_unlock_irqrestore(&vha->work_lock, flags);
/* The GPN_FT stage must have left its buffers on the sp. */
4288 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
4289 ql_log(ql_log_warn, vha, 0xffff,
4290 "%s: req %p rsp %p are not setup\n",
4291 __func__, sp->u.iocb_cmd.u.ctarg.req,
4292 sp->u.iocb_cmd.u.ctarg.rsp);
4293 spin_lock_irqsave(&vha->work_lock, flags);
4294 vha->scan.scan_flags &= ~SF_SCANNING;
4295 spin_unlock_irqrestore(&vha->work_lock, flags);
4300 ql_dbg(ql_dbg_disc, vha, 0xfffff,
4301 "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n",
4302 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
4303 sp->u.iocb_cmd.u.ctarg.req_size);
4305 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 = chip-reset generation (staleness check); gen2 = fc4 type. */
4307 sp->gen1 = vha->hw->base_qpair->chip_reset;
4308 sp->gen2 = fc4_type;
4310 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4311 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Recycle the GPN_FT buffers for this GNN_FT. */
4313 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4314 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4316 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4317 /* CT_IU preamble */
4318 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
4319 sp->u.iocb_cmd.u.ctarg.rsp_size);
4322 ct_req->req.gpn_ft.port_type = fc4_type;
4324 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
4325 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4327 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4329 rval = qla2x00_start_sp(sp);
4330 if (rval != QLA_SUCCESS)
4333 ql_dbg(ql_dbg_disc, vha, 0xffff,
4334 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4335 sp->handle, ct_req->req.gpn_ft.port_type);
/* Error path: release the inherited DMA buffers. */
4339 if (sp->u.iocb_cmd.u.ctarg.req) {
4340 dma_free_coherent(&vha->hw->pdev->dev,
4341 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4342 sp->u.iocb_cmd.u.ctarg.req,
4343 sp->u.iocb_cmd.u.ctarg.req_dma);
4344 sp->u.iocb_cmd.u.ctarg.req = NULL;
4346 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4347 dma_free_coherent(&vha->hw->pdev->dev,
4348 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4349 sp->u.iocb_cmd.u.ctarg.rsp,
4350 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4351 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_async_gpnft_done() - QLA_EVT_GPNFT_DONE handler: chain the
 * follow-up GNN_FT using the same sp (sp->gen2 still holds the FC-4
 * type from the GPN_FT stage).
 */
4359 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
4361 ql_dbg(ql_dbg_disc, vha, 0xffff,
4362 "%s enter\n", __func__);
4363 qla24xx_async_gnnft(vha, sp, sp->gen2);
4366 /* Get WWPN list for certain fc4_type */
/*
 * qla24xx_async_gpnft() - start a fabric scan: issue an async GPN_FT
 * (Get Port Names by FC-4 type) CT pass-through.
 *
 * For the FCP pass (@sp normally NULL) a new SRB plus CT req/rsp DMA
 * buffers are allocated and the scan list is cleared; the NVMe pass is
 * expected to re-enter with the sp from the previous stage. SF_SCANNING
 * guards against concurrent scans and is cleared on every failure path.
 * Completion goes to qla2x00_async_gpnft_gnnft_sp_done().
 *
 * NOTE(review): lines are missing from this extract (embedded line
 * numbers jump) — several returns/labels are not visible.
 */
4367 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4369 int rval = QLA_FUNCTION_FAILED;
4370 struct ct_sns_req *ct_req;
4371 struct ct_sns_pkt *ct_sns;
4373 unsigned long flags;
4375 ql_dbg(ql_dbg_disc, vha, 0xffff,
4376 "%s enter\n", __func__);
4378 if (!vha->flags.online)
/* Only one fabric scan at a time. */
4381 spin_lock_irqsave(&vha->work_lock, flags);
4382 if (vha->scan.scan_flags & SF_SCANNING) {
4383 spin_unlock_irqrestore(&vha->work_lock, flags);
4384 ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
4387 vha->scan.scan_flags |= SF_SCANNING;
4388 spin_unlock_irqrestore(&vha->work_lock, flags);
4390 if (fc4_type == FC4_TYPE_FCP_SCSI) {
4391 ql_dbg(ql_dbg_disc, vha, 0xffff,
4392 "%s: Performing FCP Scan\n", __func__);
4395 sp->free(sp); /* should not happen */
4397 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
4399 spin_lock_irqsave(&vha->work_lock, flags);
4400 vha->scan.scan_flags &= ~SF_SCANNING;
4401 spin_unlock_irqrestore(&vha->work_lock, flags);
4405 sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
4406 &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
4407 &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
4408 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
4409 if (!sp->u.iocb_cmd.u.ctarg.req) {
4410 ql_log(ql_log_warn, vha, 0xffff,
4411 "Failed to allocate ct_sns request.\n");
4412 spin_lock_irqsave(&vha->work_lock, flags);
4413 vha->scan.scan_flags &= ~SF_SCANNING;
4414 spin_unlock_irqrestore(&vha->work_lock, flags);
4417 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
/* Response must hold one entry per possible fabric device. */
4419 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4420 ((vha->hw->max_fibre_devices - 1) *
4421 sizeof(struct ct_sns_gpn_ft_data));
4423 sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
4424 &vha->hw->pdev->dev, rspsz,
4425 &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
/*
 * NOTE(review): the rsp buffer was allocated with rspsz (which can
 * exceed sizeof(struct ct_sns_pkt)), but rsp_allocated_size records
 * only sizeof(struct ct_sns_pkt). The later dma_free_coherent() uses
 * rsp_allocated_size, so size/addr won't match the allocation —
 * upstream later changed this to rspsz; confirm.
 */
4426 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
4427 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4428 ql_log(ql_log_warn, vha, 0xffff,
4429 "Failed to allocate ct_sns request.\n");
4430 spin_lock_irqsave(&vha->work_lock, flags);
4431 vha->scan.scan_flags &= ~SF_SCANNING;
4432 spin_unlock_irqrestore(&vha->work_lock, flags);
4435 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4437 ql_dbg(ql_dbg_disc, vha, 0xffff,
4438 "%s scan list size %d\n", __func__, vha->scan.size);
4440 memset(vha->scan.l, 0, vha->scan.size);
/* NVMe pass re-enters with the sp from the FCP stage. */
4442 ql_dbg(ql_dbg_disc, vha, 0xffff,
4443 "NVME scan did not provide SP\n");
4447 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 = chip-reset generation (staleness check); gen2 = fc4 type. */
4449 sp->gen1 = vha->hw->base_qpair->chip_reset;
4450 sp->gen2 = fc4_type;
4452 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4453 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4455 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4456 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4457 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4459 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4460 /* CT_IU preamble */
4461 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4464 ct_req->req.gpn_ft.port_type = fc4_type;
4466 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4468 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4470 rval = qla2x00_start_sp(sp);
4471 if (rval != QLA_SUCCESS) {
4472 spin_lock_irqsave(&vha->work_lock, flags);
4473 vha->scan.scan_flags &= ~SF_SCANNING;
4474 spin_unlock_irqrestore(&vha->work_lock, flags);
4478 ql_dbg(ql_dbg_disc, vha, 0xffff,
4479 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4480 sp->handle, ct_req->req.gpn_ft.port_type);
/* Error path: free whichever buffers were attached. */
4484 if (sp->u.iocb_cmd.u.ctarg.req) {
4485 dma_free_coherent(&vha->hw->pdev->dev,
4486 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4487 sp->u.iocb_cmd.u.ctarg.req,
4488 sp->u.iocb_cmd.u.ctarg.req_dma);
4489 sp->u.iocb_cmd.u.ctarg.req = NULL;
4491 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4492 dma_free_coherent(&vha->hw->pdev->dev,
4493 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4494 sp->u.iocb_cmd.u.ctarg.rsp,
4495 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4496 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla_scan_work_fn() - delayed-work handler that re-arms a fabric
 * rescan: sets LOCAL_LOOP_UPDATE and LOOP_RESYNC_NEEDED, wakes the DPC
 * thread, and clears SF_QUEUED under work_lock.
 */
4504 void qla_scan_work_fn(struct work_struct *work)
4506 struct fab_scan *s = container_of(to_delayed_work(work),
4507 struct fab_scan, scan_work);
4508 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4510 unsigned long flags;
4512 ql_dbg(ql_dbg_disc, vha, 0xffff,
4513 "%s: schedule loop resync\n", __func__);
4514 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4515 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4516 qla2xxx_wake_dpc(vha);
4517 spin_lock_irqsave(&vha->work_lock, flags);
4518 vha->scan.scan_flags &= ~SF_QUEUED;
4519 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_handle_gnnid_event() - FCME_GNNID_DONE handler: once the
 * node name is known, continue discovery with GNL work.
 */
4523 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4525 qla24xx_post_gnl_work(vha, ea->fcport);
/*
 * qla2x00_async_gnnid_sp_done() - completion callback for the async
 * GNN_ID (Get Node Name by N_Port ID) query: copies the returned WWNN
 * into fcport->node_name, clears FCF_ASYNC_SENT, and posts an
 * FCME_GNNID_DONE event.
 *
 * NOTE(review): lines are missing from this extract (embedded line
 * numbers jump).
 */
4528 static void qla2x00_async_gnnid_sp_done(void *s, int res)
4531 struct scsi_qla_host *vha = sp->vha;
4532 fc_port_t *fcport = sp->fcport;
4533 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4534 struct event_arg ea;
4537 fcport->flags &= ~FCF_ASYNC_SENT;
4538 wwnn = wwn_to_u64(node_name);
4540 memcpy(fcport->node_name, node_name, WWN_SIZE);
4542 memset(&ea, 0, sizeof(ea));
4546 ea.event = FCME_GNNID_DONE;
4548 ql_dbg(ql_dbg_disc, vha, 0x204f,
4549 "Async done-%s res %x, WWPN %8phC %8phC\n",
4550 sp->name, res, fcport->port_name, fcport->node_name);
4552 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gnnid() - issue an async GNN_ID CT pass-through to
 * fetch @fcport's node name (WWNN) from the fabric name server.
 *
 * Sets disc_state to DSC_GNN_ID, reuses the fcport's preallocated
 * ct_desc.ct_sns buffer for both request and response, and sets
 * FCF_ASYNC_SENT while in flight (cleared on the error path and in
 * the completion callback). Returns QLA_SUCCESS when started,
 * QLA_FUNCTION_FAILED otherwise.
 */
4557 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4559 int rval = QLA_FUNCTION_FAILED;
4560 struct ct_sns_req *ct_req;
4563 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4566 fcport->disc_state = DSC_GNN_ID;
4567 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4571 fcport->flags |= FCF_ASYNC_SENT;
4572 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot RSCN/login generations to detect races at completion. */
4574 sp->gen1 = fcport->rscn_gen;
4575 sp->gen2 = fcport->login_gen;
4577 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4578 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4580 /* CT_IU preamble */
4581 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
/* GNN_ID payload: the port's N_Port ID. */
4585 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
4586 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4587 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4590 /* req & rsp use the same buffer */
4591 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4592 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4593 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4594 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4595 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4596 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4597 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4599 sp->done = qla2x00_async_gnnid_sp_done;
4601 rval = qla2x00_start_sp(sp);
4602 if (rval != QLA_SUCCESS)
4604 ql_dbg(ql_dbg_disc, vha, 0xffff,
4605 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4606 sp->name, fcport->port_name,
4607 sp->handle, fcport->loop_id, fcport->d_id.b24);
/* Error path: allow a future retry. */
4612 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gnnid_work() - queue QLA_EVT_GNNID work for @fcport.
 * Rejects the request when the loop is neither READY nor UP, or the
 * driver is unloading; returns QLA_FUNCTION_FAILED if no work element
 * can be allocated, otherwise the qla2x00_post_work() result.
 */
4617 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4619 struct qla_work_evt *e;
4622 ls = atomic_read(&vha->loop_state);
4623 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4624 test_bit(UNLOADING, &vha->dpc_flags))
4627 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
4629 return QLA_FUNCTION_FAILED;
4631 e->u.fcport.fcport = fcport;
4632 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gfpnid_event() - FCME_GFPNID_DONE handler.
 *
 * Drops the event if the port is being deleted or the login generation
 * changed; re-queries via GIDPN if an RSCN raced the query; otherwise
 * proceeds to GPSC (Get Port Speed) work.
 */
4636 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4638 fc_port_t *fcport = ea->fcport;
4640 ql_dbg(ql_dbg_disc, vha, 0xffff,
4641 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4642 __func__, fcport->port_name, fcport->disc_state,
4643 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4644 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
4646 if (fcport->disc_state == DSC_DELETE_PEND)
4649 if (ea->sp->gen2 != fcport->login_gen) {
4650 /* target side must have changed it. */
4651 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4652 "%s %8phC generation changed\n",
4653 __func__, fcport->port_name);
4655 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4656 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
4657 __func__, __LINE__, fcport->port_name);
4658 qla24xx_post_gidpn_work(vha, fcport);
4662 qla24xx_post_gpsc_work(vha, fcport);
/*
 * qla2x00_async_gfpnid_sp_done() - completion callback for the async
 * GFPN_ID (Get Fabric Port Name by N_Port ID) query: copies the
 * returned fabric port name into fcport->fabric_port_name and posts an
 * FCME_GFPNID_DONE event.
 *
 * NOTE(review): lines are missing from this extract (embedded line
 * numbers jump).
 */
4665 static void qla2x00_async_gfpnid_sp_done(void *s, int res)
4668 struct scsi_qla_host *vha = sp->vha;
4669 fc_port_t *fcport = sp->fcport;
4670 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4671 struct event_arg ea;
4674 wwn = wwn_to_u64(fpn);
4676 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4678 memset(&ea, 0, sizeof(ea));
4682 ea.event = FCME_GFPNID_DONE;
4684 ql_dbg(ql_dbg_disc, vha, 0x204f,
4685 "Async done-%s res %x, WWPN %8phC %8phC\n",
4686 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4688 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gfpnid() - issue an async GFPN_ID CT pass-through to
 * fetch @fcport's fabric port name from the name server.
 *
 * Reuses the fcport's preallocated ct_desc.ct_sns buffer for both
 * request and response. Returns QLA_SUCCESS when started,
 * QLA_FUNCTION_FAILED otherwise; the error path clears FCF_ASYNC_SENT.
 */
4693 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4695 int rval = QLA_FUNCTION_FAILED;
4696 struct ct_sns_req *ct_req;
4699 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4702 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4706 sp->type = SRB_CT_PTHRU_CMD;
4707 sp->name = "gfpnid";
/* Snapshot RSCN/login generations to detect races at completion. */
4708 sp->gen1 = fcport->rscn_gen;
4709 sp->gen2 = fcport->login_gen;
4711 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4712 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4714 /* CT_IU preamble */
4715 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
/* GFPN_ID payload: the port's N_Port ID. */
4719 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
4720 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4721 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4724 /* req & rsp use the same buffer */
4725 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4726 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4727 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4728 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4729 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4730 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4731 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4733 sp->done = qla2x00_async_gfpnid_sp_done;
4735 rval = qla2x00_start_sp(sp);
4736 if (rval != QLA_SUCCESS)
4739 ql_dbg(ql_dbg_disc, vha, 0xffff,
4740 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4741 sp->name, fcport->port_name,
4742 sp->handle, fcport->loop_id, fcport->d_id.b24);
/* Error path: allow a future retry. */
4747 fcport->flags &= ~FCF_ASYNC_SENT;
4752 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4754 struct qla_work_evt *e;
4757 ls = atomic_read(&vha->loop_state);
4758 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4759 test_bit(UNLOADING, &vha->dpc_flags))
4762 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4764 return QLA_FUNCTION_FAILED;
4766 e->u.fcport.fcport = fcport;
4767 return qla2x00_post_work(vha, e);