// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	ql_dbg(ql_dbg_user, sp->vha, 0x7009,
	    "%s: sp hdl %x, result=%x bsg ptr %p\n",
	    __func__, sp->handle, res, bsg_job);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
}

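/*
 * Final release callback for a BSG srb. For SRB_CT_CMD, SRB_FXIOCB_BCMD
 * and SRB_ELS_CMD_HST the fcport is the dummy one allocated at submit
 * time, so it is released here through the deferred free_work.
 */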
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {

		if (sp->remap.remapped) {
			dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
			    sp->remap.rsp.dma);
			dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
			    sp->remap.req.dma);
		} else {
			dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

			dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		}
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}

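/*
 * Validate an FCP priority configuration image: all-ones means the flash
 * region is unprogrammed, the header must carry the "HQOS" signature, and
 * (when flag is set) at least one entry must have FCP_PRIO_ENTRY_TAG_VALID.
 */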
static int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		ret = 0;
		goto exit;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header*/
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		ret = 0;
		goto exit;
	}
	if (flag != 1)
		goto exit;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}
exit:
	return ret;
}

static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid,
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;

	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

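/*
 * ELS passthru. FC_BSG_RPT_ELS requests arrive through an rport and reuse
 * its fcport (logging in first if needed); host-based ELS requests get a
 * dummy fcport built from the destination port id. AUTH ELS frames are
 * diverted to the EDIF code. Only single-element SG lists are supported.
 */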
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;
	uint32_t els_cmd = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
		els_cmd = bsg_request->rqst_data.h_els.command_code;
		if (els_cmd == ELS_AUTH_ELS)
			return qla_edif_process_els(vha, bsg_job);
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
			bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (bsg_request->msgcode == FC_BSG_RPT_ELS)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

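/*
 * A CT passthru IOCB carries 2 DSDs; each continuation IOCB carries 5
 * more. E.g. 13 DSDs -> 1 + (11 / 5) + 1 for the remainder = 4 IOCBs.
 */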
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

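/*
 * CT passthru. Bits 24-31 of preamble_word1 select the destination:
 * 0xFC is the SNS server (NPH_SNS), 0xFA the management server; the
 * command is sent through a dummy fcport like host-based ELS.
 */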
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
		(bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
			>> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take an FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

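/*
 * Loopback/echo diagnostic. In F-port topology (or for an external
 * loopback ELS frame on CNA adapters) an echo test is issued directly;
 * otherwise the CNA port config is switched into loopback mode first and
 * restored once qla2x00_loopback_test() completes.
 */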
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
		&req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
		&rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	     ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	      get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	      req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	      elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
					response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, rsp_data,
			rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
			sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
		rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
		req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return rval;
}

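/* ISP84xx chip reset; A84_ISSUE_RESET_DIAG_FW selects the diag firmware. */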
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

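/*
 * ISP84xx firmware update: the image is staged in a coherent buffer and
 * handed to the chip through a VERIFY_CHIP IOCB with a 120 second timeout.
 */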
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

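/*
 * ISP84xx management: wraps the ACCESS_CHIP IOCB for memory dump/load,
 * info query and config change; the DMA direction and payload mapping
 * depend on the sub-command.
 */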
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

done_unmap_sg:
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

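/*
 * iiDMA speed control: the target is looked up by WWPN among vp_fcports;
 * port_param->mode selects set (nonzero) versus get of the port speed.
 */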
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
			fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
			port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
			&port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
				sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
				sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
				sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

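/*
 * Common option ROM setup for the read/update paths below; callers hold
 * ha->optrom_mutex. Validates the start offset, clamps the region to the
 * option ROM size and allocates the staging buffer.
 */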
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

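/*
 * The FRU/SFP helpers below stage the caller's structure in a small
 * on-stack buffer (DMA_POOL_SIZE) and use an s_dma_pool block for the
 * actual qla2x00_read_sfp()/qla2x00_write_sfp() mailbox transfers.
 */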
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

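/*
 * Bidirectional passthru diagnostic: requires switch (F-port) topology
 * and P2P operating mode, performs a one-time self-login, and completes
 * the bsg job from the interrupt handler on success.
 */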
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
			"This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
			"Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
			"Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
			"Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/*Populate srb->ctx with bidir ctx*/
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}

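/*
 * ISPFx00 management passthru: piocb_rqst->flags declare which payloads
 * need DMA mapping, and the loop id is taken from the IOCB dataword.
 */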
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}

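/*
 * SerDes register access: a single register read or write via mailbox,
 * with separate entry points for the 26xx-style and ISP8044 register
 * files.
 */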
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7020,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
			   (uint64_t)ha->fw_attributes_ext[0] << 32 |
			   (uint64_t)ha->fw_attributes_h << 16 |
			   (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
			 (uint64_t)ha->fw_attributes_ext[0] << 32 |
			 (uint64_t)ha->fw_attributes_h << 16 |
			 (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;
		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
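
/*
 * Return ISP link statistics to the application. The stats are read into
 * a DMA-coherent buffer and copied out through the bsg reply payload.
 */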
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
		    stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}
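
/* Run D_Port diagnostics (ISP83xx/27xx/28xx) and return the results. */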
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}
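
/* Report which flash image regions are currently active. */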
static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_hw_data *ha = vha->hw;
	struct qla_active_regions regions = { };
	struct active_regions active_regions = { };

	qla27xx_get_active_image(vha, &active_regions);
	regions.global_image = active_regions.global;

	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		regions.board_config = active_regions.aux.board_config;
		regions.vpd_nvram = active_regions.aux.vpd_nvram;
		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
	}

	ql_dbg(ql_dbg_user, vha, 0x70e1,
	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
	    __func__, vha->host_no, regions.global_image,
	    regions.board_config, regions.vpd_nvram,
	    regions.npiv_config_0_1, regions.npiv_config_2_3);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len = sizeof(regions);
	bsg_reply->result = DID_OK << 16;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
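
/* Start, stop, or reset collection of host statistics. */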
static int
qla2x00_manage_host_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_mng_host_stats_param *req_data;
	struct ql_vnd_mng_host_stats_resp rsp_data;
	u32 req_data_len;
	int ret = 0;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
		return -EIO;
	}

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data,
	    req_data_len);

	switch (req_data->action) {
	case QLA_STOP:
		ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
		break;
	case QLA_START:
		ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
		break;
	case QLA_CLEAR:
		ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
		break;
	default:
		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
		ret = -EIO;
		break;
	}

	kfree(req_data);

	/* Prepare response */
	rsp_data.status = ret;
	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt,
	    &rsp_data,
	    sizeof(struct ql_vnd_mng_host_stats_resp));

	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return ret;
}
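
/*
 * Return initiator statistics for this host. When QLA2XX_TGT_SHT_LNK_DOWN
 * is requested, one extra entry is reserved per connected target.
 */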
static int
qla2x00_get_host_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_stats_param *req_data;
	struct ql_vnd_host_stats_resp rsp_data;
	u32 req_data_len;
	int ret = 0;
	u64 ini_entry_count = 0;
	u64 entry_count = 0;
	u64 tgt_num = 0;
	u64 tmp_stat_type = 0;
	u64 response_len = 0;
	void *data;

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	/* Copy stat type to work on it */
	tmp_stat_type = req_data->stat_type;

	if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
		/* Num of tgts connected to this host */
		tgt_num = qla2x00_get_num_tgts(vha);
		/* unset BIT_17 */
		tmp_stat_type &= ~(1 << 17);
	}

	/* Total ini stats */
	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

	/* Total number of entries */
	entry_count = ini_entry_count + tgt_num;

	response_len = sizeof(struct ql_vnd_host_stats_resp) +
	    (sizeof(struct ql_vnd_stat_entry) * entry_count);

	if (response_len > bsg_job->reply_payload.payload_len) {
		rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &rsp_data,
		    sizeof(struct ql_vnd_mng_host_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
		goto host_stat_out;
	}

	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto host_stat_out;
	}

	ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
	    data, response_len);

	rsp_data.status = EXT_STATUS_OK;
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt,
	    data, response_len);
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(data);
host_stat_out:
	kfree(req_data);
	return ret;
}
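
/* Find the fc_rport whose target number matches tgt_num on this host. */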
static struct fc_rport *
qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
{
	fc_port_t *fcport = NULL;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport->number == tgt_num)
			return fcport->rport;
	}
	return NULL;
}
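
/* Return statistics for a single target, identified by rport number. */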
static int
qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_tgt_stats_param *req_data;
	u32 req_data_len;
	int ret = 0;
	u64 response_len = 0;
	struct ql_vnd_tgt_stats_resp *data = NULL;
	struct fc_rport *rport = NULL;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
		return -EIO;
	}

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    req_data, req_data_len);

	response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
	    sizeof(struct ql_vnd_stat_entry);

	/* structure + size for one entry */
	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		kfree(req_data);
		return -ENOMEM;
	}

	if (response_len > bsg_job->reply_payload.payload_len) {
		data->status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, data,
		    sizeof(struct ql_vnd_tgt_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
		goto tgt_stat_out;
	}

	rport = qla2xxx_find_rport(vha, req_data->tgt_id);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
		ret = EXT_STATUS_INVALID_PARAM;
		data->status = EXT_STATUS_INVALID_PARAM;
		goto reply;
	}

	ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
	    rport, (void *)data, response_len);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
reply:
	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, data,
	    sizeof(struct ql_vnd_tgt_stats_resp));
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

tgt_stat_out:
	kfree(data);
	kfree(req_data);

	return ret;
}
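
/* Enable or disable this host port on application request. */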
static int
qla2x00_manage_host_port(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_mng_host_port_param *req_data;
	struct ql_vnd_mng_host_port_resp rsp_data;
	u32 req_data_len;
	int ret = 0;

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	switch (req_data->action) {
	case QLA_ENABLE:
		ret = qla2xxx_enable_port(vha->host);
		break;
	case QLA_DISABLE:
		ret = qla2xxx_disable_port(vha->host);
		break;
	default:
		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
		ret = -EIO;
		break;
	}

	kfree(req_data);

	/* Prepare response */
	rsp_data.status = ret;
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);

	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &rsp_data,
	    sizeof(struct ql_vnd_mng_host_port_resp));
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return ret;
}
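
/* Dispatch FC_BSG_HST_VENDOR commands to their vendor-specific handlers. */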
static int
qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
	    __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	case QL_VND_EDIF_MGMT:
		return qla_edif_app_mgmt(bsg_job);

	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
		return qla2x00_get_flash_image_status(bsg_job);

	case QL_VND_MANAGE_HOST_STATS:
		return qla2x00_manage_host_stats(bsg_job);

	case QL_VND_GET_HOST_STATS:
		return qla2x00_get_host_stats(bsg_job);

	case QL_VND_GET_TGT_STATS:
		return qla2x00_get_tgt_stats(bsg_job);

	case QL_VND_MANAGE_HOST_PORT:
		return qla2x00_manage_host_port(bsg_job);

	default:
		return -ENOSYS;
	}
}
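
/* Entry point for all BSG requests routed to this driver by the FC transport. */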
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	/* Disable port will bring down the chip, allow enable command */
	if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
	    bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
		goto skip_chip_chk;

	if (vha->hw->flags.port_isolated) {
		bsg_reply->result = DID_ERROR;
		/* operation not permitted */
		return -EPERM;
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EBUSY;
	}

	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EIO;
	}

skip_chip_chk:
	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "Entered %s msgcode=0x%x. bsg ptr %px\n",
	    __func__, bsg_request->msgcode, bsg_job);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(vha, bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}

	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "%s done with return %x\n", __func__, ret);

	return ret;
}
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
	    __func__, bsg_job);

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp &&
			    (sp->type == SRB_CT_CMD ||
			     sp->type == SRB_ELS_CMD_HST ||
			     sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
			     sp->type == SRB_FXIOCB_BCMD) &&
			    sp->u.bsg_job == bsg_job) {
				req->outstanding_cmds[cnt] = NULL;
				spin_unlock_irqrestore(&ha->hardware_lock, flags);
				if (ha->isp_ops->abort_command(sp)) {
					ql_log(ql_log_warn, vha, 0x7089,
					    "mbx abort_command failed.\n");
					bsg_reply->result = -EIO;
				} else {
					ql_dbg(ql_dbg_user, vha, 0x708a,
					    "mbx abort_command success.\n");
					bsg_reply->result = 0;
				}
				spin_lock_irqsave(&ha->hardware_lock, flags);
				goto done;
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return 0;
}