[SCSI] qla2xxx: Wait for IDC complete event to finish loopback operation.
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers/scsi/qla2xxx/qla_bsg.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2012 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12
13 /* BSG support for ELS/CT pass through */
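/*
 * qla2x00_bsg_job_done() - SRB completion callback for BSG pass-through
 * commands: store the result in the bsg reply, complete the bsg job and
 * release the SRB.
 */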
14 void
15 qla2x00_bsg_job_done(void *data, void *ptr, int res)
16 {
17         srb_t *sp = (srb_t *)ptr;
18         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
19         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
20
21         bsg_job->reply->result = res;
22         bsg_job->job_done(bsg_job);
23         sp->free(vha, sp);
24 }
25
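/*
 * qla2x00_bsg_sp_free() - release callback for a BSG SRB: unmap the request
 * and reply scatter/gather lists, free the dummy fcport allocated for
 * host-based ELS/CT commands and return the SRB to the pool.
 */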
26 void
27 qla2x00_bsg_sp_free(void *data, void *ptr)
28 {
29         srb_t *sp = (srb_t *)ptr;
30         struct scsi_qla_host *vha = sp->fcport->vha;
31         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
32         struct qla_hw_data *ha = vha->hw;
33
34         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
35             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
36
37         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
38             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
39
40         if (sp->type == SRB_CT_CMD ||
41             sp->type == SRB_ELS_CMD_HST)
42                 kfree(sp->fcport);
43         qla2x00_rel_sp(vha, sp);
44 }
45
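/*
 * qla24xx_fcp_prio_cfg_valid() - check the "HQOS" signature of an FCP
 * priority configuration block and, when flag is set, verify that at least
 * one entry is tagged valid.
 */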
46 int
47 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
48         struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
49 {
50         int i, ret, num_valid;
51         uint8_t *bcode;
52         struct qla_fcp_prio_entry *pri_entry;
53         uint32_t *bcode_val_ptr, bcode_val;
54
55         ret = 1;
56         num_valid = 0;
57         bcode = (uint8_t *)pri_cfg;
58         bcode_val_ptr = (uint32_t *)pri_cfg;
59         bcode_val = (uint32_t)(*bcode_val_ptr);
60
61         if (bcode_val == 0xFFFFFFFF) {
62                 /* No FCP Priority config data in flash */
63                 ql_dbg(ql_dbg_user, vha, 0x7051,
64                     "No FCP Priority config data.\n");
65                 return 0;
66         }
67
68         if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
69                         bcode[3] != 'S') {
70                 /* Invalid FCP priority data header */
71                 ql_dbg(ql_dbg_user, vha, 0x7052,
72                     "Invalid FCP Priority data header. bcode=0x%x.\n",
73                     bcode_val);
74                 return 0;
75         }
76         if (flag != 1)
77                 return ret;
78
79         pri_entry = &pri_cfg->entry[0];
80         for (i = 0; i < pri_cfg->num_entries; i++) {
81                 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
82                         num_valid++;
83                 pri_entry++;
84         }
85
86         if (num_valid == 0) {
87                 /* No valid FCP priority data entries */
88                 ql_dbg(ql_dbg_user, vha, 0x7053,
89                     "No valid FCP Priority data entries.\n");
90                 ret = 0;
91         } else {
92                 /* FCP priority data is valid */
93                 ql_dbg(ql_dbg_user, vha, 0x7054,
94                     "Valid FCP priority data. num entries = %d.\n",
95                     num_valid);
96         }
97
98         return ret;
99 }
100
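/*
 * qla24xx_proc_fcp_prio_cfg_cmd() - BSG vendor command: enable, disable,
 * read or write the FCP priority configuration held by the driver and push
 * the resulting priority settings to all fcports.
 */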
101 static int
102 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
103 {
104         struct Scsi_Host *host = bsg_job->shost;
105         scsi_qla_host_t *vha = shost_priv(host);
106         struct qla_hw_data *ha = vha->hw;
107         int ret = 0;
108         uint32_t len;
109         uint32_t oper;
110
111         if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
112                 ret = -EINVAL;
113                 goto exit_fcp_prio_cfg;
114         }
115
116         /* Get the sub command */
117         oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
118
119         /* Only set config is allowed if config memory is not allocated */
120         if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
121                 ret = -EINVAL;
122                 goto exit_fcp_prio_cfg;
123         }
124         switch (oper) {
125         case QLFC_FCP_PRIO_DISABLE:
126                 if (ha->flags.fcp_prio_enabled) {
127                         ha->flags.fcp_prio_enabled = 0;
128                         ha->fcp_prio_cfg->attributes &=
129                                 ~FCP_PRIO_ATTR_ENABLE;
130                         qla24xx_update_all_fcp_prio(vha);
131                         bsg_job->reply->result = DID_OK;
132                 } else {
133                         ret = -EINVAL;
134                         bsg_job->reply->result = (DID_ERROR << 16);
135                         goto exit_fcp_prio_cfg;
136                 }
137                 break;
138
139         case QLFC_FCP_PRIO_ENABLE:
140                 if (!ha->flags.fcp_prio_enabled) {
141                         if (ha->fcp_prio_cfg) {
142                                 ha->flags.fcp_prio_enabled = 1;
143                                 ha->fcp_prio_cfg->attributes |=
144                                     FCP_PRIO_ATTR_ENABLE;
145                                 qla24xx_update_all_fcp_prio(vha);
146                                 bsg_job->reply->result = DID_OK;
147                         } else {
148                                 ret = -EINVAL;
149                                 bsg_job->reply->result = (DID_ERROR << 16);
150                                 goto exit_fcp_prio_cfg;
151                         }
152                 }
153                 break;
154
155         case QLFC_FCP_PRIO_GET_CONFIG:
156                 len = bsg_job->reply_payload.payload_len;
157                 if (!len || len > FCP_PRIO_CFG_SIZE) {
158                         ret = -EINVAL;
159                         bsg_job->reply->result = (DID_ERROR << 16);
160                         goto exit_fcp_prio_cfg;
161                 }
162
163                 bsg_job->reply->result = DID_OK;
164                 bsg_job->reply->reply_payload_rcv_len =
165                         sg_copy_from_buffer(
166                         bsg_job->reply_payload.sg_list,
167                         bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
168                         len);
169
170                 break;
171
172         case QLFC_FCP_PRIO_SET_CONFIG:
173                 len = bsg_job->request_payload.payload_len;
174                 if (!len || len > FCP_PRIO_CFG_SIZE) {
175                         bsg_job->reply->result = (DID_ERROR << 16);
176                         ret = -EINVAL;
177                         goto exit_fcp_prio_cfg;
178                 }
179
180                 if (!ha->fcp_prio_cfg) {
181                         ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
182                         if (!ha->fcp_prio_cfg) {
183                                 ql_log(ql_log_warn, vha, 0x7050,
184                                     "Unable to allocate memory for fcp prio "
185                                     "config data (%x).\n", FCP_PRIO_CFG_SIZE);
186                                 bsg_job->reply->result = (DID_ERROR << 16);
187                                 ret = -ENOMEM;
188                                 goto exit_fcp_prio_cfg;
189                         }
190                 }
191
192                 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
193                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
194                 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
195                         FCP_PRIO_CFG_SIZE);
196
197                 /* validate fcp priority data */
198
199                 if (!qla24xx_fcp_prio_cfg_valid(vha,
200                     (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
201                         bsg_job->reply->result = (DID_ERROR << 16);
202                         ret = -EINVAL;
203                         /* If the buffer was invalid then
204                          * fcp_prio_cfg is of no use
205                          */
206                         vfree(ha->fcp_prio_cfg);
207                         ha->fcp_prio_cfg = NULL;
208                         goto exit_fcp_prio_cfg;
209                 }
210
211                 ha->flags.fcp_prio_enabled = 0;
212                 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
213                         ha->flags.fcp_prio_enabled = 1;
214                 qla24xx_update_all_fcp_prio(vha);
215                 bsg_job->reply->result = DID_OK;
216                 break;
217         default:
218                 ret = -EINVAL;
219                 break;
220         }
221 exit_fcp_prio_cfg:
222         if (!ret)
223                 bsg_job->job_done(bsg_job);
224         return ret;
225 }
226
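/*
 * qla2x00_process_els() - handle an ELS pass-through BSG request, either
 * through an rport (FC_BSG_RPT_ELS, requiring a fabric login) or host based
 * without a prior login (FC_BSG_HST_ELS_NOLOGIN, using a dummy fcport).
 */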
227 static int
228 qla2x00_process_els(struct fc_bsg_job *bsg_job)
229 {
230         struct fc_rport *rport;
231         fc_port_t *fcport = NULL;
232         struct Scsi_Host *host;
233         scsi_qla_host_t *vha;
234         struct qla_hw_data *ha;
235         srb_t *sp;
236         const char *type;
237         int req_sg_cnt, rsp_sg_cnt;
238         int rval =  (DRIVER_ERROR << 16);
239         uint16_t nextlid = 0;
240
241         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
242                 rport = bsg_job->rport;
243                 fcport = *(fc_port_t **) rport->dd_data;
244                 host = rport_to_shost(rport);
245                 vha = shost_priv(host);
246                 ha = vha->hw;
247                 type = "FC_BSG_RPT_ELS";
248         } else {
249                 host = bsg_job->shost;
250                 vha = shost_priv(host);
251                 ha = vha->hw;
252                 type = "FC_BSG_HST_ELS_NOLOGIN";
253         }
254
255         /* pass through is supported only for ISP 4Gb or higher */
256         if (!IS_FWI2_CAPABLE(ha)) {
257                 ql_dbg(ql_dbg_user, vha, 0x7001,
258                     "ELS passthru not supported for ISP23xx based adapters.\n");
259                 rval = -EPERM;
260                 goto done;
261         }
262
263         /*  Multiple SG's are not supported for ELS requests */
264         if (bsg_job->request_payload.sg_cnt > 1 ||
265                 bsg_job->reply_payload.sg_cnt > 1) {
266                 ql_dbg(ql_dbg_user, vha, 0x7002,
267                     "Multiple SG's are not supported for ELS requests, "
268                     "request_sg_cnt=%x reply_sg_cnt=%x.\n",
269                     bsg_job->request_payload.sg_cnt,
270                     bsg_job->reply_payload.sg_cnt);
271                 rval = -EPERM;
272                 goto done;
273         }
274
275         /* ELS request for rport */
276         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
277                 /* make sure the rport is logged in,
278                  * if not perform fabric login
279                  */
280                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
281                         ql_dbg(ql_dbg_user, vha, 0x7003,
282                             "Failed to login port %06X for ELS passthru.\n",
283                             fcport->d_id.b24);
284                         rval = -EIO;
285                         goto done;
286                 }
287         } else {
288                 /* Allocate a dummy fcport structure, since the functions
289                  * preparing the IOCB and mailbox command retrieve port
290                  * specific information from the fcport structure. For host
291                  * based ELS commands no fcport structure is allocated.
292                  */
293                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
294                 if (!fcport) {
295                         rval = -ENOMEM;
296                         goto done;
297                 }
298
299                 /* Initialize all required  fields of fcport */
300                 fcport->vha = vha;
301                 fcport->d_id.b.al_pa =
302                         bsg_job->request->rqst_data.h_els.port_id[0];
303                 fcport->d_id.b.area =
304                         bsg_job->request->rqst_data.h_els.port_id[1];
305                 fcport->d_id.b.domain =
306                         bsg_job->request->rqst_data.h_els.port_id[2];
307                 fcport->loop_id =
308                         (fcport->d_id.b.al_pa == 0xFD) ?
309                         NPH_FABRIC_CONTROLLER : NPH_F_PORT;
310         }
311
312         if (!vha->flags.online) {
313                 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
314                 rval = -EIO;
315                 goto done;
316         }
317
318         req_sg_cnt =
319                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
320                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
321         if (!req_sg_cnt) {
322                 rval = -ENOMEM;
323                 goto done_free_fcport;
324         }
325
326         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
327                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
328         if (!rsp_sg_cnt) {
329                 rval = -ENOMEM;
330                 goto done_free_fcport;
331         }
332
333         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
334                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
335                 ql_log(ql_log_warn, vha, 0x7008,
336                     "dma mapping resulted in different sg counts, "
337                     "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
338                     "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
339                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
340                 rval = -EAGAIN;
341                 goto done_unmap_sg;
342         }
343
344         /* Alloc SRB structure */
345         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
346         if (!sp) {
347                 rval = -ENOMEM;
348                 goto done_unmap_sg;
349         }
350
351         sp->type =
352                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
353                 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
354         sp->name =
355                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
356                 "bsg_els_rpt" : "bsg_els_hst");
357         sp->u.bsg_job = bsg_job;
358         sp->free = qla2x00_bsg_sp_free;
359         sp->done = qla2x00_bsg_job_done;
360
361         ql_dbg(ql_dbg_user, vha, 0x700a,
362             "bsg rqst type: %s els type: %x - loop-id=%x "
363             "portid=%02x%02x%02x.\n", type,
364             bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
365             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
366
367         rval = qla2x00_start_sp(sp);
368         if (rval != QLA_SUCCESS) {
369                 ql_log(ql_log_warn, vha, 0x700e,
370                     "qla2x00_start_sp failed = %d\n", rval);
371                 qla2x00_rel_sp(vha, sp);
372                 rval = -EIO;
373                 goto done_unmap_sg;
374         }
375         return rval;
376
377 done_unmap_sg:
378         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
379                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
380         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
381                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
382         goto done_free_fcport;
383
384 done_free_fcport:
385         if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
386                 kfree(fcport);
387 done:
388         return rval;
389 }
390
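/*
 * qla24xx_calc_ct_iocbs() - number of IOCBs needed for a CT pass-through
 * with the given number of data segment descriptors (dsds): the command
 * IOCB carries the first two descriptors and each continuation IOCB
 * carries up to five more, e.g. dsds = 8 needs 1 + 1 + 1 = 3 IOCBs.
 */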
391 inline uint16_t
392 qla24xx_calc_ct_iocbs(uint16_t dsds)
393 {
394         uint16_t iocbs;
395
396         iocbs = 1;
397         if (dsds > 2) {
398                 iocbs += (dsds - 2) / 5;
399                 if ((dsds - 2) % 5)
400                         iocbs++;
401         }
402         return iocbs;
403 }
404
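/*
 * qla2x00_process_ct() - handle a CT pass-through BSG request: map the
 * payloads, derive the loop id from the CT preamble (0xFC = SNS,
 * 0xFA = management server), build a dummy fcport for the destination and
 * issue an SRB_CT_CMD.
 */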
405 static int
406 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
407 {
408         srb_t *sp;
409         struct Scsi_Host *host = bsg_job->shost;
410         scsi_qla_host_t *vha = shost_priv(host);
411         struct qla_hw_data *ha = vha->hw;
412         int rval = (DRIVER_ERROR << 16);
413         int req_sg_cnt, rsp_sg_cnt;
414         uint16_t loop_id;
415         struct fc_port *fcport;
416         char  *type = "FC_BSG_HST_CT";
417
418         req_sg_cnt =
419                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
420                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
421         if (!req_sg_cnt) {
422                 ql_log(ql_log_warn, vha, 0x700f,
423                     "dma_map_sg returned %d for request.\n", req_sg_cnt);
424                 rval = -ENOMEM;
425                 goto done;
426         }
427
428         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
429                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
430         if (!rsp_sg_cnt) {
431                 ql_log(ql_log_warn, vha, 0x7010,
432                     "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
433                 rval = -ENOMEM;
434                 goto done;
435         }
436
437         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
438             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
439                 ql_log(ql_log_warn, vha, 0x7011,
440                     "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
441                     "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
442                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
443                 rval = -EAGAIN;
444                 goto done_unmap_sg;
445         }
446
447         if (!vha->flags.online) {
448                 ql_log(ql_log_warn, vha, 0x7012,
449                     "Host is not online.\n");
450                 rval = -EIO;
451                 goto done_unmap_sg;
452         }
453
454         loop_id =
455                 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
456                         >> 24;
457         switch (loop_id) {
458         case 0xFC:
459                 loop_id = cpu_to_le16(NPH_SNS);
460                 break;
461         case 0xFA:
462                 loop_id = vha->mgmt_svr_loop_id;
463                 break;
464         default:
465                 ql_dbg(ql_dbg_user, vha, 0x7013,
466                     "Unknown loop id: %x.\n", loop_id);
467                 rval = -EINVAL;
468                 goto done_unmap_sg;
469         }
470
471         /* Allocate a dummy fcport structure, since the functions preparing
472          * the IOCB and mailbox command retrieve port specific information
473          * from the fcport structure. For host based CT commands no fcport
474          * structure is allocated.
475          */
476         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
477         if (!fcport) {
478                 ql_log(ql_log_warn, vha, 0x7014,
479                     "Failed to allocate fcport.\n");
480                 rval = -ENOMEM;
481                 goto done_unmap_sg;
482         }
483
484         /* Initialize all required  fields of fcport */
485         fcport->vha = vha;
486         fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
487         fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
488         fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
489         fcport->loop_id = loop_id;
490
491         /* Alloc SRB structure */
492         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
493         if (!sp) {
494                 ql_log(ql_log_warn, vha, 0x7015,
495                     "qla2x00_get_sp failed.\n");
496                 rval = -ENOMEM;
497                 goto done_free_fcport;
498         }
499
500         sp->type = SRB_CT_CMD;
501         sp->name = "bsg_ct";
502         sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
503         sp->u.bsg_job = bsg_job;
504         sp->free = qla2x00_bsg_sp_free;
505         sp->done = qla2x00_bsg_job_done;
506
507         ql_dbg(ql_dbg_user, vha, 0x7016,
508             "bsg rqst type: %s els type: %x - "
509             "loop-id=%x portid=%02x%02x%02x.\n", type,
510             (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
511             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
512             fcport->d_id.b.al_pa);
513
514         rval = qla2x00_start_sp(sp);
515         if (rval != QLA_SUCCESS) {
516                 ql_log(ql_log_warn, vha, 0x7017,
517                     "qla2x00_start_sp failed=%d.\n", rval);
518                 qla2x00_rel_sp(vha, sp);
519                 rval = -EIO;
520                 goto done_free_fcport;
521         }
522         return rval;
523
524 done_free_fcport:
525         kfree(fcport);
526 done_unmap_sg:
527         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
528                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
529         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
530                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
531 done:
532         return rval;
533 }
534
535 /* Disable loopback mode */
536 static inline int
537 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
538                             int wait, int wait2)
539 {
540         int ret = 0;
541         int rval = 0;
542         uint16_t new_config[4];
543         struct qla_hw_data *ha = vha->hw;
544
545         if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
546                 goto done_reset_internal;
547
548         memset(new_config, 0, sizeof(new_config));
549         if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
550             ENABLE_INTERNAL_LOOPBACK ||
551             (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
552             ENABLE_EXTERNAL_LOOPBACK) {
553                 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
554                 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
555                     (new_config[0] & INTERNAL_LOOPBACK_MASK));
556                 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
557
558                 ha->notify_dcbx_comp = wait;
559                 ha->notify_lb_portup_comp = wait2;
560
561                 ret = qla81xx_set_port_config(vha, new_config);
562                 if (ret != QLA_SUCCESS) {
563                         ql_log(ql_log_warn, vha, 0x7025,
564                             "Set port config failed.\n");
565                         ha->notify_dcbx_comp = 0;
566                         ha->notify_lb_portup_comp = 0;
567                         rval = -EINVAL;
568                         goto done_reset_internal;
569                 }
570
571                 /* Wait for DCBX complete event */
572                 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
573                         (DCBX_COMP_TIMEOUT * HZ))) {
574                         ql_dbg(ql_dbg_user, vha, 0x7026,
575                             "DCBX completion not received.\n");
576                         ha->notify_dcbx_comp = 0;
577                         ha->notify_lb_portup_comp = 0;
578                         rval = -EINVAL;
579                         goto done_reset_internal;
580                 } else
581                         ql_dbg(ql_dbg_user, vha, 0x7027,
582                             "DCBX completion received.\n");
583
584                 if (wait2 &&
585                     !wait_for_completion_timeout(&ha->lb_portup_comp,
586                     (LB_PORTUP_COMP_TIMEOUT * HZ))) {
587                         ql_dbg(ql_dbg_user, vha, 0x70c5,
588                             "Port up completion not received.\n");
589                         ha->notify_lb_portup_comp = 0;
590                         rval = -EINVAL;
591                         goto done_reset_internal;
592                 } else
593                         ql_dbg(ql_dbg_user, vha, 0x70c6,
594                             "Port up completion received.\n");
595
596                 ha->notify_dcbx_comp = 0;
597                 ha->notify_lb_portup_comp = 0;
598         }
599 done_reset_internal:
600         return rval;
601 }
602
603 /*
604  * Set the port configuration to enable the internal or external loopback
605  * depending on the loopback mode.
606  */
607 static inline int
608 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
609         uint16_t *new_config, uint16_t mode)
610 {
611         int ret = 0;
612         int rval = 0;
613         struct qla_hw_data *ha = vha->hw;
614
615         if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
616                 goto done_set_internal;
617
618         if (mode == INTERNAL_LOOPBACK)
619                 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
620         else if (mode == EXTERNAL_LOOPBACK)
621                 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
622         ql_dbg(ql_dbg_user, vha, 0x70be,
623              "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
624
625         memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
626
627         ha->notify_dcbx_comp = 1;
628         ret = qla81xx_set_port_config(vha, new_config);
629         if (ret != QLA_SUCCESS) {
630                 ql_log(ql_log_warn, vha, 0x7021,
631                     "set port config failed.\n");
632                 ha->notify_dcbx_comp = 0;
633                 rval = -EINVAL;
634                 goto done_set_internal;
635         }
636
637         /* Wait for DCBX complete event */
638         if (!wait_for_completion_timeout(&ha->dcbx_comp,
639             (DCBX_COMP_TIMEOUT * HZ))) {
640                 ql_dbg(ql_dbg_user, vha, 0x7022,
641                     "DCBX completion not received.\n");
642                 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
643                 /*
644                  * If the reset of the loopback mode doesn't work take a FCoE
645                  * dump and reset the chip.
646                  */
647                 if (ret) {
648                         ha->isp_ops->fw_dump(vha, 0);
649                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
650                 }
651                 rval = -EINVAL;
652         } else {
653                 if (ha->flags.idc_compl_status) {
654                         ql_dbg(ql_dbg_user, vha, 0x70c3,
655                             "Bad status in IDC Completion AEN\n");
656                         rval = -EINVAL;
657                         ha->flags.idc_compl_status = 0;
658                 } else
659                         ql_dbg(ql_dbg_user, vha, 0x7023,
660                             "DCBX completion received.\n");
661         }
662
663         ha->notify_dcbx_comp = 0;
664
665 done_set_internal:
666         return rval;
667 }
668
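/*
 * qla2x00_process_loopback() - handle the loopback/echo diagnostic BSG
 * vendor command: set up DMA buffers for the request and response, select
 * the echo or loopback test, switch ISP81xx/ISP8031 ports into the
 * requested loopback mode and restore the original port configuration
 * afterwards.
 */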
669 static int
670 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
671 {
672         struct Scsi_Host *host = bsg_job->shost;
673         scsi_qla_host_t *vha = shost_priv(host);
674         struct qla_hw_data *ha = vha->hw;
675         int rval;
676         uint8_t command_sent;
677         char *type;
678         struct msg_echo_lb elreq;
679         uint16_t response[MAILBOX_REGISTER_COUNT];
680         uint16_t config[4], new_config[4];
681         uint8_t *fw_sts_ptr;
682         uint8_t *req_data = NULL;
683         dma_addr_t req_data_dma;
684         uint32_t req_data_len;
685         uint8_t *rsp_data = NULL;
686         dma_addr_t rsp_data_dma;
687         uint32_t rsp_data_len;
688
689         if (!vha->flags.online) {
690                 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
691                 return -EIO;
692         }
693
694         elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
695                 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
696                 DMA_TO_DEVICE);
697
698         if (!elreq.req_sg_cnt) {
699                 ql_log(ql_log_warn, vha, 0x701a,
700                     "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
701                 return -ENOMEM;
702         }
703
704         elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
705                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
706                 DMA_FROM_DEVICE);
707
708         if (!elreq.rsp_sg_cnt) {
709                 ql_log(ql_log_warn, vha, 0x701b,
710                     "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
711                 rval = -ENOMEM;
712                 goto done_unmap_req_sg;
713         }
714
715         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
716                 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
717                 ql_log(ql_log_warn, vha, 0x701c,
718                     "dma mapping resulted in different sg counts, "
719                     "request_sg_cnt: %x dma_request_sg_cnt: %x "
720                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
721                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
722                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
723                 rval = -EAGAIN;
724                 goto done_unmap_sg;
725         }
726         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
727         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
728                 &req_data_dma, GFP_KERNEL);
729         if (!req_data) {
730                 ql_log(ql_log_warn, vha, 0x701d,
731                     "dma alloc failed for req_data.\n");
732                 rval = -ENOMEM;
733                 goto done_unmap_sg;
734         }
735
736         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
737                 &rsp_data_dma, GFP_KERNEL);
738         if (!rsp_data) {
739                 ql_log(ql_log_warn, vha, 0x7004,
740                     "dma alloc failed for rsp_data.\n");
741                 rval = -ENOMEM;
742                 goto done_free_dma_req;
743         }
744
745         /* Copy the request buffer in req_data now */
746         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
747                 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
748
749         elreq.send_dma = req_data_dma;
750         elreq.rcv_dma = rsp_data_dma;
751         elreq.transfer_size = req_data_len;
752
753         elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
754
755         if (atomic_read(&vha->loop_state) == LOOP_READY &&
756             (ha->current_topology == ISP_CFG_F ||
757             ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
758             le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
759             && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
760                 elreq.options == EXTERNAL_LOOPBACK) {
761                 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
762                 ql_dbg(ql_dbg_user, vha, 0x701e,
763                     "BSG request type: %s.\n", type);
764                 command_sent = INT_DEF_LB_ECHO_CMD;
765                 rval = qla2x00_echo_test(vha, &elreq, response);
766         } else {
767                 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
768                         memset(config, 0, sizeof(config));
769                         memset(new_config, 0, sizeof(new_config));
770
771                         if (qla81xx_get_port_config(vha, config)) {
772                                 ql_log(ql_log_warn, vha, 0x701f,
773                                     "Get port config failed.\n");
774                                 rval = -EPERM;
775                                 goto done_free_dma_rsp;
776                         }
777
778                         if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
779                                 ql_dbg(ql_dbg_user, vha, 0x70c4,
780                                     "Loopback operation already in "
781                                     "progress.\n");
782                                 rval = -EAGAIN;
783                                 goto done_free_dma_rsp;
784                         }
785
786                         ql_dbg(ql_dbg_user, vha, 0x70c0,
787                             "elreq.options=%04x\n", elreq.options);
788
789                         if (elreq.options == EXTERNAL_LOOPBACK)
790                                 if (IS_QLA8031(ha))
791                                         rval = qla81xx_set_loopback_mode(vha,
792                                             config, new_config, elreq.options);
793                                 else
794                                         rval = qla81xx_reset_loopback_mode(vha,
795                                             config, 1, 0);
796                         else
797                                 rval = qla81xx_set_loopback_mode(vha, config,
798                                     new_config, elreq.options);
799
800                         if (rval) {
801                                 rval = -EPERM;
802                                 goto done_free_dma_rsp;
803                         }
804
805                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
806                         ql_dbg(ql_dbg_user, vha, 0x7028,
807                             "BSG request type: %s.\n", type);
808
809                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
810                         rval = qla2x00_loopback_test(vha, &elreq, response);
811
812                         if (response[0] == MBS_COMMAND_ERROR &&
813                                         response[1] == MBS_LB_RESET) {
814                                 ql_log(ql_log_warn, vha, 0x7029,
815                                     "MBX command error, Aborting ISP.\n");
816                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
817                                 qla2xxx_wake_dpc(vha);
818                                 qla2x00_wait_for_chip_reset(vha);
819                                 /* Also reset the MPI */
820                                 if (IS_QLA81XX(ha)) {
821                                         if (qla81xx_restart_mpi_firmware(vha) !=
822                                             QLA_SUCCESS) {
823                                                 ql_log(ql_log_warn, vha, 0x702a,
824                                                     "MPI reset failed.\n");
825                                         }
826                                 }
827
828                                 rval = -EIO;
829                                 goto done_free_dma_rsp;
830                         }
831
832                         if (new_config[0]) {
833                                 int ret;
834
835                                 /* Revert back to original port config
836                                  * Also clear internal loopback
837                                  */
838                                 ret = qla81xx_reset_loopback_mode(vha,
839                                     new_config, 0, 1);
840                                 if (ret) {
841                                         /*
842                                          * If the reset of the loopback mode
843                                          * doesn't work take FCoE dump and then
844                                          * reset the chip.
845                                          */
846                                         ha->isp_ops->fw_dump(vha, 0);
847                                         set_bit(ISP_ABORT_NEEDED,
848                                             &vha->dpc_flags);
849                                 }
850
851                         }
852
853                 } else {
854                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
855                         ql_dbg(ql_dbg_user, vha, 0x702b,
856                             "BSG request type: %s.\n", type);
857                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
858                         rval = qla2x00_loopback_test(vha, &elreq, response);
859                 }
860         }
861
862         if (rval) {
863                 ql_log(ql_log_warn, vha, 0x702c,
864                     "Vendor request %s failed.\n", type);
865
866                 rval = 0;
867                 bsg_job->reply->result = (DID_ERROR << 16);
868                 bsg_job->reply->reply_payload_rcv_len = 0;
869         } else {
870                 ql_dbg(ql_dbg_user, vha, 0x702d,
871                     "Vendor request %s completed.\n", type);
872                 bsg_job->reply->result = (DID_OK << 16);
873                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
874                         bsg_job->reply_payload.sg_cnt, rsp_data,
875                         rsp_data_len);
876         }
877
878         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
879             sizeof(response) + sizeof(uint8_t);
880         fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
881             sizeof(struct fc_bsg_reply);
882         memcpy(fw_sts_ptr, response, sizeof(response));
883         fw_sts_ptr += sizeof(response);
884         *fw_sts_ptr = command_sent;
885
886 done_free_dma_rsp:
887         dma_free_coherent(&ha->pdev->dev, rsp_data_len,
888                 rsp_data, rsp_data_dma);
889 done_free_dma_req:
890         dma_free_coherent(&ha->pdev->dev, req_data_len,
891                 req_data, req_data_dma);
892 done_unmap_sg:
893         dma_unmap_sg(&ha->pdev->dev,
894             bsg_job->reply_payload.sg_list,
895             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
896 done_unmap_req_sg:
897         dma_unmap_sg(&ha->pdev->dev,
898             bsg_job->request_payload.sg_list,
899             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
900         if (!rval)
901                 bsg_job->job_done(bsg_job);
902         return rval;
903 }
904
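/*
 * qla84xx_reset() - BSG vendor command: reset the ISP84xx chip, optionally
 * leaving it in diagnostic firmware mode (A84_ISSUE_RESET_DIAG_FW).
 */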
905 static int
906 qla84xx_reset(struct fc_bsg_job *bsg_job)
907 {
908         struct Scsi_Host *host = bsg_job->shost;
909         scsi_qla_host_t *vha = shost_priv(host);
910         struct qla_hw_data *ha = vha->hw;
911         int rval = 0;
912         uint32_t flag;
913
914         if (!IS_QLA84XX(ha)) {
915                 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
916                 return -EINVAL;
917         }
918
919         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
920
921         rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
922
923         if (rval) {
924                 ql_log(ql_log_warn, vha, 0x7030,
925                     "Vendor request 84xx reset failed.\n");
926                 rval = (DID_ERROR << 16);
927
928         } else {
929                 ql_dbg(ql_dbg_user, vha, 0x7031,
930                     "Vendor request 84xx reset completed.\n");
931                 bsg_job->reply->result = DID_OK;
932                 bsg_job->job_done(bsg_job);
933         }
934
935         return rval;
936 }
937
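/*
 * qla84xx_updatefw() - BSG vendor command: copy a firmware image from the
 * request payload into a DMA buffer and issue a VERIFY CHIP IOCB to update
 * the ISP84xx firmware (or load diagnostic firmware when
 * A84_ISSUE_UPDATE_DIAGFW_CMD is requested).
 */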
938 static int
939 qla84xx_updatefw(struct fc_bsg_job *bsg_job)
940 {
941         struct Scsi_Host *host = bsg_job->shost;
942         scsi_qla_host_t *vha = shost_priv(host);
943         struct qla_hw_data *ha = vha->hw;
944         struct verify_chip_entry_84xx *mn = NULL;
945         dma_addr_t mn_dma, fw_dma;
946         void *fw_buf = NULL;
947         int rval = 0;
948         uint32_t sg_cnt;
949         uint32_t data_len;
950         uint16_t options;
951         uint32_t flag;
952         uint32_t fw_ver;
953
954         if (!IS_QLA84XX(ha)) {
955                 ql_dbg(ql_dbg_user, vha, 0x7032,
956                     "Not 84xx, exiting.\n");
957                 return -EINVAL;
958         }
959
960         sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
961                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
962         if (!sg_cnt) {
963                 ql_log(ql_log_warn, vha, 0x7033,
964                     "dma_map_sg returned %d for request.\n", sg_cnt);
965                 return -ENOMEM;
966         }
967
968         if (sg_cnt != bsg_job->request_payload.sg_cnt) {
969                 ql_log(ql_log_warn, vha, 0x7034,
970                     "DMA mapping resulted in different sg counts, "
971                     "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
972                     bsg_job->request_payload.sg_cnt, sg_cnt);
973                 rval = -EAGAIN;
974                 goto done_unmap_sg;
975         }
976
977         data_len = bsg_job->request_payload.payload_len;
978         fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
979                 &fw_dma, GFP_KERNEL);
980         if (!fw_buf) {
981                 ql_log(ql_log_warn, vha, 0x7035,
982                     "DMA alloc failed for fw_buf.\n");
983                 rval = -ENOMEM;
984                 goto done_unmap_sg;
985         }
986
987         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
988                 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
989
990         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
991         if (!mn) {
992                 ql_log(ql_log_warn, vha, 0x7036,
993                     "DMA alloc failed for fw buffer.\n");
994                 rval = -ENOMEM;
995                 goto done_free_fw_buf;
996         }
997
998         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
999         fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
1000
1001         memset(mn, 0, sizeof(struct access_chip_84xx));
1002         mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1003         mn->entry_count = 1;
1004
1005         options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1006         if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1007                 options |= VCO_DIAG_FW;
1008
1009         mn->options = cpu_to_le16(options);
1010         mn->fw_ver =  cpu_to_le32(fw_ver);
1011         mn->fw_size =  cpu_to_le32(data_len);
1012         mn->fw_seq_size =  cpu_to_le32(data_len);
1013         mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
1014         mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
1015         mn->dseg_length = cpu_to_le32(data_len);
1016         mn->data_seg_cnt = cpu_to_le16(1);
1017
1018         rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1019
1020         if (rval) {
1021                 ql_log(ql_log_warn, vha, 0x7037,
1022                     "Vendor request 84xx updatefw failed.\n");
1023
1024                 rval = (DID_ERROR << 16);
1025         } else {
1026                 ql_dbg(ql_dbg_user, vha, 0x7038,
1027                     "Vendor request 84xx updatefw completed.\n");
1028
1029                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1030                 bsg_job->reply->result = DID_OK;
1031         }
1032
1033         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1034
1035 done_free_fw_buf:
1036         dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1037
1038 done_unmap_sg:
1039         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1040                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1041
1042         if (!rval)
1043                 bsg_job->job_done(bsg_job);
1044         return rval;
1045 }
1046
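/*
 * qla84xx_mgmt_cmd() - BSG vendor command: issue an ACCESS CHIP IOCB to the
 * ISP84xx for memory read/write, information retrieval or configuration
 * change, copying data to or from the BSG payload as required.
 */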
1047 static int
1048 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1049 {
1050         struct Scsi_Host *host = bsg_job->shost;
1051         scsi_qla_host_t *vha = shost_priv(host);
1052         struct qla_hw_data *ha = vha->hw;
1053         struct access_chip_84xx *mn = NULL;
1054         dma_addr_t mn_dma, mgmt_dma;
1055         void *mgmt_b = NULL;
1056         int rval = 0;
1057         struct qla_bsg_a84_mgmt *ql84_mgmt;
1058         uint32_t sg_cnt;
1059         uint32_t data_len = 0;
1060         uint32_t dma_direction = DMA_NONE;
1061
1062         if (!IS_QLA84XX(ha)) {
1063                 ql_log(ql_log_warn, vha, 0x703a,
1064                     "Not 84xx, exiting.\n");
1065                 return -EINVAL;
1066         }
1067
1068         ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1069                 sizeof(struct fc_bsg_request));
1070         if (!ql84_mgmt) {
1071                 ql_log(ql_log_warn, vha, 0x703b,
1072                     "MGMT header not provided, exiting.\n");
1073                 return -EINVAL;
1074         }
1075
1076         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1077         if (!mn) {
1078                 ql_log(ql_log_warn, vha, 0x703c,
1079                     "DMA alloc failed for fw buffer.\n");
1080                 return -ENOMEM;
1081         }
1082
1083         memset(mn, 0, sizeof(struct access_chip_84xx));
1084         mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1085         mn->entry_count = 1;
1086
1087         switch (ql84_mgmt->mgmt.cmd) {
1088         case QLA84_MGMT_READ_MEM:
1089         case QLA84_MGMT_GET_INFO:
1090                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1091                         bsg_job->reply_payload.sg_list,
1092                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1093                 if (!sg_cnt) {
1094                         ql_log(ql_log_warn, vha, 0x703d,
1095                             "dma_map_sg returned %d for reply.\n", sg_cnt);
1096                         rval = -ENOMEM;
1097                         goto exit_mgmt;
1098                 }
1099
1100                 dma_direction = DMA_FROM_DEVICE;
1101
1102                 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1103                         ql_log(ql_log_warn, vha, 0x703e,
1104                             "DMA mapping resulted in different sg counts, "
1105                             "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1106                             bsg_job->reply_payload.sg_cnt, sg_cnt);
1107                         rval = -EAGAIN;
1108                         goto done_unmap_sg;
1109                 }
1110
1111                 data_len = bsg_job->reply_payload.payload_len;
1112
1113                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1114                     &mgmt_dma, GFP_KERNEL);
1115                 if (!mgmt_b) {
1116                         ql_log(ql_log_warn, vha, 0x703f,
1117                             "DMA alloc failed for mgmt_b.\n");
1118                         rval = -ENOMEM;
1119                         goto done_unmap_sg;
1120                 }
1121
1122                 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1123                         mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1124                         mn->parameter1 =
1125                                 cpu_to_le32(
1126                                 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1127
1128                 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1129                         mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1130                         mn->parameter1 =
1131                                 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1132
1133                         mn->parameter2 =
1134                                 cpu_to_le32(
1135                                 ql84_mgmt->mgmt.mgmtp.u.info.context);
1136                 }
1137                 break;
1138
1139         case QLA84_MGMT_WRITE_MEM:
1140                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1141                         bsg_job->request_payload.sg_list,
1142                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1143
1144                 if (!sg_cnt) {
1145                         ql_log(ql_log_warn, vha, 0x7040,
1146                             "dma_map_sg returned %d.\n", sg_cnt);
1147                         rval = -ENOMEM;
1148                         goto exit_mgmt;
1149                 }
1150
1151                 dma_direction = DMA_TO_DEVICE;
1152
1153                 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1154                         ql_log(ql_log_warn, vha, 0x7041,
1155                             "DMA mapping resulted in different sg counts, "
1156                             "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1157                             bsg_job->request_payload.sg_cnt, sg_cnt);
1158                         rval = -EAGAIN;
1159                         goto done_unmap_sg;
1160                 }
1161
1162                 data_len = bsg_job->request_payload.payload_len;
1163                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1164                         &mgmt_dma, GFP_KERNEL);
1165                 if (!mgmt_b) {
1166                         ql_log(ql_log_warn, vha, 0x7042,
1167                             "DMA alloc failed for mgmt_b.\n");
1168                         rval = -ENOMEM;
1169                         goto done_unmap_sg;
1170                 }
1171
1172                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1173                         bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1174
1175                 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1176                 mn->parameter1 =
1177                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1178                 break;
1179
1180         case QLA84_MGMT_CHNG_CONFIG:
1181                 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1182                 mn->parameter1 =
1183                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1184
1185                 mn->parameter2 =
1186                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1187
1188                 mn->parameter3 =
1189                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1190                 break;
1191
1192         default:
1193                 rval = -EIO;
1194                 goto exit_mgmt;
1195         }
1196
1197         if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1198                 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1199                 mn->dseg_count = cpu_to_le16(1);
1200                 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1201                 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1202                 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1203         }
1204
1205         rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1206
1207         if (rval) {
1208                 ql_log(ql_log_warn, vha, 0x7043,
1209                     "Vendor request 84xx mgmt failed.\n");
1210
1211                 rval = (DID_ERROR << 16);
1212
1213         } else {
1214                 ql_dbg(ql_dbg_user, vha, 0x7044,
1215                     "Vendor request 84xx mgmt completed.\n");
1216
1217                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1218                 bsg_job->reply->result = DID_OK;
1219
1220                 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1221                         (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1222                         bsg_job->reply->reply_payload_rcv_len =
1223                                 bsg_job->reply_payload.payload_len;
1224
1225                         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1226                                 bsg_job->reply_payload.sg_cnt, mgmt_b,
1227                                 data_len);
1228                 }
1229         }
1230
1231 done_unmap_sg:
1232         if (mgmt_b)
1233                 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1234
1235         if (dma_direction == DMA_TO_DEVICE)
1236                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1237                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1238         else if (dma_direction == DMA_FROM_DEVICE)
1239                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1240                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1241
1242 exit_mgmt:
1243         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1244
1245         if (!rval)
1246                 bsg_job->job_done(bsg_job);
1247         return rval;
1248 }
1249
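/*
 * qla24xx_iidma() - BSG vendor command: get or set the iiDMA speed of a
 * logged-in target port identified by its WWPN.
 */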
1250 static int
1251 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1252 {
1253         struct Scsi_Host *host = bsg_job->shost;
1254         scsi_qla_host_t *vha = shost_priv(host);
1255         int rval = 0;
1256         struct qla_port_param *port_param = NULL;
1257         fc_port_t *fcport = NULL;
1258         uint16_t mb[MAILBOX_REGISTER_COUNT];
1259         uint8_t *rsp_ptr = NULL;
1260
1261         if (!IS_IIDMA_CAPABLE(vha->hw)) {
1262                 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1263                 return -EINVAL;
1264         }
1265
1266         port_param = (struct qla_port_param *)((char *)bsg_job->request +
1267                 sizeof(struct fc_bsg_request));
1268         if (!port_param) {
1269                 ql_log(ql_log_warn, vha, 0x7047,
1270                     "port_param header not provided.\n");
1271                 return -EINVAL;
1272         }
1273
1274         if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1275                 ql_log(ql_log_warn, vha, 0x7048,
1276                     "Invalid destination type.\n");
1277                 return -EINVAL;
1278         }
1279
1280         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1281                 if (fcport->port_type != FCT_TARGET)
1282                         continue;
1283
1284                 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1285                         fcport->port_name, sizeof(fcport->port_name)))
1286                         continue;
1287                 break;
1288         }
1289
1290         if (!fcport) {
1291                 ql_log(ql_log_warn, vha, 0x7049,
1292                     "Failed to find port.\n");
1293                 return -EINVAL;
1294         }
1295
1296         if (atomic_read(&fcport->state) != FCS_ONLINE) {
1297                 ql_log(ql_log_warn, vha, 0x704a,
1298                     "Port is not online.\n");
1299                 return -EINVAL;
1300         }
1301
1302         if (fcport->flags & FCF_LOGIN_NEEDED) {
1303                 ql_log(ql_log_warn, vha, 0x704b,
1304                     "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1305                 return -EINVAL;
1306         }
1307
1308         if (port_param->mode)
1309                 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1310                         port_param->speed, mb);
1311         else
1312                 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1313                         &port_param->speed, mb);
1314
1315         if (rval) {
1316                 ql_log(ql_log_warn, vha, 0x704c,
1317                     "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1318                     "%04x %x %04x %04x.\n", fcport->port_name[0],
1319                     fcport->port_name[1], fcport->port_name[2],
1320                     fcport->port_name[3], fcport->port_name[4],
1321                     fcport->port_name[5], fcport->port_name[6],
1322                     fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1323                 rval = (DID_ERROR << 16);
1324         } else {
1325                 if (!port_param->mode) {
1326                         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1327                                 sizeof(struct qla_port_param);
1328
1329                         rsp_ptr = ((uint8_t *)bsg_job->reply) +
1330                                 sizeof(struct fc_bsg_reply);
1331
1332                         memcpy(rsp_ptr, port_param,
1333                                 sizeof(struct qla_port_param));
1334                 }
1335
1336                 bsg_job->reply->result = DID_OK;
1337                 bsg_job->job_done(bsg_job);
1338         }
1339
1340         return rval;
1341 }
1342
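/*
 * qla2x00_optrom_setup() - validate the option ROM region requested by a
 * BSG read/update command and allocate the staging buffer for it.
 */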
1343 static int
1344 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1345         uint8_t is_update)
1346 {
1347         uint32_t start = 0;
1348         int valid = 0;
1349         struct qla_hw_data *ha = vha->hw;
1350
1351         if (unlikely(pci_channel_offline(ha->pdev)))
1352                 return -EINVAL;
1353
1354         start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1355         if (start > ha->optrom_size) {
1356                 ql_log(ql_log_warn, vha, 0x7055,
1357                     "start %d > optrom_size %d.\n", start, ha->optrom_size);
1358                 return -EINVAL;
1359         }
1360
1361         if (ha->optrom_state != QLA_SWAITING) {
1362                 ql_log(ql_log_info, vha, 0x7056,
1363                     "optrom_state %d.\n", ha->optrom_state);
1364                 return -EBUSY;
1365         }
1366
1367         ha->optrom_region_start = start;
1368         ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1369         if (is_update) {
1370                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1371                         valid = 1;
1372                 else if (start == (ha->flt_region_boot * 4) ||
1373                     start == (ha->flt_region_fw * 4))
1374                         valid = 1;
1375                 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1376                     IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
1377                         valid = 1;
1378                 if (!valid) {
1379                         ql_log(ql_log_warn, vha, 0x7058,
1380                             "Invalid start region 0x%x/0x%x.\n", start,
1381                             bsg_job->request_payload.payload_len);
1382                         return -EINVAL;
1383                 }
1384
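                     /* Clamp the region size so the update cannot run past the end of the option ROM. */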
1385                 ha->optrom_region_size = start +
1386                     bsg_job->request_payload.payload_len > ha->optrom_size ?
1387                     ha->optrom_size - start :
1388                     bsg_job->request_payload.payload_len;
1389                 ha->optrom_state = QLA_SWRITING;
1390         } else {
1391                 ha->optrom_region_size = start +
1392                     bsg_job->reply_payload.payload_len > ha->optrom_size ?
1393                     ha->optrom_size - start :
1394                     bsg_job->reply_payload.payload_len;
1395                 ha->optrom_state = QLA_SREADING;
1396         }
1397
1398         ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1399         if (!ha->optrom_buffer) {
1400                 ql_log(ql_log_warn, vha, 0x7059,
1401                     "Read: Unable to allocate memory for optrom retrieval "
1402                     "(%x)\n", ha->optrom_region_size);
1403
1404                 ha->optrom_state = QLA_SWAITING;
1405                 return -ENOMEM;
1406         }
1407
1408         memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1409         return 0;
1410 }
1411
1412 static int
1413 qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1414 {
1415         struct Scsi_Host *host = bsg_job->shost;
1416         scsi_qla_host_t *vha = shost_priv(host);
1417         struct qla_hw_data *ha = vha->hw;
1418         int rval = 0;
1419
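             /* Flash access has to wait while the nic core reset handler owns the adapter. */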
1420         if (ha->flags.nic_core_reset_hdlr_active)
1421                 return -EBUSY;
1422
1423         rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1424         if (rval)
1425                 return rval;
1426
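             /* Read the requested region into the staging buffer, then hand it back through the bsg reply payload. */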
1427         ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1428             ha->optrom_region_start, ha->optrom_region_size);
1429
1430         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1431             bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1432             ha->optrom_region_size);
1433
1434         bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1435         bsg_job->reply->result = DID_OK;
1436         vfree(ha->optrom_buffer);
1437         ha->optrom_buffer = NULL;
1438         ha->optrom_state = QLA_SWAITING;
1439         bsg_job->job_done(bsg_job);
1440         return rval;
1441 }
1442
1443 static int
1444 qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1445 {
1446         struct Scsi_Host *host = bsg_job->shost;
1447         scsi_qla_host_t *vha = shost_priv(host);
1448         struct qla_hw_data *ha = vha->hw;
1449         int rval = 0;
1450
1451         rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1452         if (rval)
1453                 return rval;
1454
1455         /* Set isp82xx_no_md_cap so that no minidump is captured during the flash update */
1456         ha->flags.isp82xx_no_md_cap = 1;
1457
1458         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1459             bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1460             ha->optrom_region_size);
1461
1462         ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1463             ha->optrom_region_start, ha->optrom_region_size);
1464
1465         bsg_job->reply->result = DID_OK;
1466         vfree(ha->optrom_buffer);
1467         ha->optrom_buffer = NULL;
1468         ha->optrom_state = QLA_SWAITING;
1469         bsg_job->job_done(bsg_job);
1470         return rval;
1471 }
1472
1473 static int
1474 qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1475 {
1476         struct Scsi_Host *host = bsg_job->shost;
1477         scsi_qla_host_t *vha = shost_priv(host);
1478         struct qla_hw_data *ha = vha->hw;
1479         int rval = 0;
1480         uint8_t bsg[DMA_POOL_SIZE];
1481         struct qla_image_version_list *list = (void *)bsg;
1482         struct qla_image_version *image;
1483         uint32_t count;
1484         dma_addr_t sfp_dma;
1485         void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1486         if (!sfp) {
1487                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1488                     EXT_STATUS_NO_MEMORY;
1489                 goto done;
1490         }
1491
1492         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1493             bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1494
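             /* Walk the caller-supplied image list and program each field through the SFP write mailbox command. */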
1495         image = list->version;
1496         count = list->count;
1497         while (count--) {
1498                 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1499                 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1500                     image->field_address.device, image->field_address.offset,
1501                     sizeof(image->field_info), image->field_address.option);
1502                 if (rval) {
1503                         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1504                             EXT_STATUS_MAILBOX;
1505                         goto dealloc;
1506                 }
1507                 image++;
1508         }
1509
1510         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1511
1512 dealloc:
1513         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1514
1515 done:
1516         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1517         bsg_job->reply->result = DID_OK << 16;
1518         bsg_job->job_done(bsg_job);
1519
1520         return 0;
1521 }
1522
1523 static int
1524 qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1525 {
1526         struct Scsi_Host *host = bsg_job->shost;
1527         scsi_qla_host_t *vha = shost_priv(host);
1528         struct qla_hw_data *ha = vha->hw;
1529         int rval = 0;
1530         uint8_t bsg[DMA_POOL_SIZE];
1531         struct qla_status_reg *sr = (void *)bsg;
1532         dma_addr_t sfp_dma;
1533         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1534         if (!sfp) {
1535                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1536                     EXT_STATUS_NO_MEMORY;
1537                 goto done;
1538         }
1539
1540         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1541             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1542
1543         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1544             sr->field_address.device, sr->field_address.offset,
1545             sizeof(sr->status_reg), sr->field_address.option);
1546         sr->status_reg = *sfp;
1547
1548         if (rval) {
1549                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1550                     EXT_STATUS_MAILBOX;
1551                 goto dealloc;
1552         }
1553
1554         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1555             bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1556
1557         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1558
1559 dealloc:
1560         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1561
1562 done:
1563         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1564         bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1565         bsg_job->reply->result = DID_OK << 16;
1566         bsg_job->job_done(bsg_job);
1567
1568         return 0;
1569 }
1570
1571 static int
1572 qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1573 {
1574         struct Scsi_Host *host = bsg_job->shost;
1575         scsi_qla_host_t *vha = shost_priv(host);
1576         struct qla_hw_data *ha = vha->hw;
1577         int rval = 0;
1578         uint8_t bsg[DMA_POOL_SIZE];
1579         struct qla_status_reg *sr = (void *)bsg;
1580         dma_addr_t sfp_dma;
1581         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1582         if (!sfp) {
1583                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1584                     EXT_STATUS_NO_MEMORY;
1585                 goto done;
1586         }
1587
1588         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1589             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1590
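             /* Stage the new status value in the DMA buffer before issuing the SFP write. */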
1591         *sfp = sr->status_reg;
1592         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1593             sr->field_address.device, sr->field_address.offset,
1594             sizeof(sr->status_reg), sr->field_address.option);
1595
1596         if (rval) {
1597                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1598                     EXT_STATUS_MAILBOX;
1599                 goto dealloc;
1600         }
1601
1602         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1603
1604 dealloc:
1605         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1606
1607 done:
1608         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1609         bsg_job->reply->result = DID_OK << 16;
1610         bsg_job->job_done(bsg_job);
1611
1612         return 0;
1613 }
1614
1615 static int
1616 qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
1617 {
1618         struct Scsi_Host *host = bsg_job->shost;
1619         scsi_qla_host_t *vha = shost_priv(host);
1620         struct qla_hw_data *ha = vha->hw;
1621         int rval = 0;
1622         uint8_t bsg[DMA_POOL_SIZE];
1623         struct qla_i2c_access *i2c = (void *)bsg;
1624         dma_addr_t sfp_dma;
1625         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1626         if (!sfp) {
1627                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1628                     EXT_STATUS_NO_MEMORY;
1629                 goto done;
1630         }
1631
1632         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1633             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1634
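             /* Copy the caller's data into the DMA buffer and write it out over the SFP/I2C interface. */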
1635         memcpy(sfp, i2c->buffer, i2c->length);
1636         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1637             i2c->device, i2c->offset, i2c->length, i2c->option);
1638
1639         if (rval) {
1640                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1641                     EXT_STATUS_MAILBOX;
1642                 goto dealloc;
1643         }
1644
1645         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1646
1647 dealloc:
1648         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1649
1650 done:
1651         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1652         bsg_job->reply->result = DID_OK << 16;
1653         bsg_job->job_done(bsg_job);
1654
1655         return 0;
1656 }
1657
1658 static int
1659 qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
1660 {
1661         struct Scsi_Host *host = bsg_job->shost;
1662         scsi_qla_host_t *vha = shost_priv(host);
1663         struct qla_hw_data *ha = vha->hw;
1664         int rval = 0;
1665         uint8_t bsg[DMA_POOL_SIZE];
1666         struct qla_i2c_access *i2c = (void *)bsg;
1667         dma_addr_t sfp_dma;
1668         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1669         if (!sfp) {
1670                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1671                     EXT_STATUS_NO_MEMORY;
1672                 goto done;
1673         }
1674
1675         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1676             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1677
1678         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1679                 i2c->device, i2c->offset, i2c->length, i2c->option);
1680
1681         if (rval) {
1682                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1683                     EXT_STATUS_MAILBOX;
1684                 goto dealloc;
1685         }
1686
1687         memcpy(i2c->buffer, sfp, i2c->length);
1688         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1689             bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1690
1691         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1692
1693 dealloc:
1694         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1695
1696 done:
1697         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1698         bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
1699         bsg_job->reply->result = DID_OK << 16;
1700         bsg_job->job_done(bsg_job);
1701
1702         return 0;
1703 }
1704
1705 static int
1706 qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
1707 {
1708         struct Scsi_Host *host = bsg_job->shost;
1709         scsi_qla_host_t *vha = shost_priv(host);
1710         struct qla_hw_data *ha = vha->hw;
1711         uint16_t thread_id;
1712         uint32_t rval = EXT_STATUS_OK;
1713         uint16_t req_sg_cnt = 0;
1714         uint16_t rsp_sg_cnt = 0;
1715         uint16_t nextlid = 0;
1716         uint32_t tot_dsds;
1717         srb_t *sp = NULL;
1718         uint32_t req_data_len = 0;
1719         uint32_t rsp_data_len = 0;
1720
1721         /* Check the type of the adapter */
1722         if (!IS_BIDI_CAPABLE(ha)) {
1723                 ql_log(ql_log_warn, vha, 0x70a0,
1724                         "This adapter is not supported\n");
1725                 rval = EXT_STATUS_NOT_SUPPORTED;
1726                 goto done;
1727         }
1728
1729         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1730                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1731                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1732                 rval =  EXT_STATUS_BUSY;
1733                 goto done;
1734         }
1735
1736         /* Check if host is online */
1737         if (!vha->flags.online) {
1738                 ql_log(ql_log_warn, vha, 0x70a1,
1739                         "Host is not online\n");
1740                 rval = EXT_STATUS_DEVICE_OFFLINE;
1741                 goto done;
1742         }
1743
1744         /* Check if cable is plugged in or not */
1745         if (vha->device_flags & DFLG_NO_CABLE) {
1746                 ql_log(ql_log_warn, vha, 0x70a2,
1747                         "Cable is unplugged...\n");
1748                 rval = EXT_STATUS_INVALID_CFG;
1749                 goto done;
1750         }
1751
1752         /* Check if the switch is connected or not */
1753         if (ha->current_topology != ISP_CFG_F) {
1754                 ql_log(ql_log_warn, vha, 0x70a3,
1755                         "Host is not connected to the switch\n");
1756                 rval = EXT_STATUS_INVALID_CFG;
1757                 goto done;
1758         }
1759
1760         /* Check if operating mode is P2P */
1761         if (ha->operating_mode != P2P) {
1762                 ql_log(ql_log_warn, vha, 0x70a4,
1763                     "Host is operating mode is not P2p\n");
1764                 rval = EXT_STATUS_INVALID_CFG;
1765                 goto done;
1766         }
1767
1768         thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1769
1770         mutex_lock(&ha->selflogin_lock);
1771         if (vha->self_login_loop_id == 0) {
1772                 /* Initialize all required fields of fcport */
1773                 vha->bidir_fcport.vha = vha;
1774                 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1775                 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1776                 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1777                 vha->bidir_fcport.loop_id = vha->loop_id;
1778
1779                 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1780                         ql_log(ql_log_warn, vha, 0x70a7,
1781                             "Failed to login port %06X for bidirectional IOCB\n",
1782                             vha->bidir_fcport.d_id.b24);
1783                         mutex_unlock(&ha->selflogin_lock);
1784                         rval = EXT_STATUS_MAILBOX;
1785                         goto done;
1786                 }
1787                 vha->self_login_loop_id = nextlid - 1;
1788
1789         }
1790         mutex_unlock(&ha->selflogin_lock);
1791
1792         /* Assign the self login loop id to fcport */
1793         vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1794
1795         req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1796                 bsg_job->request_payload.sg_list,
1797                 bsg_job->request_payload.sg_cnt,
1798                 DMA_TO_DEVICE);
1799
1800         if (!req_sg_cnt) {
1801                 rval = EXT_STATUS_NO_MEMORY;
1802                 goto done;
1803         }
1804
1805         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1806                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1807                 DMA_FROM_DEVICE);
1808
1809         if (!rsp_sg_cnt) {
1810                 rval = EXT_STATUS_NO_MEMORY;
1811                 goto done_unmap_req_sg;
1812         }
1813
1814         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
1815                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1816                 ql_dbg(ql_dbg_user, vha, 0x70a9,
1817                     "Dma mapping resulted in different sg counts "
1818                     "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1819                     "%x dma_reply_sg_cnt: %x]\n",
1820                     bsg_job->request_payload.sg_cnt, req_sg_cnt,
1821                     bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1822                 rval = EXT_STATUS_NO_MEMORY;
1823                 goto done_unmap_sg;
1824         }
1825
1826         req_data_len = bsg_job->request_payload.payload_len;
1827         rsp_data_len = bsg_job->reply_payload.payload_len;
1828
1829         if (req_data_len != rsp_data_len) {
1830                 rval = EXT_STATUS_BUSY;
1831                 ql_log(ql_log_warn, vha, 0x70aa,
1832                     "req_data_len != rsp_data_len\n");
1833                 goto done_unmap_sg;
1834         }
1835
1836
1837         /* Alloc SRB structure */
1838         sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1839         if (!sp) {
1840                 ql_dbg(ql_dbg_user, vha, 0x70ac,
1841                     "Alloc SRB structure failed\n");
1842                 rval = EXT_STATUS_NO_MEMORY;
1843                 goto done_unmap_sg;
1844         }
1845
1846         /* Populate srb->ctx with bidir ctx */
1847         sp->u.bsg_job = bsg_job;
1848         sp->free = qla2x00_bsg_sp_free;
1849         sp->type = SRB_BIDI_CMD;
1850         sp->done = qla2x00_bsg_job_done;
1851
1852         /* Add the read and write sg count */
1853         tot_dsds = rsp_sg_cnt + req_sg_cnt;
1854
1855         rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1856         if (rval != EXT_STATUS_OK)
1857                 goto done_free_srb;
1858         /* The bsg request will be completed in the interrupt handler */
1859         return rval;
1860
1861 done_free_srb:
1862         mempool_free(sp, ha->srb_mempool);
1863 done_unmap_sg:
1864         dma_unmap_sg(&ha->pdev->dev,
1865             bsg_job->reply_payload.sg_list,
1866             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1867 done_unmap_req_sg:
1868         dma_unmap_sg(&ha->pdev->dev,
1869             bsg_job->request_payload.sg_list,
1870             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1871 done:
1872
1873         /* Return a vendor-specific error response
1874          * and complete the bsg request.
1875          */
1876         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1877         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1878         bsg_job->reply->reply_payload_rcv_len = 0;
1879         bsg_job->reply->result = (DID_OK) << 16;
1880         bsg_job->job_done(bsg_job);
1881         /* Always return success, vendor rsp carries the correct status */
1882         return 0;
1883 }
1884
1885 static int
1886 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1887 {
1888         switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1889         case QL_VND_LOOPBACK:
1890                 return qla2x00_process_loopback(bsg_job);
1891
1892         case QL_VND_A84_RESET:
1893                 return qla84xx_reset(bsg_job);
1894
1895         case QL_VND_A84_UPDATE_FW:
1896                 return qla84xx_updatefw(bsg_job);
1897
1898         case QL_VND_A84_MGMT_CMD:
1899                 return qla84xx_mgmt_cmd(bsg_job);
1900
1901         case QL_VND_IIDMA:
1902                 return qla24xx_iidma(bsg_job);
1903
1904         case QL_VND_FCP_PRIO_CFG_CMD:
1905                 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1906
1907         case QL_VND_READ_FLASH:
1908                 return qla2x00_read_optrom(bsg_job);
1909
1910         case QL_VND_UPDATE_FLASH:
1911                 return qla2x00_update_optrom(bsg_job);
1912
1913         case QL_VND_SET_FRU_VERSION:
1914                 return qla2x00_update_fru_versions(bsg_job);
1915
1916         case QL_VND_READ_FRU_STATUS:
1917                 return qla2x00_read_fru_status(bsg_job);
1918
1919         case QL_VND_WRITE_FRU_STATUS:
1920                 return qla2x00_write_fru_status(bsg_job);
1921
1922         case QL_VND_WRITE_I2C:
1923                 return qla2x00_write_i2c(bsg_job);
1924
1925         case QL_VND_READ_I2C:
1926                 return qla2x00_read_i2c(bsg_job);
1927
1928         case QL_VND_DIAG_IO_CMD:
1929                 return qla24xx_process_bidir_cmd(bsg_job);
1930
1931         default:
1932                 return -ENOSYS;
1933         }
1934 }
1935
1936 int
1937 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1938 {
1939         int ret = -EINVAL;
1940         struct fc_rport *rport;
1941         fc_port_t *fcport = NULL;
1942         struct Scsi_Host *host;
1943         scsi_qla_host_t *vha;
1944
1945         /* In case no data is transferred. */
1946         bsg_job->reply->reply_payload_rcv_len = 0;
1947
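             /* ELS pass-through requests arrive on an rport; all other requests are host based. */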
1948         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1949                 rport = bsg_job->rport;
1950                 fcport = *(fc_port_t **) rport->dd_data;
1951                 host = rport_to_shost(rport);
1952                 vha = shost_priv(host);
1953         } else {
1954                 host = bsg_job->shost;
1955                 vha = shost_priv(host);
1956         }
1957
1958         if (qla2x00_reset_active(vha)) {
1959                 ql_dbg(ql_dbg_user, vha, 0x709f,
1960                     "BSG: ISP abort active/needed -- cmd=%d.\n",
1961                     bsg_job->request->msgcode);
1962                 return -EBUSY;
1963         }
1964
1965         ql_dbg(ql_dbg_user, vha, 0x7000,
1966             "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
1967
1968         switch (bsg_job->request->msgcode) {
1969         case FC_BSG_RPT_ELS:
1970         case FC_BSG_HST_ELS_NOLOGIN:
1971                 ret = qla2x00_process_els(bsg_job);
1972                 break;
1973         case FC_BSG_HST_CT:
1974                 ret = qla2x00_process_ct(bsg_job);
1975                 break;
1976         case FC_BSG_HST_VENDOR:
1977                 ret = qla2x00_process_vendor_specific(bsg_job);
1978                 break;
1979         case FC_BSG_HST_ADD_RPORT:
1980         case FC_BSG_HST_DEL_RPORT:
1981         case FC_BSG_RPT_CT:
1982         default:
1983                 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1984                 break;
1985         }
1986         return ret;
1987 }
1988
1989 int
1990 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1991 {
1992         scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
1993         struct qla_hw_data *ha = vha->hw;
1994         srb_t *sp;
1995         int cnt, que;
1996         unsigned long flags;
1997         struct req_que *req;
1998
1999         /* find the bsg job from the active list of commands */
2000         spin_lock_irqsave(&ha->hardware_lock, flags);
2001         for (que = 0; que < ha->max_req_queues; que++) {
2002                 req = ha->req_q_map[que];
2003                 if (!req)
2004                         continue;
2005
2006                 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2007                         sp = req->outstanding_cmds[cnt];
2008                         if (sp) {
2009                                 if (((sp->type == SRB_CT_CMD) ||
2010                                         (sp->type == SRB_ELS_CMD_HST))
2011                                         && (sp->u.bsg_job == bsg_job)) {
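                                             /* The hardware lock is dropped across the abort and re-taken before leaving the loop. */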
2012                                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2013                                         if (ha->isp_ops->abort_command(sp)) {
2014                                                 ql_log(ql_log_warn, vha, 0x7089,
2015                                                     "mbx abort_command "
2016                                                     "failed.\n");
2017                                                 bsg_job->req->errors =
2018                                                 bsg_job->reply->result = -EIO;
2019                                         } else {
2020                                                 ql_dbg(ql_dbg_user, vha, 0x708a,
2021                                                     "mbx abort_command "
2022                                                     "success.\n");
2023                                                 bsg_job->req->errors =
2024                                                 bsg_job->reply->result = 0;
2025                                         }
2026                                         spin_lock_irqsave(&ha->hardware_lock, flags);
2027                                         goto done;
2028                                 }
2029                         }
2030                 }
2031         }
2032         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2033         ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2034         bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2035         return 0;
2036
2037 done:
2038         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2039         if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2040                 kfree(sp->fcport);
2041         qla2x00_rel_sp(vha, sp);
2042         return 0;
2043 }