2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
13 #include <scsi/scsi_tcq.h>
15 static void qla25xx_set_que(srb_t *, struct rsp_que **);
17 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
20 * Returns the proper CF_* direction based on CDB.
22 static inline uint16_t
23 qla2x00_get_cmd_direction(srb_t *sp)
26 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27 struct scsi_qla_host *vha = sp->fcport->vha;
31 /* Set transfer direction */
32 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
34 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
35 vha->qla_stats.output_requests++;
36 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
38 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
39 vha->qla_stats.input_requests++;
45 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
46 * Continuation Type 0 IOCBs to allocate.
48 * @dsds: number of data segment descriptors needed
50 * Returns the number of IOCB entries needed to store @dsds.
53 qla2x00_calc_iocbs_32(uint16_t dsds)
59 iocbs += (dsds - 3) / 7;
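/*
 * Worked example (illustrative): for dsds = 24, the Command Type 2 IOCB
 * carries the first 3 DSDs and each Continuation Type 0 IOCB carries 7,
 * so iocbs = 1 + (24 - 3) / 7 = 4, with no remainder left to round up.
 */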
67 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
68 * Continuation Type 1 IOCBs to allocate.
70 * @dsds: number of data segment descriptors needed
72 * Returns the number of IOCB entries needed to store @dsds.
75 qla2x00_calc_iocbs_64(uint16_t dsds)
81 iocbs += (dsds - 2) / 5;
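/*
 * Worked example (illustrative): for dsds = 13, the Command Type 3 IOCB
 * carries the first 2 DSDs and each Continuation Type 1 IOCB carries 5,
 * so iocbs = 1 + (13 - 2) / 5 = 3, plus one more for the remainder of 1,
 * giving 4 IOCB entries in total.
 */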
89 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
92 * Returns a pointer to the Continuation Type 0 IOCB packet.
94 static inline cont_entry_t *
95 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
97 cont_entry_t *cont_pkt;
98 struct req_que *req = vha->req;
99 /* Adjust ring index. */
101 if (req->ring_index == req->length) {
103 req->ring_ptr = req->ring;
108 cont_pkt = (cont_entry_t *)req->ring_ptr;
110 /* Load packet defaults. */
111 *((uint32_t *)(&cont_pkt->entry_type)) =
112 __constant_cpu_to_le32(CONTINUE_TYPE);
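/*
 * Note (descriptive): the request queue is a circular ring of req->length
 * entries. Advancing ring_index past the last entry wraps both the index
 * and ring_ptr back to the start, e.g. with length 128 the entry after
 * slot 127 is slot 0 again.
 */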
118 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
121 * Returns a pointer to the Continuation Type 1 IOCB packet.
123 static inline cont_a64_entry_t *
124 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
126 cont_a64_entry_t *cont_pkt;
128 /* Adjust ring index. */
130 if (req->ring_index == req->length) {
132 req->ring_ptr = req->ring;
137 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
139 /* Load packet defaults. */
140 *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
141 __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
142 __constant_cpu_to_le32(CONTINUE_A64_TYPE);
148 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
150 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
151 uint8_t guard = scsi_host_get_guard(cmd->device->host);
153 /* We always use DIF bundling for best performance */
156 /* Translate SCSI opcode to a protection opcode */
157 switch (scsi_get_prot_op(cmd)) {
158 case SCSI_PROT_READ_STRIP:
159 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
161 case SCSI_PROT_WRITE_INSERT:
162 *fw_prot_opts |= PO_MODE_DIF_INSERT;
164 case SCSI_PROT_READ_INSERT:
165 *fw_prot_opts |= PO_MODE_DIF_INSERT;
167 case SCSI_PROT_WRITE_STRIP:
168 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
170 case SCSI_PROT_READ_PASS:
171 case SCSI_PROT_WRITE_PASS:
172 if (guard & SHOST_DIX_GUARD_IP)
173 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
175 *fw_prot_opts |= PO_MODE_DIF_PASS;
177 default: /* Normal Request */
178 *fw_prot_opts |= PO_MODE_DIF_PASS;
182 return scsi_prot_sg_count(cmd);
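/*
 * Example (illustrative): a SCSI_PROT_WRITE_PASS request on a host whose
 * DIX guard mask includes SHOST_DIX_GUARD_IP selects
 * PO_MODE_DIF_TCP_CKSUM, so the firmware handles an IP-checksum guard
 * while passing protection data through; READ_STRIP/WRITE_INSERT instead
 * select PO_MODE_DIF_REMOVE/PO_MODE_DIF_INSERT because only one side of
 * the transfer carries DIF data.
 */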
186 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
187 * capable IOCB types.
189 * @sp: SRB command to process
190 * @cmd_pkt: Command type 2 IOCB
191 * @tot_dsds: Total number of segments to transfer
193 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
198 scsi_qla_host_t *vha;
199 struct scsi_cmnd *cmd;
200 struct scatterlist *sg;
203 cmd = GET_CMD_SP(sp);
205 /* Update entry type to indicate Command Type 2 IOCB */
206 *((uint32_t *)(&cmd_pkt->entry_type)) =
207 __constant_cpu_to_le32(COMMAND_TYPE);
209 /* No data transfer */
210 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
211 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
215 vha = sp->fcport->vha;
216 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
218 /* Three DSDs are available in the Command Type 2 IOCB */
220 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
222 /* Load data segments */
223 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
224 cont_entry_t *cont_pkt;
226 /* Allocate additional continuation packets? */
227 if (avail_dsds == 0) {
229 * Seven DSDs are available in the Continuation
232 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
233 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
237 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
238 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
244 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
245 * capable IOCB types.
247 * @sp: SRB command to process
248 * @cmd_pkt: Command type 3 IOCB
249 * @tot_dsds: Total number of segments to transfer
251 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
256 scsi_qla_host_t *vha;
257 struct scsi_cmnd *cmd;
258 struct scatterlist *sg;
261 cmd = GET_CMD_SP(sp);
263 /* Update entry type to indicate Command Type 3 IOCB */
264 *((uint32_t *)(&cmd_pkt->entry_type)) =
265 __constant_cpu_to_le32(COMMAND_A64_TYPE);
267 /* No data transfer */
268 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
269 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
273 vha = sp->fcport->vha;
274 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
276 /* Two DSDs are available in the Command Type 3 IOCB */
278 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
280 /* Load data segments */
281 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
283 cont_a64_entry_t *cont_pkt;
285 /* Allocate additional continuation packets? */
286 if (avail_dsds == 0) {
288 * Five DSDs are available in the Continuation
291 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
292 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
296 sle_dma = sg_dma_address(sg);
297 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
298 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
299 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
305 * qla2x00_start_scsi() - Send a SCSI command to the ISP
306 * @sp: command to send to the ISP
308 * Returns non-zero if a failure occurred, else zero.
311 qla2x00_start_scsi(srb_t *sp)
315 scsi_qla_host_t *vha;
316 struct scsi_cmnd *cmd;
320 cmd_entry_t *cmd_pkt;
324 struct device_reg_2xxx __iomem *reg;
325 struct qla_hw_data *ha;
330 /* Setup device pointers. */
332 vha = sp->fcport->vha;
334 reg = &ha->iobase->isp;
335 cmd = GET_CMD_SP(sp);
336 req = ha->req_q_map[0];
337 rsp = ha->rsp_q_map[0];
338 /* So we know we haven't pci_map'ed anything yet */
341 /* Send marker if required */
342 if (vha->marker_needed != 0) {
343 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
345 return (QLA_FUNCTION_FAILED);
347 vha->marker_needed = 0;
350 /* Acquire ring specific lock */
351 spin_lock_irqsave(&ha->hardware_lock, flags);
353 /* Check for room in outstanding command list. */
354 handle = req->current_outstanding_cmd;
355 for (index = 1; index < req->num_outstanding_cmds; index++) {
357 if (handle == req->num_outstanding_cmds)
359 if (!req->outstanding_cmds[handle])
362 if (index == req->num_outstanding_cmds)
365 /* Map the sg table so we have an accurate count of sg entries needed */
366 if (scsi_sg_count(cmd)) {
367 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
368 scsi_sg_count(cmd), cmd->sc_data_direction);
376 /* Calculate the number of request entries needed. */
377 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
378 if (req->cnt < (req_cnt + 2)) {
379 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
380 if (req->ring_index < cnt)
381 req->cnt = cnt - req->ring_index;
383 req->cnt = req->length -
384 (req->ring_index - cnt);
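/*
 * Illustrative numbers: with req->length = 2048, ring_index = 100 and a
 * hardware out pointer cnt = 50, the producer index is ahead of the
 * consumer, so free space is 2048 - (100 - 50) = 1998 entries; had cnt
 * been 150, the first branch would give 150 - 100 = 50 free entries.
 */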
385 /* If still no head room then bail out */
386 if (req->cnt < (req_cnt + 2))
390 /* Build command packet */
391 req->current_outstanding_cmd = handle;
392 req->outstanding_cmds[handle] = sp;
394 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
397 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
398 cmd_pkt->handle = handle;
399 /* Zero out remaining portion of packet. */
400 clr_ptr = (uint32_t *)cmd_pkt + 2;
401 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
402 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
404 /* Set target ID and LUN number */
405 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
406 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
408 /* Update tagged queuing modifier */
409 if (scsi_populate_tag_msg(cmd, tag)) {
411 case HEAD_OF_QUEUE_TAG:
412 cmd_pkt->control_flags =
413 __constant_cpu_to_le16(CF_HEAD_TAG);
415 case ORDERED_QUEUE_TAG:
416 cmd_pkt->control_flags =
417 __constant_cpu_to_le16(CF_ORDERED_TAG);
420 cmd_pkt->control_flags =
421 __constant_cpu_to_le16(CF_SIMPLE_TAG);
425 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
428 /* Load SCSI command packet. */
429 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
430 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
432 /* Build IOCB segments */
433 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
435 /* Set total data segment count. */
436 cmd_pkt->entry_count = (uint8_t)req_cnt;
439 /* Adjust ring index. */
441 if (req->ring_index == req->length) {
443 req->ring_ptr = req->ring;
447 sp->flags |= SRB_DMA_VALID;
449 /* Set chip new ring index. */
450 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
451 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
453 /* Manage unprocessed RIO/ZIO commands in response queue. */
454 if (vha->flags.process_response_queue &&
455 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
456 qla2x00_process_response_queue(rsp);
458 spin_unlock_irqrestore(&ha->hardware_lock, flags);
459 return (QLA_SUCCESS);
465 spin_unlock_irqrestore(&ha->hardware_lock, flags);
467 return (QLA_FUNCTION_FAILED);
471 * qla2x00_start_iocbs() - Execute the IOCB command
474 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
476 struct qla_hw_data *ha = vha->hw;
477 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
479 if (IS_P3P_TYPE(ha)) {
480 qla82xx_start_iocbs(vha);
482 /* Adjust ring index. */
484 if (req->ring_index == req->length) {
486 req->ring_ptr = req->ring;
490 /* Set chip new ring index. */
491 if (ha->mqenable || IS_QLA83XX(ha)) {
492 WRT_REG_DWORD(req->req_q_in, req->ring_index);
493 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
494 } else if (IS_QLAFX00(ha)) {
495 WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
496 RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
497 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
498 } else if (IS_FWI2_CAPABLE(ha)) {
499 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
500 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
502 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
504 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
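/*
 * Note (descriptive): each register write above is followed by a read of
 * the same (or a nearby) register; this is the standard PCI-posting
 * flush, forcing the new ring index out of intermediate write buffers
 * before the driver proceeds.
 */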
510 * qla2x00_marker() - Send a marker IOCB to the firmware.
514 * @type: marker modifier
516 * Can be called from both normal and interrupt context.
518 * Returns non-zero if a failure occurred, else zero.
521 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
522 struct rsp_que *rsp, uint16_t loop_id,
523 uint16_t lun, uint8_t type)
526 struct mrk_entry_24xx *mrk24 = NULL;
527 struct mrk_entry_fx00 *mrkfx = NULL;
529 struct qla_hw_data *ha = vha->hw;
530 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
532 req = ha->req_q_map[0];
533 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
535 ql_log(ql_log_warn, base_vha, 0x3026,
536 "Failed to allocate Marker IOCB.\n");
538 return (QLA_FUNCTION_FAILED);
541 mrk->entry_type = MARKER_TYPE;
542 mrk->modifier = type;
543 if (type != MK_SYNC_ALL) {
544 if (IS_QLAFX00(ha)) {
545 mrkfx = (struct mrk_entry_fx00 *) mrk;
546 mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
547 mrkfx->handle_hi = 0;
548 mrkfx->tgt_id = cpu_to_le16(loop_id);
549 mrkfx->lun[1] = LSB(lun);
550 mrkfx->lun[2] = MSB(lun);
551 host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
552 } else if (IS_FWI2_CAPABLE(ha)) {
553 mrk24 = (struct mrk_entry_24xx *) mrk;
554 mrk24->nport_handle = cpu_to_le16(loop_id);
555 mrk24->lun[1] = LSB(lun);
556 mrk24->lun[2] = MSB(lun);
557 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
558 mrk24->vp_index = vha->vp_idx;
559 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
561 SET_TARGET_ID(ha, mrk->target, loop_id);
562 mrk->lun = cpu_to_le16(lun);
567 qla2x00_start_iocbs(vha, req);
569 return (QLA_SUCCESS);
573 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
574 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
578 unsigned long flags = 0;
580 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
581 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
582 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
588 * qla2x00_issue_marker
591 * Caller CAN have hardware lock held as specified by ha_locked parameter.
592 * Might release it, then reacquire.
594 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
595 {
596 if (ha_locked) {
597 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
598 MK_SYNC_ALL) != QLA_SUCCESS)
599 return QLA_FUNCTION_FAILED;
600 } else {
601 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
602 MK_SYNC_ALL) != QLA_SUCCESS)
603 return QLA_FUNCTION_FAILED;
604 }
605 vha->marker_needed = 0;
611 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
614 uint32_t *cur_dsd = NULL;
615 scsi_qla_host_t *vha;
616 struct qla_hw_data *ha;
617 struct scsi_cmnd *cmd;
618 struct scatterlist *cur_seg;
622 uint8_t first_iocb = 1;
623 uint32_t dsd_list_len;
624 struct dsd_dma *dsd_ptr;
627 cmd = GET_CMD_SP(sp);
629 /* Update entry type to indicate Command Type 6 IOCB */
630 *((uint32_t *)(&cmd_pkt->entry_type)) =
631 __constant_cpu_to_le32(COMMAND_TYPE_6);
633 /* No data transfer */
634 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
635 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
639 vha = sp->fcport->vha;
642 /* Set transfer direction */
643 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
644 cmd_pkt->control_flags =
645 __constant_cpu_to_le16(CF_WRITE_DATA);
646 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
647 vha->qla_stats.output_requests++;
648 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
649 cmd_pkt->control_flags =
650 __constant_cpu_to_le16(CF_READ_DATA);
651 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
652 vha->qla_stats.input_requests++;
655 cur_seg = scsi_sglist(cmd);
656 ctx = GET_CMD_CTX_SP(sp);
659 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
660 QLA_DSDS_PER_IOCB : tot_dsds;
661 tot_dsds -= avail_dsds;
662 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
664 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
665 struct dsd_dma, list);
666 next_dsd = dsd_ptr->dsd_addr;
667 list_del(&dsd_ptr->list);
669 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
675 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
676 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
677 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
678 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
680 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
681 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
682 *cur_dsd++ = cpu_to_le32(dsd_list_len);
684 cur_dsd = (uint32_t *)next_dsd;
688 sle_dma = sg_dma_address(cur_seg);
689 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
690 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
691 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
692 cur_seg = sg_next(cur_seg);
697 /* Null termination */
701 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
706 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
707 * for Command Type 6.
709 * @dsds: number of data segment descriptors needed
711 * Returns the number of DSD lists needed to store @dsds.
714 qla24xx_calc_dsd_lists(uint16_t dsds)
716 uint16_t dsd_lists = 0;
718 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
719 if (dsds % QLA_DSDS_PER_IOCB)
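/*
 * Worked example (illustrative, assuming QLA_DSDS_PER_IOCB is 37 as
 * defined elsewhere in the driver): a command with 100 data segments
 * needs 100 / 37 = 2 full DSD lists plus one more for the remaining 26
 * descriptors, so 3 lists in total.
 */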
726 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
729 * @sp: SRB command to process
730 * @cmd_pkt: Command type 7 IOCB
731 * @tot_dsds: Total number of segments to transfer
734 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
739 scsi_qla_host_t *vha;
740 struct scsi_cmnd *cmd;
741 struct scatterlist *sg;
745 cmd = GET_CMD_SP(sp);
747 /* Update entry type to indicate Command Type 7 IOCB */
748 *((uint32_t *)(&cmd_pkt->entry_type)) =
749 __constant_cpu_to_le32(COMMAND_TYPE_7);
751 /* No data transfer */
752 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
753 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
757 vha = sp->fcport->vha;
760 /* Set transfer direction */
761 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
762 cmd_pkt->task_mgmt_flags =
763 __constant_cpu_to_le16(TMF_WRITE_DATA);
764 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
765 vha->qla_stats.output_requests++;
766 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
767 cmd_pkt->task_mgmt_flags =
768 __constant_cpu_to_le16(TMF_READ_DATA);
769 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
770 vha->qla_stats.input_requests++;
773 /* One DSD is available in the Command Type 7 IOCB */
775 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
777 /* Load data segments */
779 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
781 cont_a64_entry_t *cont_pkt;
783 /* Allocate additional continuation packets? */
784 if (avail_dsds == 0) {
786 * Five DSDs are available in the Continuation
789 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
790 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
794 sle_dma = sg_dma_address(sg);
795 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
796 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
797 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
802 struct fw_dif_context {
805 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
806 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
810 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
814 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
815 unsigned int protcnt)
817 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
819 switch (scsi_get_prot_type(cmd)) {
820 case SCSI_PROT_DIF_TYPE0:
822 * No check for ql2xenablehba_err_chk, as it would be an
823 * I/O error if hba tag generation is not done.
825 pkt->ref_tag = cpu_to_le32((uint32_t)
826 (0xffffffff & scsi_get_lba(cmd)));
828 if (!qla2x00_hba_err_chk_enabled(sp))
831 pkt->ref_tag_mask[0] = 0xff;
832 pkt->ref_tag_mask[1] = 0xff;
833 pkt->ref_tag_mask[2] = 0xff;
834 pkt->ref_tag_mask[3] = 0xff;
838 * For TYPE 2 protection: the 16-bit GUARD and 32-bit REF tag must
839 * match the LBA in the CDB + N
841 case SCSI_PROT_DIF_TYPE2:
842 pkt->app_tag = __constant_cpu_to_le16(0);
843 pkt->app_tag_mask[0] = 0x0;
844 pkt->app_tag_mask[1] = 0x0;
846 pkt->ref_tag = cpu_to_le32((uint32_t)
847 (0xffffffff & scsi_get_lba(cmd)));
849 if (!qla2x00_hba_err_chk_enabled(sp))
852 /* enable ALL bytes of the ref tag */
853 pkt->ref_tag_mask[0] = 0xff;
854 pkt->ref_tag_mask[1] = 0xff;
855 pkt->ref_tag_mask[2] = 0xff;
856 pkt->ref_tag_mask[3] = 0xff;
859 /* For Type 3 protection: 16 bit GUARD only */
860 case SCSI_PROT_DIF_TYPE3:
861 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
862 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
867 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
870 case SCSI_PROT_DIF_TYPE1:
871 pkt->ref_tag = cpu_to_le32((uint32_t)
872 (0xffffffff & scsi_get_lba(cmd)));
873 pkt->app_tag = __constant_cpu_to_le16(0);
874 pkt->app_tag_mask[0] = 0x0;
875 pkt->app_tag_mask[1] = 0x0;
877 if (!qla2x00_hba_err_chk_enabled(sp))
880 /* enable ALL bytes of the ref tag */
881 pkt->ref_tag_mask[0] = 0xff;
882 pkt->ref_tag_mask[1] = 0xff;
883 pkt->ref_tag_mask[2] = 0xff;
884 pkt->ref_tag_mask[3] = 0xff;
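/*
 * Example (illustrative): a Type 1 protected command starting at LBA
 * 0x12345678 programs ref_tag = 0x12345678 (the low 32 bits of the LBA),
 * leaves app_tag = 0 with a zero mask (application tag not checked), and
 * sets all four ref_tag_mask bytes to 0xff so the firmware checks the
 * full 32-bit reference tag on every block.
 */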
890 dma_addr_t dma_addr; /* OUT */
891 uint32_t dma_len; /* OUT */
893 uint32_t tot_bytes; /* IN */
894 struct scatterlist *cur_sg; /* IN */
896 /* for bookkeeping, bzero on initial invocation */
897 uint32_t bytes_consumed;
899 uint32_t tot_partial;
907 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
910 struct scatterlist *sg;
911 uint32_t cumulative_partial, sg_len;
912 dma_addr_t sg_dma_addr;
914 if (sgx->num_bytes == sgx->tot_bytes)
918 cumulative_partial = sgx->tot_partial;
920 sg_dma_addr = sg_dma_address(sg);
921 sg_len = sg_dma_len(sg);
923 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
925 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
926 sgx->dma_len = (blk_sz - cumulative_partial);
927 sgx->tot_partial = 0;
928 sgx->num_bytes += blk_sz;
931 sgx->dma_len = sg_len - sgx->bytes_consumed;
932 sgx->tot_partial += sgx->dma_len;
936 sgx->bytes_consumed += sgx->dma_len;
938 if (sg_len == sgx->bytes_consumed) {
942 sgx->bytes_consumed = 0;
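/*
 * Worked example (illustrative): with blk_sz = 512 and two scatterlist
 * elements of 768 bytes each, the first call yields a 512-byte chunk
 * (partial = 0), the second the trailing 256 bytes of element 0 with
 * partial = 1, and the third takes 256 bytes from element 1 to complete
 * the second 512-byte protection interval.
 */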
949 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
950 uint32_t *dsd, uint16_t tot_dsds)
953 uint8_t avail_dsds = 0;
954 uint32_t dsd_list_len;
955 struct dsd_dma *dsd_ptr;
956 struct scatterlist *sg_prot;
957 uint32_t *cur_dsd = dsd;
958 uint16_t used_dsds = tot_dsds;
964 uint32_t sle_dma_len, tot_prot_dma_len = 0;
965 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
967 prot_int = cmd->device->sector_size;
969 memset(&sgx, 0, sizeof(struct qla2_sgx));
970 sgx.tot_bytes = scsi_bufflen(cmd);
971 sgx.cur_sg = scsi_sglist(cmd);
974 sg_prot = scsi_prot_sglist(cmd);
976 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
978 sle_dma = sgx.dma_addr;
979 sle_dma_len = sgx.dma_len;
981 /* Allocate additional continuation packets? */
982 if (avail_dsds == 0) {
983 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
984 QLA_DSDS_PER_IOCB : used_dsds;
985 dsd_list_len = (avail_dsds + 1) * 12;
986 used_dsds -= avail_dsds;
988 /* allocate tracking DS */
989 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
993 /* allocate new list */
994 dsd_ptr->dsd_addr = next_dsd =
995 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
996 &dsd_ptr->dsd_list_dma);
1000 * Need to clean up only this dsd_ptr; the rest
1001 * will be done by sp_free_dma()
1007 list_add_tail(&dsd_ptr->list,
1008 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1010 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1012 /* add new list to cmd iocb or last list */
1013 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1014 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1015 *cur_dsd++ = dsd_list_len;
1016 cur_dsd = (uint32_t *)next_dsd;
1018 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1019 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1020 *cur_dsd++ = cpu_to_le32(sle_dma_len);
1024 /* Got a full protection interval */
1025 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
1028 tot_prot_dma_len += sle_dma_len;
1029 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
1030 tot_prot_dma_len = 0;
1031 sg_prot = sg_next(sg_prot);
1034 partial = 1; /* So as to not re-enter this block */
1035 goto alloc_and_fill;
1038 /* Null termination */
1046 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1050 uint8_t avail_dsds = 0;
1051 uint32_t dsd_list_len;
1052 struct dsd_dma *dsd_ptr;
1053 struct scatterlist *sg;
1054 uint32_t *cur_dsd = dsd;
1056 uint16_t used_dsds = tot_dsds;
1057 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1059 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1062 /* Allocate additional continuation packets? */
1063 if (avail_dsds == 0) {
1064 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1065 QLA_DSDS_PER_IOCB : used_dsds;
1066 dsd_list_len = (avail_dsds + 1) * 12;
1067 used_dsds -= avail_dsds;
1069 /* allocate tracking DS */
1070 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1074 /* allocate new list */
1075 dsd_ptr->dsd_addr = next_dsd =
1076 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1077 &dsd_ptr->dsd_list_dma);
1081 * Need to clean up only this dsd_ptr; the rest
1082 * will be done by sp_free_dma()
1088 list_add_tail(&dsd_ptr->list,
1089 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1091 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1093 /* add new list to cmd iocb or last list */
1094 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1095 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1096 *cur_dsd++ = dsd_list_len;
1097 cur_dsd = (uint32_t *)next_dsd;
1099 sle_dma = sg_dma_address(sg);
1101 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1102 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1103 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1107 /* Null termination */
1115 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1120 uint8_t avail_dsds = 0;
1121 uint32_t dsd_list_len;
1122 struct dsd_dma *dsd_ptr;
1123 struct scatterlist *sg;
1125 struct scsi_cmnd *cmd;
1126 uint32_t *cur_dsd = dsd;
1127 uint16_t used_dsds = tot_dsds;
1129 cmd = GET_CMD_SP(sp);
1130 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1133 /* Allocate additional continuation packets? */
1134 if (avail_dsds == 0) {
1135 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1136 QLA_DSDS_PER_IOCB : used_dsds;
1137 dsd_list_len = (avail_dsds + 1) * 12;
1138 used_dsds -= avail_dsds;
1140 /* allocate tracking DS */
1141 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1145 /* allocate new list */
1146 dsd_ptr->dsd_addr = next_dsd =
1147 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1148 &dsd_ptr->dsd_list_dma);
1152 * Need to clean up only this dsd_ptr; the rest
1153 * will be done by sp_free_dma()
1159 list_add_tail(&dsd_ptr->list,
1160 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1162 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1164 /* add new list to cmd iocb or last list */
1165 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1166 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1167 *cur_dsd++ = dsd_list_len;
1168 cur_dsd = (uint32_t *)next_dsd;
1170 sle_dma = sg_dma_address(sg);
1172 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1173 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1174 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1178 /* Null termination */
1186 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1187 * Type 6 IOCB types.
1189 * @sp: SRB command to process
1190 * @cmd_pkt: Command type CRC_2 IOCB
1191 * @tot_dsds: Total number of segments to transfer
1194 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1195 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1197 uint32_t *cur_dsd, *fcp_dl;
1198 scsi_qla_host_t *vha;
1199 struct scsi_cmnd *cmd;
1201 uint32_t total_bytes = 0;
1202 uint32_t data_bytes;
1204 uint8_t bundling = 1;
1207 struct crc_context *crc_ctx_pkt = NULL;
1208 struct qla_hw_data *ha;
1209 uint8_t additional_fcpcdb_len;
1210 uint16_t fcp_cmnd_len;
1211 struct fcp_cmnd *fcp_cmnd;
1212 dma_addr_t crc_ctx_dma;
1215 cmd = GET_CMD_SP(sp);
1218 /* Update entry type to indicate Command Type CRC_2 IOCB */
1219 *((uint32_t *)(&cmd_pkt->entry_type)) =
1220 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1222 vha = sp->fcport->vha;
1225 /* No data transfer */
1226 data_bytes = scsi_bufflen(cmd);
1227 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1228 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1232 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1234 /* Set transfer direction */
1235 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1236 cmd_pkt->control_flags =
1237 __constant_cpu_to_le16(CF_WRITE_DATA);
1238 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1239 cmd_pkt->control_flags =
1240 __constant_cpu_to_le16(CF_READ_DATA);
1243 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1244 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1245 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1246 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1249 /* Allocate CRC context from global pool */
1250 crc_ctx_pkt = sp->u.scmd.ctx =
1251 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1254 goto crc_queuing_error;
1256 /* Zero out CTX area. */
1257 clr_ptr = (uint8_t *)crc_ctx_pkt;
1258 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1260 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1262 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1265 crc_ctx_pkt->handle = cmd_pkt->handle;
1267 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1269 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1270 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1272 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1273 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1274 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1276 /* Determine SCSI command length -- align to 4 byte boundary */
1277 if (cmd->cmd_len > 16) {
1278 additional_fcpcdb_len = cmd->cmd_len - 16;
1279 if ((cmd->cmd_len % 4) != 0) {
1280 /* SCSI cmd > 16 bytes must be multiple of 4 */
1281 goto crc_queuing_error;
1283 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1285 additional_fcpcdb_len = 0;
1286 fcp_cmnd_len = 12 + 16 + 4;
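/*
 * Example (illustrative): a 32-byte CDB gives additional_fcpcdb_len = 16
 * and fcp_cmnd_len = 12 + 32 + 4 = 48: the fixed FCP_CMND header, the
 * full CDB, and the trailing 4-byte FCP_DL field. Any CDB of 16 bytes or
 * less uses the fixed 12 + 16 + 4 = 32-byte layout.
 */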
1289 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1291 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1292 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1293 fcp_cmnd->additional_cdb_len |= 1;
1294 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1295 fcp_cmnd->additional_cdb_len |= 2;
1297 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1298 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1299 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1300 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1301 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1302 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1303 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1304 fcp_cmnd->task_management = 0;
1307 * Update tagged queuing modifier if using command tag queuing
1309 if (scsi_populate_tag_msg(cmd, tag)) {
1311 case HEAD_OF_QUEUE_TAG:
1312 fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1314 case ORDERED_QUEUE_TAG:
1315 fcp_cmnd->task_attribute = TSK_ORDERED;
1318 fcp_cmnd->task_attribute = TSK_SIMPLE;
1322 fcp_cmnd->task_attribute = TSK_SIMPLE;
1325 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1327 /* Compute dif len and adjust data len to include protection */
1329 blk_size = cmd->device->sector_size;
1330 dif_bytes = (data_bytes / blk_size) * 8;
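/*
 * Example (illustrative): a 4096-byte transfer with 512-byte sectors
 * carries (4096 / 512) * 8 = 64 bytes of DIF data -- one 8-byte tuple of
 * guard, application tag and reference tag per block.
 */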
1332 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1333 case SCSI_PROT_READ_INSERT:
1334 case SCSI_PROT_WRITE_STRIP:
1335 total_bytes = data_bytes;
1336 data_bytes += dif_bytes;
1339 case SCSI_PROT_READ_STRIP:
1340 case SCSI_PROT_WRITE_INSERT:
1341 case SCSI_PROT_READ_PASS:
1342 case SCSI_PROT_WRITE_PASS:
1343 total_bytes = data_bytes + dif_bytes;
1349 if (!qla2x00_hba_err_chk_enabled(sp))
1350 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1351 /* HBA error checking enabled */
1352 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1353 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1354 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1355 SCSI_PROT_DIF_TYPE2))
1356 fw_prot_opts |= BIT_10;
1357 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1358 SCSI_PROT_DIF_TYPE3)
1359 fw_prot_opts |= BIT_11;
1363 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1366 * Configure bundling if we need to fetch interleaved
1367 * protection data in separate PCI accesses
1369 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1370 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1371 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1373 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1376 /* Finish the common fields of CRC pkt */
1377 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1378 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1379 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1380 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1381 /* Fibre channel byte count */
1382 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1383 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1384 additional_fcpcdb_len);
1385 *fcp_dl = htonl(total_bytes);
1387 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1388 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1391 /* Walks data segments */
1393 cmd_pkt->control_flags |=
1394 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1396 if (!bundling && tot_prot_dsds) {
1397 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1399 goto crc_queuing_error;
1400 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1401 (tot_dsds - tot_prot_dsds)))
1402 goto crc_queuing_error;
1404 if (bundling && tot_prot_dsds) {
1405 /* Walks dif segments */
1406 cmd_pkt->control_flags |=
1407 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1408 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1409 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1411 goto crc_queuing_error;
1416 /* Cleanup will be performed by the caller */
1418 return QLA_FUNCTION_FAILED;
1422 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1423 * @sp: command to send to the ISP
1425 * Returns non-zero if a failure occurred, else zero.
1428 qla24xx_start_scsi(srb_t *sp)
1431 unsigned long flags;
1435 struct cmd_type_7 *cmd_pkt;
1439 struct req_que *req = NULL;
1440 struct rsp_que *rsp = NULL;
1441 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1442 struct scsi_qla_host *vha = sp->fcport->vha;
1443 struct qla_hw_data *ha = vha->hw;
1446 /* Setup device pointers. */
1449 qla25xx_set_que(sp, &rsp);
1452 /* So we know we haven't pci_map'ed anything yet */
1455 /* Send marker if required */
1456 if (vha->marker_needed != 0) {
1457 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1459 return QLA_FUNCTION_FAILED;
1460 vha->marker_needed = 0;
1463 /* Acquire ring specific lock */
1464 spin_lock_irqsave(&ha->hardware_lock, flags);
1466 /* Check for room in outstanding command list. */
1467 handle = req->current_outstanding_cmd;
1468 for (index = 1; index < req->num_outstanding_cmds; index++) {
1470 if (handle == req->num_outstanding_cmds)
1472 if (!req->outstanding_cmds[handle])
1475 if (index == req->num_outstanding_cmds)
1478 /* Map the sg table so we have an accurate count of sg entries needed */
1479 if (scsi_sg_count(cmd)) {
1480 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1481 scsi_sg_count(cmd), cmd->sc_data_direction);
1482 if (unlikely(!nseg))
1488 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1489 if (req->cnt < (req_cnt + 2)) {
1490 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1492 if (req->ring_index < cnt)
1493 req->cnt = cnt - req->ring_index;
1495 req->cnt = req->length -
1496 (req->ring_index - cnt);
1497 if (req->cnt < (req_cnt + 2))
1501 /* Build command packet. */
1502 req->current_outstanding_cmd = handle;
1503 req->outstanding_cmds[handle] = sp;
1504 sp->handle = handle;
1505 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1506 req->cnt -= req_cnt;
1508 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1509 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1511 /* Zero out remaining portion of packet. */
1512 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1513 clr_ptr = (uint32_t *)cmd_pkt + 2;
1514 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1515 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1517 /* Set NPORT-ID and LUN number */
1518 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1519 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1520 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1521 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1522 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1524 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1525 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1527 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1528 if (scsi_populate_tag_msg(cmd, tag)) {
1530 case HEAD_OF_QUEUE_TAG:
1531 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1533 case ORDERED_QUEUE_TAG:
1534 cmd_pkt->task = TSK_ORDERED;
1537 cmd_pkt->task = TSK_SIMPLE;
1541 cmd_pkt->task = TSK_SIMPLE;
1544 /* Load SCSI command packet. */
1545 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1546 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1548 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1550 /* Build IOCB segments */
1551 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1553 /* Set total data segment count. */
1554 cmd_pkt->entry_count = (uint8_t)req_cnt;
1555 /* Specify response queue number where completion should happen */
1556 cmd_pkt->entry_status = (uint8_t) rsp->id;
1558 /* Adjust ring index. */
1560 if (req->ring_index == req->length) {
1561 req->ring_index = 0;
1562 req->ring_ptr = req->ring;
1566 sp->flags |= SRB_DMA_VALID;
1568 /* Set chip new ring index. */
1569 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1570 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1572 /* Manage unprocessed RIO/ZIO commands in response queue. */
1573 if (vha->flags.process_response_queue &&
1574 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1575 qla24xx_process_response_queue(vha, rsp);
1577 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1582 scsi_dma_unmap(cmd);
1584 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1586 return QLA_FUNCTION_FAILED;
1590 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1591 * @sp: command to send to the ISP
1593 * Returns non-zero if a failure occurred, else zero.
1596 qla24xx_dif_start_scsi(srb_t *sp)
1599 unsigned long flags;
1604 uint16_t req_cnt = 0;
1606 uint16_t tot_prot_dsds;
1607 uint16_t fw_prot_opts = 0;
1608 struct req_que *req = NULL;
1609 struct rsp_que *rsp = NULL;
1610 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1611 struct scsi_qla_host *vha = sp->fcport->vha;
1612 struct qla_hw_data *ha = vha->hw;
1613 struct cmd_type_crc_2 *cmd_pkt;
1614 uint32_t status = 0;
1616 #define QDSS_GOT_Q_SPACE BIT_0
1618 /* Only process protection ops or CDBs longer than 16 bytes in this routine */
1619 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1620 if (cmd->cmd_len <= 16)
1621 return qla24xx_start_scsi(sp);
1624 /* Setup device pointers. */
1626 qla25xx_set_que(sp, &rsp);
1629 /* So we know we haven't pci_map'ed anything yet */
1632 /* Send marker if required */
1633 if (vha->marker_needed != 0) {
1634 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1636 return QLA_FUNCTION_FAILED;
1637 vha->marker_needed = 0;
1640 /* Acquire ring specific lock */
1641 spin_lock_irqsave(&ha->hardware_lock, flags);
1643 /* Check for room in outstanding command list. */
1644 handle = req->current_outstanding_cmd;
1645 for (index = 1; index < req->num_outstanding_cmds; index++) {
1647 if (handle == req->num_outstanding_cmds)
1649 if (!req->outstanding_cmds[handle])
1653 if (index == req->num_outstanding_cmds)
1656 /* Compute number of required data segments */
1657 /* Map the sg table so we have an accurate count of sg entries needed */
1658 if (scsi_sg_count(cmd)) {
1659 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1660 scsi_sg_count(cmd), cmd->sc_data_direction);
1661 if (unlikely(!nseg))
1664 sp->flags |= SRB_DMA_VALID;
1666 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1667 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1668 struct qla2_sgx sgx;
1671 memset(&sgx, 0, sizeof(struct qla2_sgx));
1672 sgx.tot_bytes = scsi_bufflen(cmd);
1673 sgx.cur_sg = scsi_sglist(cmd);
1677 while (qla24xx_get_one_block_sg(
1678 cmd->device->sector_size, &sgx, &partial))
1684 /* number of required data segments */
1687 /* Compute number of required protection segments */
1688 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1689 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1690 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1691 if (unlikely(!nseg))
1694 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1696 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1697 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1698 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1705 /* Total Data and protection sg segment(s) */
1706 tot_prot_dsds = nseg;
1708 if (req->cnt < (req_cnt + 2)) {
1709 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1711 if (req->ring_index < cnt)
1712 req->cnt = cnt - req->ring_index;
1714 req->cnt = req->length -
1715 (req->ring_index - cnt);
1716 if (req->cnt < (req_cnt + 2))
1720 status |= QDSS_GOT_Q_SPACE;
1722 /* Build header part of command packet (excluding the OPCODE). */
1723 req->current_outstanding_cmd = handle;
1724 req->outstanding_cmds[handle] = sp;
1725 sp->handle = handle;
1726 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1727 req->cnt -= req_cnt;
1729 /* Fill-in common area */
1730 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1731 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1733 clr_ptr = (uint32_t *)cmd_pkt + 2;
1734 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1736 /* Set NPORT-ID and LUN number */
1737 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1738 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1739 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1740 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1742 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1743 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1745 /* Total Data and protection segment(s) */
1746 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1748 /* Build IOCB segments and adjust for data protection segments */
1749 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1750 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1754 cmd_pkt->entry_count = (uint8_t)req_cnt;
1755 /* Specify response queue number where completion should happen */
1756 cmd_pkt->entry_status = (uint8_t) rsp->id;
1757 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1760 /* Adjust ring index. */
1762 if (req->ring_index == req->length) {
1763 req->ring_index = 0;
1764 req->ring_ptr = req->ring;
1768 /* Set chip new ring index. */
1769 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1770 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1772 /* Manage unprocessed RIO/ZIO commands in response queue. */
1773 if (vha->flags.process_response_queue &&
1774 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1775 qla24xx_process_response_queue(vha, rsp);
1777 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1782 if (status & QDSS_GOT_Q_SPACE) {
1783 req->outstanding_cmds[handle] = NULL;
1784 req->cnt += req_cnt;
1786 /* Cleanup will be performed by the caller (queuecommand) */
1788 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1789 return QLA_FUNCTION_FAILED;
1793 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1795 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1796 struct qla_hw_data *ha = sp->fcport->vha->hw;
1797 int affinity = cmd->request->cpu;
1799 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1800 affinity < ha->max_rsp_queues - 1)
1801 *rsp = ha->rsp_q_map[affinity + 1];
1803 *rsp = ha->rsp_q_map[0];
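/*
 * Example (illustrative): with cpu_affinity_enabled set, max_rsp_queues
 * = 4 and a request issued on CPU 2, the command is steered to
 * rsp_q_map[3]; queue 0 stays the default for CPUs without a dedicated
 * response queue.
 */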
1806 /* Generic Control-SRB manipulation functions. */
1808 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1810 struct qla_hw_data *ha = vha->hw;
1811 struct req_que *req = ha->req_q_map[0];
1812 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1813 uint32_t index, handle;
1815 uint16_t cnt, req_cnt;
1822 goto skip_cmd_array;
1824 /* Check for room in outstanding command list. */
1825 handle = req->current_outstanding_cmd;
1826 for (index = 1; index < req->num_outstanding_cmds; index++) {
1828 if (handle == req->num_outstanding_cmds)
1830 if (!req->outstanding_cmds[handle])
1833 if (index == req->num_outstanding_cmds) {
1834 ql_log(ql_log_warn, vha, 0x700b,
1835 "No room on outstanding cmd array.\n");
1839 /* Prep command array. */
1840 req->current_outstanding_cmd = handle;
1841 req->outstanding_cmds[handle] = sp;
1842 sp->handle = handle;
1844 /* Adjust entry-counts as needed. */
1845 if (sp->type != SRB_SCSI_CMD)
1846 req_cnt = sp->iocbs;
1849 /* Check for room on request queue. */
1850 if (req->cnt < req_cnt) {
1851 if (ha->mqenable || IS_QLA83XX(ha))
1852 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1853 else if (IS_P3P_TYPE(ha))
1854 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1855 else if (IS_FWI2_CAPABLE(ha))
1856 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1857 else if (IS_QLAFX00(ha))
1858 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1860 cnt = qla2x00_debounce_register(
1861 ISP_REQ_Q_OUT(ha, &reg->isp));
1863 if (req->ring_index < cnt)
1864 req->cnt = cnt - req->ring_index;
1866 req->cnt = req->length -
1867 (req->ring_index - cnt);
1869 if (req->cnt < req_cnt)
1873 req->cnt -= req_cnt;
1874 pkt = req->ring_ptr;
1875 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1876 if (IS_QLAFX00(ha)) {
1877 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1878 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1880 pkt->entry_count = req_cnt;
1881 pkt->handle = handle;
1889 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1891 struct srb_iocb *lio = &sp->u.iocb_cmd;
1893 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1894 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1895 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1896 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1897 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1898 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1899 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1900 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1901 logio->port_id[1] = sp->fcport->d_id.b.area;
1902 logio->port_id[2] = sp->fcport->d_id.b.domain;
1903 logio->vp_index = sp->fcport->vha->vp_idx;
1907 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1909 struct qla_hw_data *ha = sp->fcport->vha->hw;
1910 struct srb_iocb *lio = &sp->u.iocb_cmd;
1913 mbx->entry_type = MBX_IOCB_TYPE;
1914 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1915 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1916 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1917 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1918 if (HAS_EXTENDED_IDS(ha)) {
1919 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1920 mbx->mb10 = cpu_to_le16(opts);
1922 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1924 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1925 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1926 sp->fcport->d_id.b.al_pa);
1927 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1931 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1933 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1934 logio->control_flags =
1935 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1936 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1937 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1938 logio->port_id[1] = sp->fcport->d_id.b.area;
1939 logio->port_id[2] = sp->fcport->d_id.b.domain;
1940 logio->vp_index = sp->fcport->vha->vp_idx;
1944 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1946 struct qla_hw_data *ha = sp->fcport->vha->hw;
1948 mbx->entry_type = MBX_IOCB_TYPE;
1949 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1950 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1951 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1952 cpu_to_le16(sp->fcport->loop_id):
1953 cpu_to_le16(sp->fcport->loop_id << 8);
1954 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1955 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1956 sp->fcport->d_id.b.al_pa);
1957 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1958 /* Implicit: mbx->mb10 = 0. */
1962 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1964 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1965 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1966 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1967 logio->vp_index = sp->fcport->vha->vp_idx;
1971 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1973 struct qla_hw_data *ha = sp->fcport->vha->hw;
1975 mbx->entry_type = MBX_IOCB_TYPE;
1976 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1977 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1978 if (HAS_EXTENDED_IDS(ha)) {
1979 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1980 mbx->mb10 = cpu_to_le16(BIT_0);
1982 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1984 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1985 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1986 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1987 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1988 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1992 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1996 struct fc_port *fcport = sp->fcport;
1997 scsi_qla_host_t *vha = fcport->vha;
1998 struct qla_hw_data *ha = vha->hw;
1999 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2000 struct req_que *req = vha->req;
2002 flags = iocb->u.tmf.flags;
2003 lun = iocb->u.tmf.lun;
2005 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2006 tsk->entry_count = 1;
2007 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2008 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2009 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
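/*
 * Illustrative math (assuming r_a_tov is kept in 100 ms units, as the
 * /10 conversion to seconds suggests): for R_A_TOV = 10 s, r_a_tov is
 * 100 and the IOCB timeout becomes 100 / 10 * 2 = 20 seconds, i.e.
 * twice the resource allocation timeout.
 */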
2010 tsk->control_flags = cpu_to_le32(flags);
2011 tsk->port_id[0] = fcport->d_id.b.al_pa;
2012 tsk->port_id[1] = fcport->d_id.b.area;
2013 tsk->port_id[2] = fcport->d_id.b.domain;
2014 tsk->vp_index = fcport->vha->vp_idx;
2016 if (flags == TCF_LUN_RESET) {
2017 int_to_scsilun(lun, &tsk->lun);
2018 host_to_fcp_swap((uint8_t *)&tsk->lun,
2024 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2026 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2028 els_iocb->entry_type = ELS_IOCB_TYPE;
2029 els_iocb->entry_count = 1;
2030 els_iocb->sys_define = 0;
2031 els_iocb->entry_status = 0;
2032 els_iocb->handle = sp->handle;
2033 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2034 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2035 els_iocb->vp_index = sp->fcport->vha->vp_idx;
2036 els_iocb->sof_type = EST_SOFI3;
2037 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2040 sp->type == SRB_ELS_CMD_RPT ?
2041 bsg_job->request->rqst_data.r_els.els_code :
2042 bsg_job->request->rqst_data.h_els.command_code;
2043 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2044 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2045 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2046 els_iocb->control_flags = 0;
2047 els_iocb->rx_byte_count =
2048 cpu_to_le32(bsg_job->reply_payload.payload_len);
2049 els_iocb->tx_byte_count =
2050 cpu_to_le32(bsg_job->request_payload.payload_len);
2052 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2053 (bsg_job->request_payload.sg_list)));
2054 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2055 (bsg_job->request_payload.sg_list)));
2056 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2057 (bsg_job->request_payload.sg_list));
2059 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2060 (bsg_job->reply_payload.sg_list)));
2061 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2062 (bsg_job->reply_payload.sg_list)));
2063 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2064 (bsg_job->reply_payload.sg_list));
2066 sp->fcport->vha->qla_stats.control_requests++;
2070 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2072 uint16_t avail_dsds;
2074 struct scatterlist *sg;
2077 scsi_qla_host_t *vha = sp->fcport->vha;
2078 struct qla_hw_data *ha = vha->hw;
2079 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2080 int loop_iteration = 0;
2081 int cont_iocb_prsnt = 0;
2082 int entry_count = 1;
2084 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2085 ct_iocb->entry_type = CT_IOCB_TYPE;
2086 ct_iocb->entry_status = 0;
2087 ct_iocb->handle1 = sp->handle;
2088 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2089 ct_iocb->status = __constant_cpu_to_le16(0);
2090 ct_iocb->control_flags = __constant_cpu_to_le16(0);
2091 ct_iocb->timeout = 0;
2092 ct_iocb->cmd_dsd_count =
2093 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2094 ct_iocb->total_dsd_count =
2095 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2096 ct_iocb->req_bytecount =
2097 cpu_to_le32(bsg_job->request_payload.payload_len);
2098 ct_iocb->rsp_bytecount =
2099 cpu_to_le32(bsg_job->reply_payload.payload_len);
2101 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2102 (bsg_job->request_payload.sg_list)));
2103 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2104 (bsg_job->request_payload.sg_list)));
2105 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2107 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2108 (bsg_job->reply_payload.sg_list)));
2109 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2110 (bsg_job->reply_payload.sg_list)));
2111 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2114 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2116 tot_dsds = bsg_job->reply_payload.sg_cnt;
2118 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2120 cont_a64_entry_t *cont_pkt;
2122 /* Allocate additional continuation packets? */
2123 if (avail_dsds == 0) {
2125 * Five DSDs are available in the Cont.
2128 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2129 vha->hw->req_q_map[0]);
2130 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2132 cont_iocb_prsnt = 1;
2136 sle_dma = sg_dma_address(sg);
2137 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2138 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2139 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2143 ct_iocb->entry_count = entry_count;
2145 sp->fcport->vha->qla_stats.control_requests++;
2149 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2151 uint16_t avail_dsds;
2153 struct scatterlist *sg;
2156 scsi_qla_host_t *vha = sp->fcport->vha;
2157 struct qla_hw_data *ha = vha->hw;
2158 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2159 int loop_iteration = 0;
2160 int cont_iocb_prsnt = 0;
2161 int entry_count = 1;
2163 ct_iocb->entry_type = CT_IOCB_TYPE;
2164 ct_iocb->entry_status = 0;
2165 ct_iocb->sys_define = 0;
2166 ct_iocb->handle = sp->handle;
2168 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2169 ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2170 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2172 ct_iocb->cmd_dsd_count =
2173 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2174 ct_iocb->timeout = 0;
2175 ct_iocb->rsp_dsd_count =
2176 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2177 ct_iocb->rsp_byte_count =
2178 cpu_to_le32(bsg_job->reply_payload.payload_len);
2179 ct_iocb->cmd_byte_count =
2180 cpu_to_le32(bsg_job->request_payload.payload_len);
2181 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2182 (bsg_job->request_payload.sg_list)));
2183 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2184 (bsg_job->request_payload.sg_list)));
2185 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2186 (bsg_job->request_payload.sg_list));
2189 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2191 tot_dsds = bsg_job->reply_payload.sg_cnt;
2193 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2195 cont_a64_entry_t *cont_pkt;
2197 /* Allocate additional continuation packets? */
2198 if (avail_dsds == 0) {
2200 * Five DSDs are available in the Cont.
2203 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2205 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2207 cont_iocb_prsnt = 1;
2211 sle_dma = sg_dma_address(sg);
2212 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2213 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2214 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2218 ct_iocb->entry_count = entry_count;
2222 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2223 * @sp: command to send to the ISP
2225 * Returns non-zero if a failure occurred, else zero.
2228 qla82xx_start_scsi(srb_t *sp)
2231 unsigned long flags;
2232 struct scsi_cmnd *cmd;
2239 struct device_reg_82xx __iomem *reg;
2242 uint8_t additional_cdb_len;
2243 struct ct6_dsd *ctx;
2244 struct scsi_qla_host *vha = sp->fcport->vha;
2245 struct qla_hw_data *ha = vha->hw;
2246 struct req_que *req = NULL;
2247 struct rsp_que *rsp = NULL;
2250 /* Setup device pointers. */
2252 reg = &ha->iobase->isp82;
2253 cmd = GET_CMD_SP(sp);
2255 rsp = ha->rsp_q_map[0];
2257 /* So we know we haven't pci_map'ed anything yet */
2260 dbval = 0x04 | (ha->portnum << 5);
2262 /* Send marker if required */
2263 if (vha->marker_needed != 0) {
2264 if (qla2x00_marker(vha, req,
2265 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2266 ql_log(ql_log_warn, vha, 0x300c,
2267 "qla2x00_marker failed for cmd=%p.\n", cmd);
2268 return QLA_FUNCTION_FAILED;
2270 vha->marker_needed = 0;
2273 /* Acquire ring specific lock */
2274 spin_lock_irqsave(&ha->hardware_lock, flags);
2276 /* Check for room in outstanding command list. */
2277 handle = req->current_outstanding_cmd;
2278 for (index = 1; index < req->num_outstanding_cmds; index++) {
2280 if (handle == req->num_outstanding_cmds)
2282 if (!req->outstanding_cmds[handle])
2285 if (index == req->num_outstanding_cmds)
2288 /* Map the sg table so we have an accurate count of sg entries needed */
2289 if (scsi_sg_count(cmd)) {
2290 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2291 scsi_sg_count(cmd), cmd->sc_data_direction);
2292 if (unlikely(!nseg))

        if (tot_dsds > ql2xshiftctondsd) {
                struct cmd_type_6 *cmd_pkt;
                uint16_t more_dsd_lists = 0;
                struct dsd_dma *dsd_ptr;
                uint16_t i;

                more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
                if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
                        ql_dbg(ql_dbg_io, vha, 0x300d,
                            "Num of DSD list %d is more than %d for cmd=%p.\n",
                            more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
                            cmd);
                        goto queuing_error;
                }

                if (more_dsd_lists <= ha->gbl_dsd_avail)
                        goto sufficient_dsds;
                else
                        more_dsd_lists -= ha->gbl_dsd_avail;

                for (i = 0; i < more_dsd_lists; i++) {
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr) {
                                ql_log(ql_log_fatal, vha, 0x300e,
                                    "Failed to allocate memory for dsd_dma "
                                    "for cmd=%p.\n", cmd);
                                goto queuing_error;
                        }

                        dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
                            GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
                        if (!dsd_ptr->dsd_addr) {
                                kfree(dsd_ptr);
                                ql_log(ql_log_fatal, vha, 0x300f,
                                    "Failed to allocate memory for dsd_addr "
                                    "for cmd=%p.\n", cmd);
                                goto queuing_error;
                        }
                        list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
                        ha->gbl_dsd_avail++;
                }

sufficient_dsds:
                req_cnt = 1;

                if (req->cnt < (req_cnt + 2)) {
                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
                            &reg->req_q_out[0]);
                        if (req->ring_index < cnt)
                                req->cnt = cnt - req->ring_index;
                        else
                                req->cnt = req->length -
                                    (req->ring_index - cnt);
                        if (req->cnt < (req_cnt + 2))
                                goto queuing_error;
                }
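
                /*
                 * Clarifying note: req->cnt caches the number of free
                 * request-ring entries and is refreshed from the hardware
                 * out-pointer only when it looks too small.  For example,
                 * with req->length == 2048, ring_index == 2000 and an
                 * out-pointer of 100, the free space wraps:
                 * 2048 - (2000 - 100) = 148 entries.  The "+ 2" keeps a
                 * small cushion so the ring is never driven completely
                 * full.
                 */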

                ctx = sp->u.scmd.ctx =
                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
                if (!ctx) {
                        ql_log(ql_log_fatal, vha, 0x3010,
                            "Failed to allocate ctx for cmd=%p.\n", cmd);
                        goto queuing_error;
                }

                memset(ctx, 0, sizeof(struct ct6_dsd));
                ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
                    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
                if (!ctx->fcp_cmnd) {
                        ql_log(ql_log_fatal, vha, 0x3011,
                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
                        goto queuing_error;
                }

                /* Initialize the DSD list and dma handle */
                INIT_LIST_HEAD(&ctx->dsd_list);
                ctx->dsd_use_cnt = 0;

                if (cmd->cmd_len > 16) {
                        additional_cdb_len = cmd->cmd_len - 16;
                        if ((cmd->cmd_len % 4) != 0) {
                                /* SCSI command bigger than 16 bytes must
                                 * be a multiple of 4.
                                 */
                                ql_log(ql_log_warn, vha, 0x3012,
                                    "scsi cmd len %d not multiple of 4 "
                                    "for cmd=%p.\n", cmd->cmd_len, cmd);
                                goto queuing_error_fcp_cmnd;
                        }
                        ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
                } else {
                        additional_cdb_len = 0;
                        ctx->fcp_cmnd_len = 12 + 16 + 4;
                }
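
                /*
                 * Clarifying note: the FCP_CMND IU built here is 12 bytes
                 * of fixed header (8-byte LUN, CRN, task attribute, task
                 * management flags, additional-CDB-length byte), followed
                 * by the CDB, followed by the 4-byte FCP_DL field; hence
                 * fcp_cmnd_len = 12 + cdb_len + 4, and fcp_dl below sits
                 * at cdb + 16 + additional_cdb_len.
                 */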

                cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

                /* Zero out remaining portion of packet. */
                /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
                clr_ptr = (uint32_t *)cmd_pkt + 2;
                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

                /* Set NPORT-ID and LUN number. */
                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
                cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

                /* Build IOCB segments */
                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
                        goto queuing_error_fcp_cmnd;

                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

                /* Build FCP_CMND IU */
                memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
                int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
                ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

                if (cmd->sc_data_direction == DMA_TO_DEVICE)
                        ctx->fcp_cmnd->additional_cdb_len |= 1;
                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                        ctx->fcp_cmnd->additional_cdb_len |= 2;
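
                /*
                 * Clarifying note: the low two bits of the
                 * additional_cdb_len byte double as the FCP write-data
                 * (bit 0) and read-data (bit 1) flags.  This is also why
                 * an extended CDB length must be a multiple of 4 (checked
                 * above): the stored byte count then has both low-order
                 * bits clear, so the direction flags can be OR'ed in
                 * safely.
                 */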

                /*
                 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
                 */
                if (scsi_populate_tag_msg(cmd, tag)) {
                        switch (tag[0]) {
                        case HEAD_OF_QUEUE_TAG:
                                ctx->fcp_cmnd->task_attribute =
                                    TSK_HEAD_OF_QUEUE;
                                break;
                        case ORDERED_QUEUE_TAG:
                                ctx->fcp_cmnd->task_attribute =
                                    TSK_ORDERED;
                                break;
                        }
                }

                /* Populate the FCP_PRIO. */
                if (ha->flags.fcp_prio_enabled)
                        ctx->fcp_cmnd->task_attribute |=
                            sp->fcport->fcp_prio << 3;

                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

                fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
                    additional_cdb_len);
                *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

                cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
                cmd_pkt->fcp_cmnd_dseg_address[0] =
                    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
                cmd_pkt->fcp_cmnd_dseg_address[1] =
                    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

                sp->flags |= SRB_FCP_CMND_DMA_VALID;
                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
                /* Set total data segment count. */
                cmd_pkt->entry_count = (uint8_t)req_cnt;
                /* Specify response queue number where
                 * completion should happen.
                 */
                cmd_pkt->entry_status = (uint8_t) rsp->id;
        } else {
                struct cmd_type_7 *cmd_pkt;

                req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
                if (req->cnt < (req_cnt + 2)) {
                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
                            &reg->req_q_out[0]);
                        if (req->ring_index < cnt)
                                req->cnt = cnt - req->ring_index;
                        else
                                req->cnt = req->length -
                                    (req->ring_index - cnt);
                }
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;

                cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

                /* Zero out remaining portion of packet. */
                /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
                clr_ptr = (uint32_t *)cmd_pkt + 2;
                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

                /* Set NPORT-ID and LUN number. */
                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
                cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
                    sizeof(cmd_pkt->lun));

                /*
                 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
                 */
                if (scsi_populate_tag_msg(cmd, tag)) {
                        switch (tag[0]) {
                        case HEAD_OF_QUEUE_TAG:
                                cmd_pkt->task = TSK_HEAD_OF_QUEUE;
                                break;
                        case ORDERED_QUEUE_TAG:
                                cmd_pkt->task = TSK_ORDERED;
                                break;
                        }
                }

                /* Populate the FCP_PRIO. */
                if (ha->flags.fcp_prio_enabled)
                        cmd_pkt->task |= sp->fcport->fcp_prio << 3;

                /* Load SCSI command packet. */
                memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
                host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

                /* Build IOCB segments */
                qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

                /* Set total data segment count. */
                cmd_pkt->entry_count = (uint8_t)req_cnt;
                /* Specify response queue number where
                 * completion should happen.
                 */
                cmd_pkt->entry_status = (uint8_t) rsp->id;
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        /* Write, read back, and verify the doorbell value. */
        dbval = dbval | (req->id << 8) | (req->ring_index << 16);
        if (ql2xdbwr)
                qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
        else {
                WRT_REG_DWORD(
                    (unsigned long __iomem *)ha->nxdb_wr_ptr,
                    dbval);
                wmb();
                while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
                        WRT_REG_DWORD(
                            (unsigned long __iomem *)ha->nxdb_wr_ptr,
                            dbval);
                        wmb();
                }
        }
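
        /*
         * Clarifying note: the doorbell word packs the base value 0x04
         * and the PCI function number in bits 7:0, the request queue id
         * in bits 15:8, and the new ring index in bits 31:16.  In the
         * non-ql2xdbwr path above, the write is re-posted until the
         * hardware read-back pointer reflects it, guarding against
         * doorbell writes the ISP82xx may drop.
         */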

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error_fcp_cmnd:
        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        if (sp->u.scmd.ctx) {
                mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
                sp->u.scmd.ctx = NULL;
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}

int
qla2x00_start_sp(srb_t *sp)
{
        int rval;
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        void *pkt;
        unsigned long flags;

        rval = QLA_FUNCTION_FAILED;
        spin_lock_irqsave(&ha->hardware_lock, flags);
        pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
        if (!pkt) {
                ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
                    "qla2x00_alloc_iocbs failed.\n");
                goto done;
        }

        rval = QLA_SUCCESS;
        switch (sp->type) {
        case SRB_LOGIN_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_login_iocb(sp, pkt) :
                    qla2x00_login_iocb(sp, pkt);
                break;
        case SRB_LOGOUT_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_logout_iocb(sp, pkt) :
                    qla2x00_logout_iocb(sp, pkt);
                break;
        case SRB_ELS_CMD_RPT:
        case SRB_ELS_CMD_HST:
                qla24xx_els_iocb(sp, pkt);
                break;
        case SRB_CT_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_ct_iocb(sp, pkt) :
                    qla2x00_ct_iocb(sp, pkt);
                break;
        case SRB_ADISC_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_adisc_iocb(sp, pkt) :
                    qla2x00_adisc_iocb(sp, pkt);
                break;
        case SRB_TM_CMD:
                IS_QLAFX00(ha) ?
                    qlafx00_tm_iocb(sp, pkt) :
                    qla24xx_tm_iocb(sp, pkt);
                break;
        case SRB_FXIOCB_DCMD:
        case SRB_FXIOCB_BCMD:
                qlafx00_fxdisc_iocb(sp, pkt);
                break;
        case SRB_ABT_CMD:
                qlafx00_abort_iocb(sp, pkt);
                break;
        default:
                break;
        }

        wmb();
        qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return rval;
}
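
/*
 * Clarifying note: qla2x00_start_sp() is the common dispatch point for
 * non-SCSI IOCBs (login, logout, ELS, CT, ADISC, task management and
 * ISPFx00-specific requests).  Each case above only formats the packet
 * returned by qla2x00_alloc_iocbs(); the request becomes visible to the
 * firmware when qla2x00_start_iocbs() updates the request-queue
 * in-pointer, after the wmb() has made the IOCB contents globally
 * visible.
 */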

static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        uint32_t req_data_len = 0;
        uint32_t rsp_data_len = 0;
        struct scatterlist *sg;
        int index;
        int entry_count = 1;
        struct fc_bsg_job *bsg_job = sp->u.bsg_job;

        /* Update entry type to indicate a bidirectional command. */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);

        /* Set the transfer direction; in this case set both the read and
         * write flags.  Also set the BD_WRAP_BACK flag: the firmware will
         * take care of assigning DID=SID for outgoing packets.
         */
        cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
        cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
        cmd_pkt->control_flags =
            __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
            BD_WRAP_BACK);

        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
        cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
        cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
        cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

        vha->bidi_stats.transfer_bytes += req_data_len;
        vha->bidi_stats.io_count++;

        vha->qla_stats.output_bytes += req_data_len;
        vha->qla_stats.output_requests++;

        /* Only one DSD is available in the bidirectional IOCB; the
         * remaining DSDs are bundled in continuation IOCBs.
         */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

        index = 0;

        for_each_sg(bsg_job->request_payload.sg_list, sg,
            bsg_job->request_payload.sg_cnt, index) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets */
                if (avail_dsds == 0) {
                        /* A Continuation Type 1 IOCB can accommodate
                         * five DSDs.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        entry_count++;
                }
                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* The read-request DSDs always go to a continuation IOCB,
         * following the write DSDs.  If there is room in the current
         * IOCB they are added there; otherwise a new continuation IOCB
         * is allocated.
         */
        for_each_sg(bsg_job->reply_payload.sg_list, sg,
            bsg_job->reply_payload.sg_cnt, index) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets */
                if (avail_dsds == 0) {
                        /* A Continuation Type 1 IOCB can accommodate
                         * five DSDs.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        entry_count++;
                }
                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* This value should match the number of IOCBs required for
         * this command.
         */
        cmd_pkt->entry_count = entry_count;
}
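
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * because the read DSDs continue filling whatever slots the write DSDs
 * left in the last continuation IOCB, the entry count produced above can
 * be computed up front from the two sg counts alone.  Compiled out here:
 */
#if 0
static uint32_t qla25xx_calc_bidir_iocbs(uint32_t wr_sg_cnt,
    uint32_t rd_sg_cnt)
{
        uint32_t dsds = wr_sg_cnt + rd_sg_cnt;

        /* One DSD rides in the bidir IOCB itself; the remainder are
         * packed five per Continuation Type 1 IOCB.
         */
        if (dsds <= 1)
                return 1;
        return 1 + DIV_ROUND_UP(dsds - 1, 5);
}
#endif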

int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
        uint32_t handle;
        uint32_t index;
        uint16_t req_cnt;
        uint16_t cnt;
        uint32_t *clr_ptr;
        struct cmd_bidir *cmd_pkt = NULL;
        struct rsp_que *rsp;
        struct req_que *req;
        int rval = EXT_STATUS_OK;

        rsp = ha->rsp_q_map[0];
        req = vha->req;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req,
                    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
                        return EXT_STATUS_MAILBOX;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }

        if (index == req->num_outstanding_cmds) {
                rval = EXT_STATUS_BUSY;
                goto queuing_error;
        }

        /* Calculate number of IOCBs required */
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

        /* Check for room on request queue. */
        if (req->cnt < req_cnt + 2) {
                cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < req_cnt + 2) {
                rval = EXT_STATUS_BUSY;
                goto queuing_error;
        }

        cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        /* Set NPORT-ID (of vha) */
        cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
        cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
        cmd_pkt->port_id[1] = vha->d_id.b.area;
        cmd_pkt->port_id[2] = vha->d_id.b.domain;

        qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
        cmd_pkt->entry_status = (uint8_t) rsp->id;
        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        req->cnt -= req_cnt;

        /* Send the command to the firmware */
        wmb();
        qla2x00_start_iocbs(vha, req);
queuing_error:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return rval;
}
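
/*
 * Clarifying note (an inference from how this path is built, not stated
 * in this file): the bidirectional command above targets the port itself
 * -- nport_handle comes from vha->self_login_loop_id and BD_WRAP_BACK is
 * set -- which is what the BSG loopback diagnostics exercise, and
 * EXT_STATUS_* codes are returned to the BSG layer instead of the usual
 * QLA_* codes.
 */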