atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error));
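+ /* Deferral stats: no ctx buffer, deferred fod, and WQ-full waits */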
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "DELAY: ctx %08x fod %08x wqfull %08x\n",
+ atomic_read(&tgtp->defer_ctx),
+ atomic_read(&tgtp->defer_fod),
+ atomic_read(&tgtp->defer_wqfull));
+
/* Calculate outstanding IOs */
tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
tot += atomic_read(&tgtp->xmt_fcp_release);
drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
if (rc < 0) {
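+ /* Repost failed; release the buffer via the RQ's free callback */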
+ (rqbp->rqb_free_buffer)(phba, rqb_entry);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6409 Cannot post to RQ %d: %x %x\n",
+ "6409 Cannot post to HRQ %d: %x %x %x "
+ "DRQ %x %x\n",
rqb_entry->hrq->queue_id,
rqb_entry->hrq->host_index,
- rqb_entry->hrq->hba_index);
- (rqbp->rqb_free_buffer)(phba, rqb_entry);
+ rqb_entry->hrq->hba_index,
+ rqb_entry->hrq->entry_count,
+ rqb_entry->drq->host_index,
+ rqb_entry->drq->hba_index);
} else {
list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
"NVMET RCV BUSY: xri x%x sz %d "
"from %06x\n",
oxid, size, sid);
- /* defer repost rcv buffer till .defer_rcv callback */
- ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
return;
}
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
wq->q_flag |= HBA_NVMET_WQFULL;
spin_unlock_irqrestore(&pring->ring_lock, iflags);
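+ /* WQE was queued on the wqfull list; count the deferral */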
+ atomic_inc(&lpfc_nvmep->defer_wqfull);
return 0;
}
tgtp = phba->targetport->private;
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
- if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
- else
- nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
- ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+
+ /* Free the nvmebuf since a new buffer already replaced it */
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
atomic_set(&tgtp->xmt_abort_sol, 0);
atomic_set(&tgtp->xmt_abort_rsp, 0);
atomic_set(&tgtp->xmt_abort_rsp_error, 0);
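+ /* Clear the deferral counters as well */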
+ atomic_set(&tgtp->defer_ctx, 0);
+ atomic_set(&tgtp->defer_fod, 0);
+ atomic_set(&tgtp->defer_wqfull, 0);
}
return error;
}
lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
oxid, size, smp_processor_id());
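+ /* Fetch tgtp early; the deferral counters below need it */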
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+
if (!ctx_buf) {
/* Queue this NVME IO to process later */
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
lpfc_post_rq_buffer(
phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+
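+ /* No ctx buffer was available; the IO was queued for later */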
+ atomic_inc(&tgtp->defer_ctx);
return;
}
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
sid = sli4_sid_from_fc_hdr(fc_hdr);
/* Processing of FCP command is deferred */
if (rc == -EOVERFLOW) {
+ /*
+ * Post a brand new DMA buffer to the RQ and defer
+ * freeing the rcv buffer until the .defer_rcv callback
+ */
+ qno = nvmebuf->idx;
+ lpfc_post_rq_buffer(
+ phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+ phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+
lpfc_nvmeio_data(phba,
"NVMET RCV BUSY: xri x%x sz %d from %06x\n",
oxid, size, sid);
- /* defer reposting rcv buffer till .defer_rcv callback */
- ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
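+ /* The command was deferred by the transport; count it */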
+ atomic_inc(&tgtp->defer_fod);
return;
}
ctxp->rqb_buffer = nvmebuf;
atomic_t xmt_fcp_rsp_aborted;
atomic_t xmt_fcp_rsp_drop;
-
/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
atomic_t xmt_fcp_xri_abort_cqe;
atomic_t xmt_fcp_abort;
atomic_t xmt_abort_unsol;
atomic_t xmt_abort_rsp;
atomic_t xmt_abort_rsp_error;
+
+ /* Stats counters - deferred IO */
+ atomic_t defer_ctx;
+ atomic_t defer_fod;
+ atomic_t defer_wqfull;
};
struct lpfc_nvmet_ctx_info {
#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
-#define LPFC_NVMET_DEFER_RCV_REPOST 0x20 /* repost to RQ on defer rcv */
#define LPFC_NVMET_DEFER_WQFULL 0x40 /* Waiting on a free WQE */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
struct lpfc_rqe hrqe;
struct lpfc_rqe drqe;
struct lpfc_rqb *rqbp;
+ unsigned long flags;
struct rqb_dmabuf *rqb_buffer;
LIST_HEAD(rqb_buf_list);
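+ /* Take hbalock while posting buffers to the RQ pair */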
+ spin_lock_irqsave(&phba->hbalock, flags);
rqbp = hrq->rqbp;
for (i = 0; i < count; i++) {
/* IF RQ is already full, don't bother */
drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
if (rc < 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6421 Cannot post to HRQ %d: %x %x %x "
+ "DRQ %x %x\n",
+ hrq->queue_id,
+ hrq->host_index,
+ hrq->hba_index,
+ hrq->entry_count,
+ drq->host_index,
+ drq->hba_index);
rqbp->rqb_free_buffer(phba, rqb_buffer);
} else {
list_add_tail(&rqb_buffer->hbuf.list,
	      &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
}
}
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 1;
}