scsi: lpfc: Fix -EOVERFLOW behavior for NVMET and defer_rcv
author:    James Smart <jsmart2021@gmail.com>
           Sat, 9 Dec 2017 01:18:04 +0000 (17:18 -0800)
committer: Martin K. Petersen <martin.petersen@oracle.com>
           Thu, 21 Dec 2017 02:11:45 +0000 (21:11 -0500)
The driver is all set to handle the defer_rcv api for the nvmet_fc
transport, yet didn't properly recognize the return status when the
defer_rcv occurred. The driver treated it simply as an error and aborted
the io. Several residual issues occurred at that point.

Finish the defer_rcv support: recognize the return status when the io
request is being handled in a deferred style. This stops the rogue
aborts; replenish the async cmd rcv buffer in the deferred receive if
needed.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_nvmet.h
drivers/scsi/lpfc/lpfc_sli.c

index d80cd1d..02a1cfa 100644 (file)
@@ -38,6 +38,7 @@
 
 #include <../drivers/nvme/host/nvme.h>
 #include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
 
 #include "lpfc_version.h"
 #include "lpfc_hw4.h"
@@ -218,6 +219,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
                ctxp->entry_cnt = 1;
                ctxp->flag = 0;
                ctxp->ctxbuf = ctx_buf;
+               ctxp->rqb_buffer = (void *)nvmebuf;
                spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -253,6 +255,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
                        return;
                }
 
+               /* Processing of FCP command is deferred */
+               if (rc == -EOVERFLOW) {
+                       lpfc_nvmeio_data(phba,
+                                        "NVMET RCV BUSY: xri x%x sz %d "
+                                        "from %06x\n",
+                                        oxid, size, sid);
+                       /* defer repost rcv buffer till .defer_rcv callback */
+                       ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+                       atomic_inc(&tgtp->rcv_fcp_cmd_out);
+                       return;
+               }
                atomic_inc(&tgtp->rcv_fcp_cmd_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -921,7 +934,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 
        tgtp = phba->targetport->private;
        atomic_inc(&tgtp->rcv_fcp_cmd_defer);
-       lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+       if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+       else
+               nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+       ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
 }
 
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1693,6 +1710,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        ctxp->entry_cnt = 1;
        ctxp->flag = 0;
        ctxp->ctxbuf = ctx_buf;
+       ctxp->rqb_buffer = (void *)nvmebuf;
        spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1726,6 +1744,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 
        /* Process FCP command */
        if (rc == 0) {
+               ctxp->rqb_buffer = NULL;
                atomic_inc(&tgtp->rcv_fcp_cmd_out);
                lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                return;
@@ -1737,10 +1756,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                                 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
                                 oxid, size, sid);
                /* defer reposting rcv buffer till .defer_rcv callback */
-               ctxp->rqb_buffer = nvmebuf;
+               ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
                atomic_inc(&tgtp->rcv_fcp_cmd_out);
                return;
        }
+       ctxp->rqb_buffer = nvmebuf;
 
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
index 6723e7b..0309602 100644 (file)
@@ -126,6 +126,7 @@ struct lpfc_nvmet_rcv_ctx {
 #define LPFC_NVMET_XBUSY               0x4  /* XB bit set on IO cmpl */
 #define LPFC_NVMET_CTX_RLS             0x8  /* ctx free requested */
 #define LPFC_NVMET_ABTS_RCV            0x10  /* ABTS received on exchange */
+#define LPFC_NVMET_DEFER_RCV_REPOST    0x20  /* repost to RQ on defer rcv */
        struct rqb_dmabuf *rqb_buffer;
        struct lpfc_nvmet_ctxbuf *ctxbuf;
 
index 1d489b8..5f5528a 100644 (file)
@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
-       int put_index;
+       int hq_put_index;
+       int dq_put_index;
 
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
-       put_index = hq->host_index;
-       temp_hrqe = hq->qe[put_index].rqe;
-       temp_drqe = dq->qe[dq->host_index].rqe;
+       hq_put_index = hq->host_index;
+       dq_put_index = dq->host_index;
+       temp_hrqe = hq->qe[hq_put_index].rqe;
+       temp_drqe = dq->qe[dq_put_index].rqe;
 
        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
-       if (put_index != dq->host_index)
+       if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
-       if (((put_index + 1) % hq->entry_count) == hq->hba_index)
+       if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
 
        /* Update the host index to point to the next slot */
-       hq->host_index = ((put_index + 1) % hq->entry_count);
-       dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+       hq->host_index = ((hq_put_index + 1) % hq->entry_count);
+       dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;
 
        /* Ring The Header Receive Queue Doorbell */
@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
-       return put_index;
+       return hq_put_index;
 }
 
 /**
@@ -12887,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "2537 Receive Frame Truncated!!\n");
        case FC_STATUS_RQ_SUCCESS:
-               lpfc_sli4_rq_release(hrq, drq);
                spin_lock_irqsave(&phba->hbalock, iflags);
+               lpfc_sli4_rq_release(hrq, drq);
                dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
                if (!dma_buf) {
                        hrq->RQ_no_buf_found++;
@@ -13290,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                                "6126 Receive Frame Truncated!!\n");
                /* Drop thru */
        case FC_STATUS_RQ_SUCCESS:
-               lpfc_sli4_rq_release(hrq, drq);
                spin_lock_irqsave(&phba->hbalock, iflags);
+               lpfc_sli4_rq_release(hrq, drq);
                dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
                if (!dma_buf) {
                        hrq->RQ_no_buf_found++;