Merge tag 'scsi-sg' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 877d1fb..faa596f 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -220,19 +220,68 @@ lpfc_nvmet_cmd_template(void)
        /* Words 12, 13, 14, 15 are zero */
 }
 
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
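+/* Walk the active context list, under t_active_list_lock, and return
+ * the receive context bound to the given XRI, or NULL if none matches.
+ */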
+static struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
+{
+       struct lpfc_nvmet_rcv_ctx *ctxp;
+       unsigned long iflag;
+       bool found = false;
+
+       spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+       list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+               if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+                       continue;
+
+               found = true;
+               break;
+       }
+       spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+       if (found)
+               return ctxp;
+
+       return NULL;
+}
+
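+/* Walk the active context list and return the receive context that
+ * matches both the OXID and the source N_Port ID (SID) of a received
+ * frame, or NULL if none matches.
+ */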
+static struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
+{
+       struct lpfc_nvmet_rcv_ctx *ctxp;
+       unsigned long iflag;
+       bool found = false;
+
+       spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+       list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+               if (ctxp->oxid != oxid || ctxp->sid != sid)
+                       continue;
+
+               found = true;
+               break;
+       }
+       spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+       if (found)
+               return ctxp;
+
+       return NULL;
+}
+#endif
+
 static void
 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
 {
        lockdep_assert_held(&ctxp->ctxlock);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6313 NVMET Defer ctx release xri x%x flg x%x\n",
+                       "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
                        ctxp->oxid, ctxp->flag);
 
        if (ctxp->flag & LPFC_NVMET_CTX_RLS)
                return;
 
        ctxp->flag |= LPFC_NVMET_CTX_RLS;
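+       /* Move the context from the active list to the abort list so it
+        * is freed once the outstanding exchange is fully aborted.
+        */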
+       spin_lock(&phba->sli4_hba.t_active_list_lock);
+       list_del(&ctxp->list);
+       spin_unlock(&phba->sli4_hba.t_active_list_lock);
        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
@@ -343,16 +392,23 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
        }
 
        if (ctxp->rqb_buffer) {
-               nvmebuf = ctxp->rqb_buffer;
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
-               ctxp->rqb_buffer = NULL;
-               if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
-                       ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
-                       spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
-                       nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+               nvmebuf = ctxp->rqb_buffer;
+               /* check whether another path freed it while we took the lock */
+               if (nvmebuf) {
+                       ctxp->rqb_buffer = NULL;
+                       if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+                               ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
+                               spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+                               nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
+                                                                   nvmebuf);
+                       } else {
+                               spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+                               /* repost */
+                               lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
+                       }
                } else {
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
-                       lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                }
        }
        ctxp->state = LPFC_NVMET_STE_FREE;
@@ -388,8 +444,9 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
                spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-               if (ctxp->ts_cmd_nvme) {
-                       ctxp->ts_cmd_nvme = ktime_get_ns();
+               /* NOTE: the ISR timestamp is stale when the context is reused */
+               if (ctxp->ts_isr_cmd) {
+                       ctxp->ts_cmd_nvme = 0;
                        ctxp->ts_nvme_data = 0;
                        ctxp->ts_data_wqput = 0;
                        ctxp->ts_isr_data = 0;
@@ -402,9 +459,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 #endif
                atomic_inc(&tgtp->rcv_fcp_cmd_in);
 
-               /*  flag new work queued, replacement buffer has already
-                *  been reposted
-                */
+               /* Indicate that a replacement buffer has been posted */
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
@@ -433,6 +488,9 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
         * Use the CPU context list, from the MRQ the IO was received on
         * (ctxp->idx), to save context structure.
         */
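+       /* First remove the context from the active list; it is being
+        * recycled to the per-CPU free list.
+        */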
+       spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+       list_del_init(&ctxp->list);
+       spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
        cpu = raw_smp_processor_id();
        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
@@ -700,8 +758,10 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                }
 
                lpfc_printf_log(phba, KERN_INFO, logerr,
-                               "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
-                               ctxp->oxid, status, result, ctxp->flag);
+                               "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
+                               "XBUSY:x%x\n",
+                               ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
+                               status, result, ctxp->flag);
 
        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
@@ -849,7 +909,6 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                 * before freeing ctxp and iocbq.
                 */
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-               ctxp->rqb_buffer = 0;
                atomic_inc(&nvmep->xmt_ls_rsp);
                return 0;
        }
@@ -922,7 +981,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
            (ctxp->state == LPFC_NVMET_STE_ABORT)) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                               "6102 IO xri x%x aborted\n",
+                               "6102 IO oxid x%x aborted\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
@@ -1022,7 +1081,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                ctxp->hdwq = &phba->sli4_hba.hdwq[0];
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
+                       "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
                        ctxp->oxid, ctxp->flag, ctxp->state);
 
        lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
@@ -1035,7 +1094,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
        /* Since iaab/iaar are NOT set, we need to check
         * if the firmware is in the process of aborting the IO
         */
-       if (ctxp->flag & LPFC_NVMET_XBUSY) {
+       if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                return;
        }
@@ -1098,6 +1157,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                         ctxp->state, aborting);
 
        atomic_inc(&lpfc_nvmep->xmt_fcp_release);
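+       /* The transport has released the IO; it no longer needs to be
+        * notified if an ABTS arrives for this exchange.
+        */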
+       ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
 
        if (aborting)
                return;
@@ -1122,7 +1182,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 
        if (!nvmebuf) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
-                               "6425 Defer rcv: no buffer xri x%x: "
+                               "6425 Defer rcv: no buffer oxid x%x: "
                                "flg %x ste %x\n",
                                ctxp->oxid, ctxp->flag, ctxp->state);
                return;
@@ -1139,6 +1199,22 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 }
 
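+/*
+ * Transport callback invoked when the NVMET subsystem configuration
+ * changes.  Send an RSCN so that initiators rediscover the target and
+ * pick up the change.
+ */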
+static void
+lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
+{
+       struct lpfc_nvmet_tgtport *tgtp;
+       struct lpfc_hba *phba;
+       uint32_t rc;
+
+       tgtp = tgtport->private;
+       phba = tgtp->phba;
+
+       rc = lpfc_issue_els_rscn(phba->pport, 0);
+       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                       "6420 NVMET subsystem change: Notification %s\n",
+                       (rc) ? "Failed" : "Sent");
+}
+
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
@@ -1146,6 +1222,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
        .defer_rcv      = lpfc_nvmet_defer_rcv,
+       .discovery_event = lpfc_nvmet_discovery_event,
 
        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1497,10 +1574,12 @@ void
 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                            struct sli4_wcqe_xri_aborted *axri)
 {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
        uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
        struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
+       struct nvmefc_tgt_fcp_req *req = NULL;
        struct lpfc_nodelist *ndlp;
        unsigned long iflag = 0;
        int rrq_empty = 0;
@@ -1531,7 +1610,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                 */
                if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
                    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
-                       list_del(&ctxp->list);
+                       list_del_init(&ctxp->list);
                        released = true;
                }
                ctxp->flag &= ~LPFC_NVMET_XBUSY;
@@ -1551,7 +1630,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                }
 
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                               "6318 XB aborted oxid %x flg x%x (%x)\n",
+                               "6318 XB aborted oxid x%x flg x%x (%x)\n",
                                ctxp->oxid, ctxp->flag, released);
                if (released)
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
@@ -1562,6 +1641,33 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
        }
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+       ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
+       if (ctxp) {
+               /*
+                *  Abort already done by FW, so BA_ACC sent.
+                *  However, the transport may be unaware.
+                */
+               lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                               "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
+                               "flag x%x oxid x%x rxid x%x\n",
+                               xri, ctxp->state, ctxp->flag, ctxp->oxid,
+                               rxid);
+
+               spin_lock_irqsave(&ctxp->ctxlock, iflag);
+               ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+               ctxp->state = LPFC_NVMET_STE_ABORT;
+               spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+               lpfc_nvmeio_data(phba,
+                                "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+                                xri, raw_smp_processor_id(), 0);
+
+               req = &ctxp->ctx.fcp_req;
+               nvmet_fc_rcv_fcp_abort(phba->targetport, req);
+       }
+#endif
 }
 
 int
@@ -1572,19 +1678,23 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
        struct nvmefc_tgt_fcp_req *rsp;
-       uint16_t xri;
+       uint32_t sid;
+       uint16_t oxid, xri;
        unsigned long iflag = 0;
 
-       xri = be16_to_cpu(fc_hdr->fh_ox_id);
+       sid = sli4_sid_from_fc_hdr(fc_hdr);
+       oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 
        spin_lock_irqsave(&phba->hbalock, iflag);
        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_for_each_entry_safe(ctxp, next_ctxp,
                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 list) {
-               if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+               if (ctxp->oxid != oxid || ctxp->sid != sid)
                        continue;
 
+               xri = ctxp->ctxbuf->sglq->sli4_xritag;
+
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                spin_unlock_irqrestore(&phba->hbalock, iflag);
 
@@ -1609,11 +1719,93 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        spin_unlock_irqrestore(&phba->hbalock, iflag);
 
-       lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-                        xri, raw_smp_processor_id(), 1);
+       /* check the wait list */
+       if (phba->sli4_hba.nvmet_io_wait_cnt) {
+               struct rqb_dmabuf *nvmebuf;
+               struct fc_frame_header *fc_hdr_tmp;
+               u32 sid_tmp;
+               u16 oxid_tmp;
+               bool found = false;
+
+               spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+
+               /* match by oxid and s_id */
+               list_for_each_entry(nvmebuf,
+                                   &phba->sli4_hba.lpfc_nvmet_io_wait_list,
+                                   hbuf.list) {
+                       fc_hdr_tmp = (struct fc_frame_header *)
+                                       (nvmebuf->hbuf.virt);
+                       oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
+                       sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
+                       if (oxid_tmp != oxid || sid_tmp != sid)
+                               continue;
+
+                       lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                                       "6321 NVMET Rcv ABTS oxid x%x from x%x "
+                                       "is waiting for a ctxp\n",
+                                       oxid, sid);
+
+                       list_del_init(&nvmebuf->hbuf.list);
+                       phba->sli4_hba.nvmet_io_wait_cnt--;
+                       found = true;
+                       break;
+               }
+               spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+                                      iflag);
+
+               /* free the buffer; a replacement was already posted to the RQ */
+               if (found) {
+                       nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+                       /* Respond with BA_ACC accordingly */
+                       lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+                       return 0;
+               }
+       }
+
+       /* check active list */
+       ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
+       if (ctxp) {
+               xri = ctxp->ctxbuf->sglq->sli4_xritag;
+
+               spin_lock_irqsave(&ctxp->ctxlock, iflag);
+               ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
+               spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+               lpfc_nvmeio_data(phba,
+                                "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+                                xri, raw_smp_processor_id(), 0);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                               "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
+                               "flag x%x state x%x\n",
+                               ctxp->oxid, xri, ctxp->flag, ctxp->state);
+
+               if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
+                       /* Notify the transport */
+                       nvmet_fc_rcv_fcp_abort(phba->targetport,
+                                              &ctxp->ctx.fcp_req);
+               } else {
+                       cancel_work_sync(&ctxp->ctxbuf->defer_work);
+                       spin_lock_irqsave(&ctxp->ctxlock, iflag);
+                       lpfc_nvmet_defer_release(phba, ctxp);
+                       spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+               }
+               if (ctxp->state == LPFC_NVMET_STE_RCV)
+                       lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+                                                        ctxp->oxid);
+               else
+                       lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+                                                      ctxp->oxid);
+
+               lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+               return 0;
+       }
+
+       lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
+                        oxid, raw_smp_processor_id(), 1);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
+                       "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
 
        /* Respond with BA_RJT accordingly */
        lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
@@ -1697,6 +1889,18 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
                        return;
                }
+               if (rc == WQE_SUCCESS) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
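+                       /* record when the WQE was queued, for latency stats */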
+                       if (ctxp->ts_cmd_nvme) {
+                               if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
+                                       ctxp->ts_status_wqput = ktime_get_ns();
+                               else
+                                       ctxp->ts_data_wqput = ktime_get_ns();
+                       }
+#endif
+               } else {
+                       WARN_ON(rc);
+               }
        }
        wq->q_flag &= ~HBA_NVMET_WQFULL;
        spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -1862,8 +2066,20 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
                return;
        }
 
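+       /* An ABTS has already been received for this exchange; drop the
+        * command rather than delivering it to the transport.
+        */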
+       if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+                               "6324 IO oxid x%x aborted\n",
+                               ctxp->oxid);
+               return;
+       }
+
        payload = (uint32_t *)(nvmebuf->dbuf.virt);
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
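+       /* Flag that the transport is about to see this IO, so that a
+        * later ABTS is forwarded to it as an abort notification.
+        */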
+       ctxp->flag |= LPFC_NVMET_TNOTIFY;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       if (ctxp->ts_isr_cmd)
+               ctxp->ts_cmd_nvme = ktime_get_ns();
+#endif
        /*
         * The calling sequence should be:
         * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
@@ -1913,6 +2129,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
                        phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
                return;
        }
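+       /* The transport rejected the command and will never see this IO;
+        * clear the notify flag before dropping it.
+        */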
+       ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -2002,6 +2219,8 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
  * @phba: pointer to lpfc hba data structure.
  * @idx: relative index of MRQ vector
  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ * @isr_timestamp: timestamp taken in the ISR when the command arrived.
+ * @cqflag: set when the CQ is under load; defers processing to a work queue.
  *
 * This routine is used for processing the WQE associated with an unsolicited
  * event. It first determines whether there is an existing ndlp that matches
@@ -2014,7 +2233,8 @@ static void
 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                            uint32_t idx,
                            struct rqb_dmabuf *nvmebuf,
-                           uint64_t isr_timestamp)
+                           uint64_t isr_timestamp,
+                           uint8_t cqflag)
 {
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
@@ -2101,6 +2321,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        sid = sli4_sid_from_fc_hdr(fc_hdr);
 
        ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
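+       /* Put the context on the active list so in-flight IOs can be
+        * found by XRI or by OXID/SID.
+        */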
+       spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+       list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
+       spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
        if (ctxp->state != LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6414 NVMET Context corrupt %d %d oxid x%x\n",
@@ -2123,24 +2346,41 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-       if (isr_timestamp) {
+       if (isr_timestamp)
                ctxp->ts_isr_cmd = isr_timestamp;
-               ctxp->ts_cmd_nvme = ktime_get_ns();
-               ctxp->ts_nvme_data = 0;
-               ctxp->ts_data_wqput = 0;
-               ctxp->ts_isr_data = 0;
-               ctxp->ts_data_nvme = 0;
-               ctxp->ts_nvme_status = 0;
-               ctxp->ts_status_wqput = 0;
-               ctxp->ts_isr_status = 0;
-               ctxp->ts_status_nvme = 0;
-       } else {
-               ctxp->ts_cmd_nvme = 0;
-       }
+       ctxp->ts_cmd_nvme = 0;
+       ctxp->ts_nvme_data = 0;
+       ctxp->ts_data_wqput = 0;
+       ctxp->ts_isr_data = 0;
+       ctxp->ts_data_nvme = 0;
+       ctxp->ts_nvme_status = 0;
+       ctxp->ts_status_wqput = 0;
+       ctxp->ts_isr_status = 0;
+       ctxp->ts_status_nvme = 0;
 #endif
 
        atomic_inc(&tgtp->rcv_fcp_cmd_in);
-       lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+       /* Process inline unless the CQ is under load (cqflag set) */
+       if (!cqflag) {
+               lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+               return;
+       }
+
+       if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
+               atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+               lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                               "6325 Unable to queue work for oxid x%x. "
+                               "FCP Drop IO [x%x x%x x%x]\n",
+                               ctxp->oxid,
+                               atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->rcv_fcp_cmd_out),
+                               atomic_read(&tgtp->xmt_fcp_release));
+
+               spin_lock_irqsave(&ctxp->ctxlock, iflag);
+               lpfc_nvmet_defer_release(phba, ctxp);
+               spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+               lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+       }
 }
 
 /**
@@ -2177,6 +2417,8 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  * @phba: pointer to lpfc hba data structure.
  * @idx: relative index of MRQ vector
  * @nvmebuf: pointer to received nvme data structure.
+ * @isr_timestamp: timestamp taken in the ISR when the command arrived.
+ * @cqflag: set when the CQ is under load; defers processing to a work queue.
  *
  * This routine is used to process an unsolicited event received from a SLI
  * (Service Level Interface) ring. The actual processing of the data buffer
@@ -2188,14 +2430,14 @@ void
 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
                           uint32_t idx,
                           struct rqb_dmabuf *nvmebuf,
-                          uint64_t isr_timestamp)
+                          uint64_t isr_timestamp,
+                          uint8_t cqflag)
 {
        if (phba->nvmet_support == 0) {
                lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
                return;
        }
-       lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
-                                   isr_timestamp);
+       lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
 }
 
 /**
@@ -2732,7 +2974,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
            !(ctxp->flag & LPFC_NVMET_XBUSY)) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-               list_del(&ctxp->list);
+               list_del_init(&ctxp->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                released = true;
        }
@@ -2741,7 +2983,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        atomic_inc(&tgtp->xmt_abort_rsp);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6165 ABORT cmpl: xri x%x flg x%x (%d) "
+                       "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
                        "WCQE: %08x %08x %08x %08x\n",
                        ctxp->oxid, ctxp->flag, released,
                        wcqe->word0, wcqe->total_data_placed,
@@ -2816,7 +3058,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
            !(ctxp->flag & LPFC_NVMET_XBUSY)) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-               list_del(&ctxp->list);
+               list_del_init(&ctxp->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                released = true;
        }
@@ -2825,7 +3067,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        atomic_inc(&tgtp->xmt_abort_rsp);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6316 ABTS cmpl xri x%x flg x%x (%x) "
+                       "6316 ABTS cmpl oxid x%x flg x%x (%x) "
                        "WCQE: %08x %08x %08x %08x\n",
                        ctxp->oxid, ctxp->flag, released,
                        wcqe->word0, wcqe->total_data_placed,
@@ -3196,7 +3438,7 @@ aerr:
        spin_lock_irqsave(&ctxp->ctxlock, flags);
        if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-               list_del(&ctxp->list);
+               list_del_init(&ctxp->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                released = true;
        }
@@ -3205,8 +3447,9 @@ aerr:
 
        atomic_inc(&tgtp->xmt_abort_rsp_error);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
-                       "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
-                       ctxp->oxid, rc);
+                       "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
+                       "(%x)\n",
+                       ctxp->oxid, rc, released);
        if (released)
                lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
        return 1;