nvme: handle effects after freeing the request
author     Keith Busch <kbusch@kernel.org>
Mon, 19 Sep 2022 19:36:46 +0000 (12:36 -0700)
committer  Christoph Hellwig <hch@lst.de>
Tue, 27 Sep 2022 07:15:56 +0000 (09:15 +0200)
If a reset occurs after the scan work attempts to issue a command, the
reset may quiesce the admin queue, which blocks the scan work's command
from dispatching. The scan work will not be able to complete while the
queue is quiesced.

Meanwhile, the reset work will cancel all outstanding admin tags and
wait until all requests have transitioned to idle, which includes the
passthrough request. But the passthrough request won't be set to idle
until after the scan_work flushes, so we're deadlocked.
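
For reference, this is the pre-patch ordering inside nvme_execute_passthru_rq(),
condensed from the lines removed in the core.c hunk below; the request stays
allocated across nvme_passthru_end(), which is where the wait on the scan_work
flush happens:

    effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
    ret = nvme_execute_rq(rq, false);
    if (effects)
            /* waits for the scan_work flush described above; rq is still allocated */
            nvme_passthru_end(ctrl, effects, cmd, ret);
    return ret;     /* the caller can free rq only after this returns */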

Fix this by handling the end effects after the request has been freed.
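
Condensed sketch of the new call-site ordering, matching the ioctl.c and
target/passthru.c hunks below (error handling trimmed): the effects are
recorded before execution, the request is freed first, and only then are
the effects handled:

    ctrl = nvme_req(req)->ctrl;                    /* saved before the request is freed */
    ret = nvme_execute_passthru_rq(req, &effects); /* records the start effects, runs the command */
    ...
    blk_mq_free_request(req);                      /* request goes idle; reset work can make progress */
    if (effects)
            nvme_passthru_end(ctrl, effects, cmd, ret);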

Link: https://bugzilla.kernel.org/show_bug.cgi?id=216354
Reported-by: Jonathan Derrick <Jonathan.Derrick@solidigm.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chao Leng <lengchao@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/host/core.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/nvme.h
drivers/nvme/target/passthru.c

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8c9c117..ea6694f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1111,8 +1111,8 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        return effects;
 }
 
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
-                             struct nvme_command *cmd, int status)
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+                      struct nvme_command *cmd, int status)
 {
        if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
                nvme_unfreeze(ctrl);
@@ -1148,21 +1148,16 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
                break;
        }
 }
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
 
-int nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
 {
        struct nvme_command *cmd = nvme_req(rq)->cmd;
        struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
        struct nvme_ns *ns = rq->q->queuedata;
-       u32 effects;
-       int  ret;
 
-       effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
-       ret = nvme_execute_rq(rq, false);
-       if (effects) /* nothing to be done for zero cmd effects */
-               nvme_passthru_end(ctrl, effects, cmd, ret);
-
-       return ret;
+       *effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+       return nvme_execute_rq(rq, false);
 }
 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
 
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 27614be..d3281f8 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -136,9 +136,11 @@ static int nvme_submit_user_cmd(struct request_queue *q,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, u64 *result, unsigned timeout, bool vec)
 {
+       struct nvme_ctrl *ctrl;
        struct request *req;
        void *meta = NULL;
        struct bio *bio;
+       u32 effects;
        int ret;
 
        req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
@@ -147,8 +149,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
                return PTR_ERR(req);
 
        bio = req->bio;
+       ctrl = nvme_req(req)->ctrl;
 
-       ret = nvme_execute_passthru_rq(req);
+       ret = nvme_execute_passthru_rq(req, &effects);
 
        if (result)
                *result = le64_to_cpu(nvme_req(req)->result.u64);
@@ -158,6 +161,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
        if (bio)
                blk_rq_unmap_user(bio);
        blk_mq_free_request(req);
+
+       if (effects)
+               nvme_passthru_end(ctrl, effects, cmd, ret);
+
        return ret;
 }
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1bdf714..a0bf956 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1023,7 +1023,9 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                         u8 opcode);
-int nvme_execute_passthru_rq(struct request *rq);
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+                      struct nvme_command *cmd, int status);
 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
 void nvme_put_ns(struct nvme_ns *ns);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 6f39a29..94d3153 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -215,9 +215,11 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 {
        struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
        struct request *rq = req->p.rq;
+       struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+       u32 effects;
        int status;
 
-       status = nvme_execute_passthru_rq(rq);
+       status = nvme_execute_passthru_rq(rq, &effects);
 
        if (status == NVME_SC_SUCCESS &&
            req->cmd->common.opcode == nvme_admin_identify) {
@@ -238,6 +240,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
        req->cqe->result = nvme_req(rq)->result;
        nvmet_req_complete(req, status);
        blk_mq_free_request(rq);
+
+       if (effects)
+               nvme_passthru_end(ctrl, effects, req->cmd, status);
 }
 
 static void nvmet_passthru_req_done(struct request *rq,