nvmet-auth: don't try to cancel a non-initialized work_struct
author: Christoph Hellwig <hch@lst.de>
Tue, 20 Sep 2022 13:37:18 +0000 (15:37 +0200)
committer: Christoph Hellwig <hch@lst.de>
Tue, 27 Sep 2022 07:22:09 +0000 (09:22 +0200)
Currently blktests nvme/002 trips up debugobjects if CONFIG_NVME_AUTH is
enabled, but authentication is not on a queue.  This is because
nvmet_auth_sq_free cancels sq->auth_expired_work unconditionally, while
auth_expired_work is only ever initialized if authentication is enabled
for a given controller.

Fix this by moving most of nvmet_init_auth into a new helper that is
called unconditionally when initializing the SQ, and only setting the
result field in the connect command handler.

Fixes: db1312dd9548 ("nvmet: implement basic In-Band Authentication")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.de>
drivers/nvme/target/core.c
drivers/nvme/target/fabrics-cmd-auth.c
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/nvmet.h

index a134579..8e3cf0c 100644 (file)
@@ -830,6 +830,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
        }
        init_completion(&sq->free_done);
        init_completion(&sq->confirm_done);
+       nvmet_auth_sq_init(sq);
 
        return 0;
 }
index 84601e3..7970a76 100644 (file)
@@ -23,17 +23,12 @@ static void nvmet_auth_expired_work(struct work_struct *work)
        sq->dhchap_tid = -1;
 }
 
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+void nvmet_auth_sq_init(struct nvmet_sq *sq)
 {
-       u32 result = le32_to_cpu(req->cqe->result.u32);
-
        /* Initialize in-band authentication */
-       INIT_DELAYED_WORK(&req->sq->auth_expired_work,
-                         nvmet_auth_expired_work);
-       req->sq->authenticated = false;
-       req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
-       result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
-       req->cqe->result.u32 = cpu_to_le32(result);
+       INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
+       sq->authenticated = false;
+       sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
 }
 
 static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
index c1dfdfb..c7e9035 100644 (file)
@@ -272,7 +272,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
        req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
 
        if (nvmet_has_auth(ctrl))
-               nvmet_init_auth(ctrl, req);
+               req->cqe->result.u32 |=
+                       cpu_to_le32((u32)NVME_CONNECT_AUTHREQ_ATR << 16);
 out:
        kfree(d);
 complete:
@@ -333,7 +334,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 
        pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
        if (nvmet_has_auth(ctrl))
-               nvmet_init_auth(ctrl, req);
+               req->cqe->result.u32 |=
+                       cpu_to_le32((u32)NVME_CONNECT_AUTHREQ_ATR << 16);
 
 out:
        kfree(d);
index 6ffeeb0..dfe3894 100644 (file)
@@ -704,7 +704,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
                       bool set_ctrl);
 int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
 int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_auth_sq_init(struct nvmet_sq *sq);
 void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
 void nvmet_auth_sq_free(struct nvmet_sq *sq);
 int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
@@ -726,8 +726,9 @@ static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 {
        return 0;
 }
-static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
-                                  struct nvmet_req *req) {};
+static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
+{
+}
 static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
 static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
 static inline bool nvmet_check_auth_status(struct nvmet_req *req)