blk-mq: only iterate over inflight requests in blk_mq_tagset_busy_iter
Author: Christoph Hellwig <hch@lst.de>
Date: Wed, 30 May 2018 16:51:00 +0000 (18:51 +0200)
Committer: Jens Axboe <axboe@kernel.dk>
Date: Wed, 30 May 2018 17:31:34 +0000 (11:31 -0600)
We already check for started commands in all callbacks, but we should
also protect against already completed commands.  Do this by moving the
checks into the common iteration code.

Acked-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-tag.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nbd.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c

index a4e58fc28a06c574b76a886df0a57d2dbde41e72..70356a2a11ab12a059654bdf8af0a3f2c0eb3b41 100644 (file)
@@ -271,7 +271,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
         * test and set the bit before assining ->rqs[].
         */
        rq = tags->rqs[bitnr];
-       if (rq)
+       if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
                iter_data->fn(rq, iter_data->data, reserved);
 
        return true;
index 95657b81454360b955e3ca2313713c31d1d4228f..c73626decb46e9af8c694eef5820719707d2faaa 100644 (file)
@@ -2725,15 +2725,11 @@ static void mtip_softirq_done_fn(struct request *rq)
        blk_mq_end_request(rq, cmd->status);
 }
 
-static void mtip_abort_cmd(struct request *req, void *data,
-                                                       bool reserved)
+static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
 {
        struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
        struct driver_data *dd = data;
 
-       if (!blk_mq_request_started(req))
-               return;
-
        dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
 
        clear_bit(req->tag, dd->port->cmds_to_issue);
@@ -2741,14 +2737,10 @@ static void mtip_abort_cmd(struct request *req, void *data,
        mtip_softirq_done_fn(req);
 }
 
-static void mtip_queue_cmd(struct request *req, void *data,
-                                                       bool reserved)
+static void mtip_queue_cmd(struct request *req, void *data, bool reserved)
 {
        struct driver_data *dd = data;
 
-       if (!blk_mq_request_started(req))
-               return;
-
        set_bit(req->tag, dd->port->cmds_to_issue);
        blk_abort_request(req);
 }
index a6e3a6f0579195ff8157694fd4a65ab0e1867ffc..3ed1ef8ee5289d4202d0901f93138e9612bbdaff 100644 (file)
@@ -676,11 +676,8 @@ static void recv_work(struct work_struct *work)
 
 static void nbd_clear_req(struct request *req, void *data, bool reserved)
 {
-       struct nbd_cmd *cmd;
+       struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
-       if (!blk_mq_request_started(req))
-               return;
-       cmd = blk_mq_rq_to_pdu(req);
        cmd->status = BLK_STS_IOERR;
        blk_mq_complete_request(req);
 }
index 2c4cf65641a6b12d7706ae1214f64d1f6c3bc15f..70c3961676e7a8d81868780f59a36d7e265d83d8 100644 (file)
@@ -242,9 +242,6 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
 void nvme_cancel_request(struct request *req, void *data, bool reserved)
 {
-       if (!blk_mq_request_started(req))
-               return;
-
        dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
                                "Cancelling I/O %d", req->tag);
 
index ac35a80f5532c17cdd46e7d7cf443cf7f514af63..0bad65803271ff68bc883e0dd16c78b8386fabf8 100644 (file)
@@ -2393,9 +2393,6 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
 
-       if (!blk_mq_request_started(req))
-               return;
-
        __nvme_fc_abort_op(ctrl, op);
 }