blk-mq: don't kick the requeue_list in blk_mq_add_to_requeue_list
author: Christoph Hellwig <hch@lst.de>
        Thu, 13 Apr 2023 06:40:53 +0000 (08:40 +0200)
committer: Jens Axboe <axboe@kernel.dk>
        Thu, 13 Apr 2023 12:52:30 +0000 (06:52 -0600)
blk_mq_add_to_requeue_list takes a bool parameter to control whether to
kick the requeue list at the end of the function.  Move the call to
blk_mq_kick_requeue_list to the callers that want it instead.
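
In short, callers that previously passed kick_requeue_list=true now kick
the list themselves.  A sketch of the old and new calling conventions,
taken from the hunks below (rq stands for any request being requeued):

	/* before: the second bool decided whether the requeue work was kicked */
	blk_mq_add_to_requeue_list(rq, true, true);

	/* after: add the request, then kick the requeue list explicitly */
	blk_mq_add_to_requeue_list(rq, true);
	blk_mq_kick_requeue_list(rq->q);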

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-17-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-flush.c
block/blk-mq.c
block/blk-mq.h

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3561aba..015982b 100644
@@ -188,7 +188,8 @@ static void blk_flush_complete_seq(struct request *rq,
 
        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
-               blk_mq_add_to_requeue_list(rq, true, true);
+               blk_mq_add_to_requeue_list(rq, true);
+               blk_mq_kick_requeue_list(q);
                break;
 
        case REQ_FSEQ_DONE:
@@ -345,7 +346,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
        smp_wmb();
        req_ref_set(flush_rq, 1);
 
-       blk_mq_add_to_requeue_list(flush_rq, false, true);
+       blk_mq_add_to_requeue_list(flush_rq, false);
+       blk_mq_kick_requeue_list(q);
 }
 
 static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cde7ba9..db806c1 100644
@@ -1412,12 +1412,17 @@ static void __blk_mq_requeue_request(struct request *rq)
 
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
+       struct request_queue *q = rq->q;
+
        __blk_mq_requeue_request(rq);
 
        /* this request will be re-inserted to io scheduler queue */
        blk_mq_sched_requeue_request(rq);
 
-       blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
+       blk_mq_add_to_requeue_list(rq, true);
+
+       if (kick_requeue_list)
+               blk_mq_kick_requeue_list(q);
 }
 EXPORT_SYMBOL(blk_mq_requeue_request);
 
@@ -1459,8 +1464,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
        blk_mq_run_hw_queues(q, false);
 }
 
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-                               bool kick_requeue_list)
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 {
        struct request_queue *q = rq->q;
        unsigned long flags;
@@ -1479,9 +1483,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);
-
-       if (kick_requeue_list)
-               blk_mq_kick_requeue_list(q);
 }
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f30f991..5d3761c 100644
@@ -44,8 +44,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
                             unsigned int);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-                               bool kick_requeue_list);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);