blk-mq: run dispatch lock once in case of issuing from list
author Ming Lei <ming.lei@redhat.com>
Fri, 3 Dec 2021 13:15:34 +0000 (21:15 +0800)
committer Jens Axboe <axboe@kernel.dk>
Fri, 3 Dec 2021 21:51:29 +0000 (14:51 -0700)
It isn't necessary to call blk_mq_run_dispatch_ops() once for each request
that is issued directly; it is enough to run it a single time when issuing
the whole list.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211203131534.3668411-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
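
For context, blk_mq_run_dispatch_ops() is the helper introduced earlier in
this series that brackets a dispatch operation with the queue's RCU (or, for
blocking queues, SRCU) protection. The sketch below is a rough approximation
for illustration only, assuming the q->srcu / blk_queue_has_srcu() plumbing
added earlier in the series; the real macro lives in block/blk-mq.h and
differs in detail:

/*
 * Rough approximation of blk_mq_run_dispatch_ops() for illustration;
 * not the exact definition from block/blk-mq.h.
 */
#define blk_mq_run_dispatch_ops(q, dispatch_ops)			\
do {									\
	if (!blk_queue_has_srcu(q)) {					\
		/* non-blocking queue: plain RCU read-side section */	\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	} else {							\
		/* BLK_MQ_F_BLOCKING queue: use the queue's SRCU */	\
		int srcu_idx;						\
									\
		srcu_idx = srcu_read_lock((q)->srcu);			\
		(dispatch_ops);						\
		srcu_read_unlock((q)->srcu, srcu_idx);			\
	}								\
} while (0)

Entering this protection once around the whole-list issue is sufficient;
re-entering it for every single request on the list only adds overhead,
which is what the hunks below remove.
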
block/blk-mq-sched.c
block/blk-mq.c

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0d72578..55488ba 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,7 +475,8 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                 * us one extra enqueue & dequeue to sw queue.
                 */
                if (!hctx->dispatch_busy && !run_queue_async) {
-                       blk_mq_try_issue_list_directly(hctx, list);
+                       blk_mq_run_dispatch_ops(hctx->queue,
+                               blk_mq_try_issue_list_directly(hctx, list));
                        if (list_empty(list))
                                goto out;
                }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 24c65bb..22ec21a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2464,12 +2464,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 {
-       blk_status_t ret;
-       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-
-       blk_mq_run_dispatch_ops(rq->q,
-               ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
-       return ret;
+       return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
 }
 
 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
@@ -2526,7 +2521,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
        plug->rq_count = 0;
 
        if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
-               blk_mq_plug_issue_direct(plug, false);
+               blk_mq_run_dispatch_ops(plug->mq_list->q,
+                               blk_mq_plug_issue_direct(plug, false));
                if (rq_list_empty(plug->mq_list))
                        return;
        }
@@ -2867,7 +2863,9 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
         * bypass a potential scheduler on the bottom device for
         * insert.
         */
-       return blk_mq_request_issue_directly(rq, true);
+       blk_mq_run_dispatch_ops(rq->q,
+                       ret = blk_mq_request_issue_directly(rq, true));
+       return ret;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);