blk-mq: move blk_mq_flush_plug_list
author: Christoph Hellwig <hch@lst.de>
Wed, 17 Nov 2021 06:13:57 +0000 (07:13 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 29 Nov 2021 13:34:50 +0000 (06:34 -0700)
Move blk_mq_flush_plug_list and blk_mq_plug_issue_direct down in blk-mq.c
to prepare for marking blk_mq_request_issue_directly static without the
need of a forward declaration.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20211117061404.331732-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c

index 3e5dc87..df28e5e 100644 (file)
@@ -2309,98 +2309,6 @@ static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
        *queued = 0;
 }
 
-static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
-{
-       struct blk_mq_hw_ctx *hctx = NULL;
-       struct request *rq;
-       int queued = 0;
-       int errors = 0;
-
-       while ((rq = rq_list_pop(&plug->mq_list))) {
-               bool last = rq_list_empty(plug->mq_list);
-               blk_status_t ret;
-
-               if (hctx != rq->mq_hctx) {
-                       if (hctx)
-                               blk_mq_commit_rqs(hctx, &queued, from_schedule);
-                       hctx = rq->mq_hctx;
-               }
-
-               ret = blk_mq_request_issue_directly(rq, last);
-               switch (ret) {
-               case BLK_STS_OK:
-                       queued++;
-                       break;
-               case BLK_STS_RESOURCE:
-               case BLK_STS_DEV_RESOURCE:
-                       blk_mq_request_bypass_insert(rq, false, last);
-                       blk_mq_commit_rqs(hctx, &queued, from_schedule);
-                       return;
-               default:
-                       blk_mq_end_request(rq, ret);
-                       errors++;
-                       break;
-               }
-       }
-
-       /*
-        * If we didn't flush the entire list, we could have told the driver
-        * there was more coming, but that turned out to be a lie.
-        */
-       if (errors)
-               blk_mq_commit_rqs(hctx, &queued, from_schedule);
-}
-
-void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
-{
-       struct blk_mq_hw_ctx *this_hctx;
-       struct blk_mq_ctx *this_ctx;
-       unsigned int depth;
-       LIST_HEAD(list);
-
-       if (rq_list_empty(plug->mq_list))
-               return;
-       plug->rq_count = 0;
-
-       if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
-               blk_mq_plug_issue_direct(plug, false);
-               if (rq_list_empty(plug->mq_list))
-                       return;
-       }
-
-       this_hctx = NULL;
-       this_ctx = NULL;
-       depth = 0;
-       do {
-               struct request *rq;
-
-               rq = rq_list_pop(&plug->mq_list);
-
-               if (!this_hctx) {
-                       this_hctx = rq->mq_hctx;
-                       this_ctx = rq->mq_ctx;
-               } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
-                       trace_block_unplug(this_hctx->queue, depth,
-                                               !from_schedule);
-                       blk_mq_sched_insert_requests(this_hctx, this_ctx,
-                                               &list, from_schedule);
-                       depth = 0;
-                       this_hctx = rq->mq_hctx;
-                       this_ctx = rq->mq_ctx;
-
-               }
-
-               list_add(&rq->queuelist, &list);
-               depth++;
-       } while (!rq_list_empty(plug->mq_list));
-
-       if (!list_empty(&list)) {
-               trace_block_unplug(this_hctx->queue, depth, !from_schedule);
-               blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
-                                               from_schedule);
-       }
-}
-
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
                unsigned int nr_segs)
 {
@@ -2540,6 +2448,98 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
        return ret;
 }
 
+static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
+{
+       struct blk_mq_hw_ctx *hctx = NULL;
+       struct request *rq;
+       int queued = 0;
+       int errors = 0;
+
+       while ((rq = rq_list_pop(&plug->mq_list))) {
+               bool last = rq_list_empty(plug->mq_list);
+               blk_status_t ret;
+
+               if (hctx != rq->mq_hctx) {
+                       if (hctx)
+                               blk_mq_commit_rqs(hctx, &queued, from_schedule);
+                       hctx = rq->mq_hctx;
+               }
+
+               ret = blk_mq_request_issue_directly(rq, last);
+               switch (ret) {
+               case BLK_STS_OK:
+                       queued++;
+                       break;
+               case BLK_STS_RESOURCE:
+               case BLK_STS_DEV_RESOURCE:
+                       blk_mq_request_bypass_insert(rq, false, last);
+                       blk_mq_commit_rqs(hctx, &queued, from_schedule);
+                       return;
+               default:
+                       blk_mq_end_request(rq, ret);
+                       errors++;
+                       break;
+               }
+       }
+
+       /*
+        * If we didn't flush the entire list, we could have told the driver
+        * there was more coming, but that turned out to be a lie.
+        */
+       if (errors)
+               blk_mq_commit_rqs(hctx, &queued, from_schedule);
+}
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+{
+       struct blk_mq_hw_ctx *this_hctx;
+       struct blk_mq_ctx *this_ctx;
+       unsigned int depth;
+       LIST_HEAD(list);
+
+       if (rq_list_empty(plug->mq_list))
+               return;
+       plug->rq_count = 0;
+
+       if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
+               blk_mq_plug_issue_direct(plug, false);
+               if (rq_list_empty(plug->mq_list))
+                       return;
+       }
+
+       this_hctx = NULL;
+       this_ctx = NULL;
+       depth = 0;
+       do {
+               struct request *rq;
+
+               rq = rq_list_pop(&plug->mq_list);
+
+               if (!this_hctx) {
+                       this_hctx = rq->mq_hctx;
+                       this_ctx = rq->mq_ctx;
+               } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+                       trace_block_unplug(this_hctx->queue, depth,
+                                               !from_schedule);
+                       blk_mq_sched_insert_requests(this_hctx, this_ctx,
+                                               &list, from_schedule);
+                       depth = 0;
+                       this_hctx = rq->mq_hctx;
+                       this_ctx = rq->mq_ctx;
+
+               }
+
+               list_add(&rq->queuelist, &list);
+               depth++;
+       } while (!rq_list_empty(plug->mq_list));
+
+       if (!list_empty(&list)) {
+               trace_block_unplug(this_hctx->queue, depth, !from_schedule);
+               blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
+                                               from_schedule);
+       }
+}
+
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list)
 {