blk-mq: use the I/O scheduler for writes from the flush state machine
author: Bart Van Assche <bvanassche@acm.org>
Fri, 19 May 2023 04:40:47 +0000 (06:40 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Sat, 20 May 2023 01:52:29 +0000 (19:52 -0600)
Send write requests issued by the flush state machine through the normal
I/O submission path including the I/O scheduler (if present) so that I/O
scheduler policies are applied to writes with the FUA flag set.

Separate the I/O scheduler members from the flush members in struct
request since now a request may pass through both an I/O scheduler
and the flush machinery.

Note that the actual flush requests, which have no bio attached, still
bypass the I/O schedulers.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[hch: rebased]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230519044050.107790-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
include/linux/blk-mq.h

index c0b3940..aac67bc 100644 (file)
@@ -458,7 +458,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
                 * Flush/passthrough requests are special and go directly to the
                 * dispatch list.
                 */
-               if (!op_is_flush(data->cmd_flags) &&
+               if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
                    !blk_op_is_passthrough(data->cmd_flags)) {
                        struct elevator_mq_ops *ops = &q->elevator->type->ops;
 
@@ -2497,7 +2497,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
                 * dispatch it given we prioritize requests in hctx->dispatch.
                 */
                blk_mq_request_bypass_insert(rq, flags);
-       } else if (rq->rq_flags & RQF_FLUSH_SEQ) {
+       } else if (req_op(rq) == REQ_OP_FLUSH) {
                /*
                 * Firstly normal IO request is inserted to scheduler queue or
                 * sw queue, meantime we add flush request to dispatch queue(
index 49d14b1..935201c 100644 (file)
@@ -169,25 +169,20 @@ struct request {
                void *completion_data;
        };
 
-
        /*
         * Three pointers are available for the IO schedulers, if they need
-        * more they have to dynamically allocate it.  Flush requests are
-        * never put on the IO scheduler. So let the flush fields share
-        * space with the elevator data.
+        * more they have to dynamically allocate it.
         */
-       union {
-               struct {
-                       struct io_cq            *icq;
-                       void                    *priv[2];
-               } elv;
-
-               struct {
-                       unsigned int            seq;
-                       struct list_head        list;
-                       rq_end_io_fn            *saved_end_io;
-               } flush;
-       };
+       struct {
+               struct io_cq            *icq;
+               void                    *priv[2];
+       } elv;
+
+       struct {
+               unsigned int            seq;
+               struct list_head        list;
+               rq_end_io_fn            *saved_end_io;
+       } flush;
 
        union {
                struct __call_single_data csd;