block: replace the spin argument to blk_poll with a flags argument
author: Christoph Hellwig <hch@lst.de>
Tue, 12 Oct 2021 11:12:19 +0000 (13:12 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 18 Oct 2021 12:17:36 +0000 (06:17 -0600)
Switch the boolean spin argument to blk_poll to passing a set of flags
instead.  This will allow polling behavior to be controlled in a more
fine-grained way.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-10-hch@lst.de
[axboe: adapt to changed io_uring iopoll]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-exec.c
block/blk-mq.c
block/fops.c
fs/io_uring.c
fs/iomap/direct-io.c
include/linux/blkdev.h
include/linux/fs.h
include/linux/iomap.h
mm/page_io.c

index d6cd501..1fa7f25 100644 (file)
@@ -71,7 +71,7 @@ static bool blk_rq_is_poll(struct request *rq)
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
        do {
-               blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
+               blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0);
                cond_resched();
        } while (!completion_done(wait));
 }
index 7d0d947..6609e10 100644 (file)
@@ -4052,7 +4052,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
 }
 
 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
-               bool spin)
+               unsigned int flags)
 {
        struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
        long state = get_current_state();
@@ -4075,7 +4075,7 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
                if (task_is_running(current))
                        return 1;
 
-               if (ret < 0 || !spin)
+               if (ret < 0 || (flags & BLK_POLL_ONESHOT))
                        break;
                cpu_relax();
        } while (!need_resched());
@@ -4088,15 +4088,13 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
  * blk_poll - poll for IO completions
  * @q:  the queue
  * @cookie: cookie passed back at IO submission time
- * @spin: whether to spin for completions
+ * @flags: BLK_POLL_* flags that control the behavior
  *
  * Description:
  *    Poll for completions on the passed in queue. Returns number of
- *    completed entries found. If @spin is true, then blk_poll will continue
- *    looping until at least one completion is found, unless the task is
- *    otherwise marked running (or we need to reschedule).
+ *    completed entries found.
  */
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
 {
        if (cookie == BLK_QC_T_NONE ||
            !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
@@ -4105,12 +4103,11 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
        if (current->plug)
                blk_flush_plug_list(current->plug, false);
 
-       /* If specified not to spin, we also should not sleep. */
-       if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+       if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
                if (blk_mq_poll_hybrid(q, cookie))
                        return 1;
        }
-       return blk_mq_poll_classic(q, cookie, spin);
+       return blk_mq_poll_classic(q, cookie, flags);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
 
index 15324f2..db8f2fe 100644 (file)
@@ -108,7 +108,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(bdev_get_queue(bdev), qc, true))
+                   !blk_poll(bdev_get_queue(bdev), qc, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);
@@ -141,12 +141,12 @@ struct blkdev_dio {
 
 static struct bio_set blkdev_dio_pool;
 
-static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
+static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags)
 {
        struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
        struct request_queue *q = bdev_get_queue(bdev);
 
-       return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
+       return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
 }
 
 static void blkdev_bio_end_io(struct bio *bio)
@@ -297,7 +297,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                if (!READ_ONCE(dio->waiter))
                        break;
 
-               if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
+               if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);
index d2e8678..541fec2 100644 (file)
@@ -2457,14 +2457,15 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
                        long min)
 {
        struct io_kiocb *req, *tmp;
+       unsigned int poll_flags = 0;
        LIST_HEAD(done);
-       bool spin;
 
        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list, and we're under the requested amount.
         */
-       spin = !ctx->poll_multi_queue && *nr_events < min;
+       if (ctx->poll_multi_queue || *nr_events >= min)
+               poll_flags |= BLK_POLL_ONESHOT;
 
        list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
                struct kiocb *kiocb = &req->rw.kiocb;
@@ -2482,11 +2483,11 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
                if (!list_empty(&done))
                        break;
 
-               ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
+               ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags);
                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
-                       spin = false;
+                       poll_flags |= BLK_POLL_ONESHOT;
 
                /* iopoll may have completed current req */
                if (READ_ONCE(req->iopoll_completed))
index 560ae96..236aba2 100644 (file)
@@ -49,13 +49,13 @@ struct iomap_dio {
        };
 };
 
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags)
 {
        struct request_queue *q = READ_ONCE(kiocb->private);
 
        if (!q)
                return 0;
-       return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
+       return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
 }
 EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
 
@@ -642,7 +642,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
                            !dio->submit.last_queue ||
                            !blk_poll(dio->submit.last_queue,
-                                        dio->submit.cookie, true))
+                                        dio->submit.cookie, 0))
                                blk_io_schedule();
                }
                __set_current_state(TASK_RUNNING);
index 17705c9..e177346 100644 (file)
@@ -564,7 +564,9 @@ extern const char *blk_op_str(unsigned int op);
 int blk_status_to_errno(blk_status_t status);
 blk_status_t errno_to_blk_status(int errno);
 
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
+/* only poll the hardware once, don't continue until a completion was found */
+#define BLK_POLL_ONESHOT               (1 << 0)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
index e7a6333..c443cdd 100644 (file)
@@ -2075,7 +2075,7 @@ struct file_operations {
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
-       int (*iopoll)(struct kiocb *kiocb, bool spin);
+       int (*iopoll)(struct kiocb *kiocb, unsigned int flags);
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
index 24f8489..1e86b65 100644 (file)
@@ -337,7 +337,7 @@ struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
                unsigned int dio_flags);
 ssize_t iomap_dio_complete(struct iomap_dio *dio);
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags);
 
 #ifdef CONFIG_SWAP
 struct file;
index c493ce9..5d5543f 100644 (file)
@@ -428,7 +428,7 @@ int swap_readpage(struct page *page, bool synchronous)
                if (!READ_ONCE(bio->bi_private))
                        break;
 
-               if (!blk_poll(disk->queue, qc, true))
+               if (!blk_poll(disk->queue, qc, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);