From: Kanchan Joshi
Date: Tue, 23 Aug 2022 16:14:41 +0000 (+0530)
Subject: io_uring: add iopoll infrastructure for io_uring_cmd
X-Git-Tag: v6.1-rc5~284^2~46
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=5756a3a7e713bcab705a5f0c810a2b1f7f4ecfaa;p=platform%2Fkernel%2Flinux-starfive.git

io_uring: add iopoll infrastructure for io_uring_cmd

Wire this up in the same way as iopoll is done for regular read/write IO.
Make room for storing a cookie in struct io_uring_cmd on submission.
Perform the completion using the ->uring_cmd_iopoll handler.

Signed-off-by: Kanchan Joshi
Signed-off-by: Pankaj Raghav
Link: https://lore.kernel.org/r/20220823161443.49436-3-joshi.k@samsung.com
Signed-off-by: Jens Axboe
---

diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 4a2f6cc..58676c0 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -20,8 +20,12 @@ enum io_uring_cmd_flags {
 struct io_uring_cmd {
 	struct file	*file;
 	const void	*cmd;
-	/* callback to defer completions to task context */
-	void (*task_work_cb)(struct io_uring_cmd *cmd);
+	union {
+		/* callback to defer completions to task context */
+		void (*task_work_cb)(struct io_uring_cmd *cmd);
+		/* used for polled completion */
+		void *cookie;
+	};
 	u32		cmd_op;
 	u32		pad;
 	u8		pdu[32]; /* available inline for free use */
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d99b31a..31ac87e 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1433,6 +1433,12 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 			    wq_list_empty(&ctx->iopoll_list))
 				break;
 		}
+
+		if (task_work_pending(current)) {
+			mutex_unlock(&ctx->uring_lock);
+			io_run_task_work();
+			mutex_lock(&ctx->uring_lock);
+		}
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
 			break;
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index c4dddd0..008320c 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -465,6 +465,7 @@ const struct io_op_def io_op_defs[] = {
 		.needs_file		= 1,
 		.plug			= 1,
 		.name			= "URING_CMD",
+		.iopoll			= 1,
 		.async_size		= uring_cmd_pdu_size(1),
 		.prep			= io_uring_cmd_prep,
 		.issue			= io_uring_cmd,
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 76ebcfe..b6f9c75 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1011,7 +1011,13 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (READ_ONCE(req->iopoll_completed))
 			break;
 
-		ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
+		if (req->opcode == IORING_OP_URING_CMD) {
+			struct io_uring_cmd *ioucmd = (struct io_uring_cmd *)rw;
+
+			ret = req->file->f_op->uring_cmd_iopoll(ioucmd);
+		} else
+			ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob,
+							      poll_flags);
 		if (unlikely(ret < 0))
 			return ret;
 		else if (ret)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index e78b6f9..f3ed61e 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -50,7 +50,11 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
 	io_req_set_res(req, ret, 0);
 	if (req->ctx->flags & IORING_SETUP_CQE32)
 		io_req_set_cqe32_extra(req, res2, 0);
-	__io_req_complete(req, 0);
+	if (req->ctx->flags & IORING_SETUP_IOPOLL)
+		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
+		smp_store_release(&req->iopoll_completed, 1);
+	else
+		__io_req_complete(req, 0);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
@@ -97,8 +101,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 		issue_flags |= IO_URING_F_SQE128;
 	if (ctx->flags & IORING_SETUP_CQE32)
 		issue_flags |= IO_URING_F_CQE32;
-	if (ctx->flags & IORING_SETUP_IOPOLL)
+	if (ctx->flags & IORING_SETUP_IOPOLL) {
 		issue_flags |= IO_URING_F_IOPOLL;
+		req->iopoll_completed = 0;
+		WRITE_ONCE(ioucmd->cookie, NULL);
+	}
 	if (req_has_async_data(req))
 		ioucmd->cmd = req->async_data;
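For illustration, a driver wiring into this infrastructure could look
roughly like the sketch below. Only the ->uring_cmd()/->uring_cmd_iopoll()
hooks, the cookie slot, IO_URING_F_IOPOLL, and io_uring_cmd_done() come
from this series; the "mydrv_*" structures and helpers are hypothetical.

#include <linux/fs.h>
#include <linux/io_uring.h>
#include <linux/module.h>

/* Hypothetical per-command state tracked by the driver */
struct mydrv_cmd {
	bool done;	/* set once the device has finished the command */
	int result;	/* status to report in the CQE */
};

/* Hypothetical helpers: queue a command to hw, reap hw completions */
static struct mydrv_cmd *mydrv_submit(struct io_uring_cmd *ioucmd);
static void mydrv_poll_hw(struct mydrv_cmd *mcmd);

/* Issue path: reached through io_uring_cmd() -> f_op->uring_cmd() */
static int mydrv_uring_cmd(struct io_uring_cmd *ioucmd,
			   unsigned int issue_flags)
{
	struct mydrv_cmd *mcmd = mydrv_submit(ioucmd);

	if (issue_flags & IO_URING_F_IOPOLL) {
		/*
		 * Polled mode: publish the in-flight command through the
		 * cookie slot that this patch unions with task_work_cb.
		 * The core cleared it to NULL before issuing.
		 */
		WRITE_ONCE(ioucmd->cookie, mcmd);
	}
	return -EIOCBQUEUED;	/* completion arrives via polling */
}

/* Poll path: reached through io_do_iopoll() for IORING_OP_URING_CMD */
static int mydrv_uring_cmd_iopoll(struct io_uring_cmd *ioucmd)
{
	struct mydrv_cmd *mcmd = READ_ONCE(ioucmd->cookie);

	if (!mcmd)
		return 0;	/* nothing submitted in polled mode yet */

	mydrv_poll_hw(mcmd);
	if (!mcmd->done)
		return 0;	/* still in flight; caller keeps polling */

	/* With IORING_SETUP_IOPOLL this marks ->iopoll_completed */
	io_uring_cmd_done(ioucmd, mcmd->result, 0);
	return 1;		/* report that a completion was found */
}

static const struct file_operations mydrv_fops = {
	.owner			= THIS_MODULE,
	.uring_cmd		= mydrv_uring_cmd,
	.uring_cmd_iopoll	= mydrv_uring_cmd_iopoll,
};

The union in struct io_uring_cmd is safe because the two members are
never live at the same time: task_work_cb is only used when a completion
is deferred to task context, while a polled command completes inline from
->uring_cmd_iopoll(). A nonzero return tells io_do_iopoll() progress was
made; the core then observes req->iopoll_completed, which
io_uring_cmd_done() set via smp_store_release(), and reaps the request.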