From b850d6ddc78878996039d79833f3d7fd755f0916 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Mon, 29 Aug 2022 14:30:13 +0100
Subject: [PATCH] io_uring: clean cqe filling functions

[ upstream commit 913a571affedd17239c4d4ea90c8874b32fc2191 ]

Split io_cqring_fill_event() into a couple of more targeted functions.
The first one is io_fill_cqe_aux(), for completions that are not
associated with request completions; it does the ->cq_extra accounting.
Examples are additional CQEs from multishot poll and rsrc notifications.
The second is io_fill_cqe_req(), which should be called for a normal
request completion. Nothing more to it at the moment; it will be used
in later patches. The last one is the inlined __io_fill_cqe() for finer
grained control; it should be used with caution and only in the hottest
places.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/59a9117a4a44fc9efcf04b3afa51e0d080f5943c.1636559119.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
[pavel: backport]
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 fs/io_uring.c | 57 +++++++++++++++++++++++++++++----------------------------
 1 file changed, 29 insertions(+), 28 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 28a7b04f8d4f..552ccc3c8601 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1079,8 +1079,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 					 bool cancel_all);
 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
 
-static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
-				 s32 res, u32 cflags);
+static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
+
 static void io_put_req(struct io_kiocb *req);
 static void io_put_req_deferred(struct io_kiocb *req);
 static void io_dismantle_req(struct io_kiocb *req);
@@ -1515,7 +1515,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&req->timeout.list);
-		io_cqring_fill_event(req->ctx, req->user_data, status, 0);
+		io_fill_cqe_req(req, status, 0);
 		io_put_req_deferred(req);
 	}
 }
@@ -1790,8 +1790,8 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	return true;
 }
 
-static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
-					  s32 res, u32 cflags)
+static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
+				 s32 res, u32 cflags)
 {
 	struct io_uring_cqe *cqe;
 
@@ -1812,11 +1812,16 @@ static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data
 	return io_cqring_event_overflow(ctx, user_data, res, cflags);
 }
 
-/* not as hot to bloat with inlining */
-static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
-					  s32 res, u32 cflags)
+static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
+{
+	__io_fill_cqe(req->ctx, req->user_data, res, cflags);
+}
+
+static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
+				     s32 res, u32 cflags)
 {
-	return __io_cqring_fill_event(ctx, user_data, res, cflags);
+	ctx->cq_extra++;
+	return __io_fill_cqe(ctx, user_data, res, cflags);
 }
 
 static void io_req_complete_post(struct io_kiocb *req, s32 res,
@@ -1825,7 +1830,7 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res,
 	struct io_ring_ctx *ctx = req->ctx;
 
 	spin_lock(&ctx->completion_lock);
-	__io_cqring_fill_event(ctx, req->user_data, res, cflags);
+	__io_fill_cqe(ctx, req->user_data, res, cflags);
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
@@ -2051,8 +2056,7 @@ static bool io_kill_linked_timeout(struct io_kiocb *req)
 		link->timeout.head = NULL;
 		if (hrtimer_try_to_cancel(&io->timer) != -1) {
 			list_del(&link->timeout.list);
-			io_cqring_fill_event(link->ctx, link->user_data,
-					     -ECANCELED, 0);
+			io_fill_cqe_req(link, -ECANCELED, 0);
 			io_put_req_deferred(link);
 			return true;
 		}
@@ -2076,7 +2080,7 @@ static void io_fail_links(struct io_kiocb *req)
 		link->link = NULL;
 
 		trace_io_uring_fail_link(req, link);
-		io_cqring_fill_event(link->ctx, link->user_data, res, 0);
+		io_fill_cqe_req(link, res, 0);
 		io_put_req_deferred(link);
 		link = nxt;
 	}
@@ -2093,8 +2097,7 @@ static bool io_disarm_next(struct io_kiocb *req)
 		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
-			io_cqring_fill_event(link->ctx, link->user_data,
-					     -ECANCELED, 0);
+			io_fill_cqe_req(link, -ECANCELED, 0);
 			io_put_req_deferred(link);
 			posted = true;
 		}
@@ -2370,8 +2373,8 @@ static void io_submit_flush_completions(struct io_ring_ctx *ctx)
 	for (i = 0; i < nr; i++) {
 		struct io_kiocb *req = state->compl_reqs[i];
 
-		__io_cqring_fill_event(ctx, req->user_data, req->result,
-					req->compl.cflags);
+		__io_fill_cqe(ctx, req->user_data, req->result,
+			      req->compl.cflags);
 	}
 	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
@@ -2482,8 +2485,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		req = list_first_entry(done, struct io_kiocb, inflight_entry);
 		list_del(&req->inflight_entry);
 
-		__io_cqring_fill_event(ctx, req->user_data, req->result,
-					io_put_rw_kbuf(req));
+		io_fill_cqe_req(req, req->result, io_put_rw_kbuf(req));
 		(*nr_events)++;
 
 		if (req_ref_put_and_test(req))
@@ -5413,13 +5415,13 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
 	}
 	if (req->poll.events & EPOLLONESHOT)
 		flags = 0;
-	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
+
+	if (!(flags & IORING_CQE_F_MORE)) {
+		io_fill_cqe_req(req, error, flags);
+	} else if (!io_fill_cqe_aux(ctx, req->user_data, error, flags)) {
 		req->poll.events |= EPOLLONESHOT;
 		flags = 0;
 	}
-	if (flags & IORING_CQE_F_MORE)
-		ctx->cq_extra++;
-
 	return !(flags & IORING_CQE_F_MORE);
 }
 
@@ -5746,9 +5748,9 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 	do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
 
 	if (do_complete) {
-		io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
-		io_commit_cqring(req->ctx);
 		req_set_fail(req);
+		io_fill_cqe_req(req, -ECANCELED, 0);
+		io_commit_cqring(req->ctx);
 		io_put_req_deferred(req);
 	}
 	return do_complete;
@@ -6045,7 +6047,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 		return PTR_ERR(req);
 
 	req_set_fail(req);
-	io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
+	io_fill_cqe_req(req, -ECANCELED, 0);
 	io_put_req_deferred(req);
 	return 0;
 }
@@ -8271,8 +8273,7 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 
 			io_ring_submit_lock(ctx, lock_ring);
 			spin_lock(&ctx->completion_lock);
-			io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
-			ctx->cq_extra++;
+			io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
 			io_commit_cqring(ctx);
 			spin_unlock(&ctx->completion_lock);
 			io_cqring_ev_posted(ctx);
-- 
2.34.1
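
A note for reviewers of the backport: below is a minimal, self-contained
userspace sketch of how the three helpers relate after this patch. The
struct layouts, the function bodies and main() are simplified assumptions
made up for illustration; they are not the real fs/io_uring.c code. Only
the calling convention and the ->cq_extra accounting split mirror the
patch.

/*
 * Simplified userspace model of the CQE-filling helpers introduced here.
 * Everything in this file is an illustrative assumption, not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct io_ring_ctx { unsigned cq_extra; unsigned cqes; };	/* assumed */
struct io_kiocb { struct io_ring_ctx *ctx; uint64_t user_data; };	/* assumed */

/* hot inner helper; inlined and used only in the hottest paths */
static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, uint64_t user_data,
				 int32_t res, uint32_t cflags)
{
	/* stands in for posting a CQE (or overflowing) in the kernel */
	ctx->cqes++;
	printf("cqe: user_data=%llu res=%d cflags=%u\n",
	       (unsigned long long)user_data, res, cflags);
	return true;
}

/* normal request completion: no ->cq_extra accounting */
static void io_fill_cqe_req(struct io_kiocb *req, int32_t res, uint32_t cflags)
{
	__io_fill_cqe(req->ctx, req->user_data, res, cflags);
}

/* auxiliary CQE (multishot poll, rsrc notification): bumps ->cq_extra */
static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, uint64_t user_data,
			    int32_t res, uint32_t cflags)
{
	ctx->cq_extra++;
	return __io_fill_cqe(ctx, user_data, res, cflags);
}

int main(void)
{
	struct io_ring_ctx ctx = { 0 };
	struct io_kiocb req = { .ctx = &ctx, .user_data = 1 };

	io_fill_cqe_req(&req, 0, 0);	/* request completion */
	io_fill_cqe_aux(&ctx, 2, 0, 0);	/* extra CQE, cq_extra becomes 1 */
	printf("cq_extra=%u cqes=%u\n", ctx.cq_extra, ctx.cqes);
	return 0;
}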