From 85224a3f89cf8041f5e7a2b8324eee5850865ac4 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Wed, 23 Nov 2022 11:33:39 +0000
Subject: [PATCH] io_uring: remove io_req_tw_post_queue

[ Upstream commit 833b5dfffc26c81835ce38e2a5df9ac5fa142735 ]

Remove io_req_tw_post() and io_req_tw_post_queue(), we can use
io_req_task_complete() instead.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/b9b73c08022c7f1457023ac841f35c0100e70345.1669203009.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
Stable-dep-of: ef5c600adb1d ("io_uring: always prep_async for drain requests")
Signed-off-by: Sasha Levin
---
 io_uring/io_uring.c | 12 ------------
 io_uring/io_uring.h |  8 +++++++-
 io_uring/timeout.c  |  6 +++---
 3 files changed, 10 insertions(+), 16 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6b81a0d2..50f959f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1236,18 +1236,6 @@ int io_run_local_work(struct io_ring_ctx *ctx)
 	return ret;
 }
 
-static void io_req_tw_post(struct io_kiocb *req, bool *locked)
-{
-	io_req_complete_post(req);
-}
-
-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
-{
-	io_req_set_res(req, res, cflags);
-	req->io_task_work.func = io_req_tw_post;
-	io_req_task_work_add(req);
-}
-
 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 {
 	/* not needed for normal modes, but SQPOLL depends on it */
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 0cf544b..90b675c 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -53,7 +53,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
 void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
 bool io_is_uring_fops(struct file *file);
 bool io_alloc_async_data(struct io_kiocb *req);
-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
 void io_req_task_queue(struct io_kiocb *req);
 void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
 void io_req_task_complete(struct io_kiocb *req, bool *locked);
@@ -380,4 +379,11 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
 		      ctx->submitter_task == current);
 }
 
+static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
+{
+	io_req_set_res(req, res, 0);
+	req->io_task_work.func = io_req_task_complete;
+	io_req_task_work_add(req);
+}
+
 #endif
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 16b006b..4c6a566 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -63,7 +63,7 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&timeout->list);
-		io_req_tw_post_queue(req, status, 0);
+		io_req_queue_tw_complete(req, status);
 		return true;
 	}
 	return false;
@@ -161,7 +161,7 @@ void io_disarm_next(struct io_kiocb *req)
 		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
-			io_req_tw_post_queue(link, -ECANCELED, 0);
+			io_req_queue_tw_complete(link, -ECANCELED);
 		}
 	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
 		struct io_ring_ctx *ctx = req->ctx;
@@ -170,7 +170,7 @@ void io_disarm_next(struct io_kiocb *req)
 		link = io_disarm_linked_timeout(req);
 		spin_unlock_irq(&ctx->timeout_lock);
 		if (link)
-			io_req_tw_post_queue(link, -ECANCELED, 0);
+			io_req_queue_tw_complete(link, -ECANCELED);
 	}
 	if (unlikely((req->flags & REQ_F_FAIL) &&
 		     !(req->flags & REQ_F_HARDLINK)))
-- 
2.7.4
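
The following is a minimal userspace sketch (not kernel code) of the pattern this
patch consolidates: rather than keeping a dedicated task-work callback
(io_req_tw_post) whose only job is to post a completion, the new
io_req_queue_tw_complete() helper records the result on the request up front and
reuses the generic completion callback as the task-work function. All names below
(struct sketch_req, sketch_task_complete, sketch_queue_tw_complete, and so on) are
hypothetical stand-ins for illustration only, not io_uring APIs.

/* Build with: cc -Wall -o sketch sketch.c */
#include <stdio.h>
#include <stdint.h>

struct sketch_req;
typedef void (*tw_func_t)(struct sketch_req *req);

struct sketch_req {
	int32_t res;                 /* result that will be posted as the "CQE" */
	tw_func_t tw_func;           /* task-work callback to run later */
	struct sketch_req *tw_next;  /* simple singly linked task-work list */
};

static struct sketch_req *tw_queue;  /* pending task work (LIFO for simplicity) */

/* Generic completion callback: the analogue of io_req_task_complete(). */
static void sketch_task_complete(struct sketch_req *req)
{
	printf("completed request with res=%d\n", req->res);
}

/* Analogue of io_req_task_work_add(): defer the callback to "task context". */
static void sketch_task_work_add(struct sketch_req *req)
{
	req->tw_next = tw_queue;
	tw_queue = req;
}

/*
 * Analogue of the new io_req_queue_tw_complete() helper: store the result,
 * point the task-work hook at the generic completion path, and queue it.
 * The removed io_req_tw_post_queue() achieved the same result through an
 * extra, now-redundant callback.
 */
static void sketch_queue_tw_complete(struct sketch_req *req, int32_t res)
{
	req->res = res;
	req->tw_func = sketch_task_complete;
	sketch_task_work_add(req);
}

/* Analogue of running pending task work when control returns to the task. */
static void sketch_run_task_work(void)
{
	while (tw_queue) {
		struct sketch_req *req = tw_queue;

		tw_queue = req->tw_next;
		req->tw_func(req);
	}
}

int main(void)
{
	struct sketch_req cancelled = { 0 }, killed = { 0 };

	/* Mirrors the timeout.c call sites: queue completions with a status. */
	sketch_queue_tw_complete(&cancelled, -125 /* -ECANCELED on Linux */);
	sketch_queue_tw_complete(&killed, -62 /* -ETIME on Linux */);

	sketch_run_task_work();
	return 0;
}

Under these assumptions, the design choice mirrors the patch: once the result is
stored on the request before queueing, a single generic completion callback
suffices, so the intermediate callback and its queueing wrapper can be dropped.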