From 3218e5d32dbcf1b9c6dc589eca21deebb14215fa Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Sat, 25 Jun 2022 11:52:59 +0100
Subject: [PATCH] io_uring: fuse fallback_node and normal tw node

Now as both normal and fallback paths use llist, just keep one node
head in struct io_task_work and kill off ->fallback_node.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d04ebde409f7b162fe247b361b4486b193293e46.1656153285.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/linux/io_uring_types.h | 5 +----
 io_uring/io_uring.c            | 5 ++---
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 918165a..3ca8f36 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -427,10 +427,7 @@ enum {
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
 
 struct io_task_work {
-	union {
-		struct llist_node	node;
-		struct llist_node	fallback_node;
-	};
+	struct llist_node		node;
 	io_req_tw_func_t		func;
 };
 
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 45538b3..86a0b0c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -233,7 +233,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 	bool locked = false;
 
 	percpu_ref_get(&ctx->refs);
-	llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
+	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
 		req->io_task_work.func(req, &locked);
 
 	if (locked) {
@@ -1091,13 +1091,12 @@ void io_req_task_work_add(struct io_kiocb *req)
 	if (likely(!task_work_add(req->task, &tctx->task_work,
 				  ctx->notify_method)))
 		return;
-
 	node = llist_del_all(&tctx->task_list);
 
 	while (node) {
 		req = container_of(node, struct io_kiocb, io_task_work.node);
 		node = node->next;
-		if (llist_add(&req->io_task_work.fallback_node,
+		if (llist_add(&req->io_task_work.node,
 			      &req->ctx->fallback_llist))
 			schedule_delayed_work(&req->ctx->fallback_work, 1);
 	}
-- 
2.7.4
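
For reference, a minimal userspace sketch of why the union was never
needed: a request's single llist_node is always drained off the task
list (llist_del_all()) before it is re-added to the fallback list
(llist_add()), so the two lists never reference the node at the same
time. The helpers below are simplified stand-ins for <linux/llist.h>
(the real ones are lock-free cmpxchg loops); the request/list names
are illustrative, not the kernel's.

	/* Userspace sketch, not kernel code. */
	#include <stddef.h>
	#include <stdio.h>

	struct llist_node { struct llist_node *next; };
	struct llist_head { struct llist_node *first; };

	/* Push to front; like the kernel's llist_add(), returns
	 * true if the list was empty before the add. */
	static int llist_add(struct llist_node *n, struct llist_head *h)
	{
		n->next = h->first;
		h->first = n;
		return n->next == NULL;
	}

	/* Detach and return the whole list, like llist_del_all(). */
	static struct llist_node *llist_del_all(struct llist_head *h)
	{
		struct llist_node *first = h->first;
		h->first = NULL;
		return first;
	}

	struct request {
		int id;
		struct llist_node node;	/* one node serves both lists */
	};

	#define node_to_req(n) \
		((struct request *)((char *)(n) - offsetof(struct request, node)))

	int main(void)
	{
		struct llist_head task_list = { NULL }, fallback = { NULL };
		struct request a = { .id = 1 }, b = { .id = 2 };

		/* Normal path: requests queued on the per-task list. */
		llist_add(&a.node, &task_list);
		llist_add(&b.node, &task_list);

		/*
		 * Fallback path, mirroring io_req_task_work_add(): drain
		 * the task list, then requeue each request on the fallback
		 * list.  Each node is off task_list before it goes on
		 * fallback, so one llist_node per request is enough.
		 */
		struct llist_node *n = llist_del_all(&task_list);
		while (n) {
			struct request *req = node_to_req(n);
			n = n->next;
			if (llist_add(&req->node, &fallback))
				printf("first fallback entry: req %d\n", req->id);
		}
		return 0;
	}

The kernel version additionally schedules the deferred worker only
when llist_add() reports the fallback list transitioned from empty,
which is what the schedule_delayed_work() call in the hunk above keys
off.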