io_uring: add more likely/unlikely() annotations
author	Pavel Begunkov <asml.silence@gmail.com>	Mon, 4 Oct 2021 19:02:47 +0000 (20:02 +0100)
committer	Jens Axboe <axboe@kernel.dk>	Tue, 19 Oct 2021 11:49:54 +0000 (05:49 -0600)
Add two extra unlikely() annotations in io_submit_sqes() and one
around io_req_needs_clean() to help the compiler avoid extra jumps
in hot paths.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/88e087afe657e7660194353aada9b00f11d480f9.1633373302.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
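
For reference, a minimal standalone sketch of what these hints do:
likely()/unlikely() are thin wrappers around __builtin_expect()
(simplified here from include/linux/compiler.h), and submit_entries()
is a made-up helper mirroring the annotated check in the second hunk;
this is an illustration, not code from fs/io_uring.c.

/*
 * Illustration only (not kernel code): the annotated condition is
 * treated as cold, so the compiler keeps the common path as
 * straight-line code and moves the rare branch out of the way.
 */
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical hot-path helper mirroring the pattern in the hunks below. */
static int submit_entries(unsigned int entries)
{
	if (unlikely(!entries))		/* rare: nothing queued */
		return 0;
	return (int)entries;		/* common case: fall through and submit */
}

int main(void)
{
	printf("submitted %d\n", submit_entries(8));
	return 0;
}
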
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ab07a08..bc87ed8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1957,7 +1957,7 @@ static inline void io_dismantle_req(struct io_kiocb *req)
 {
        unsigned int flags = req->flags;
 
-       if (io_req_needs_clean(req))
+       if (unlikely(io_req_needs_clean(req)))
                io_clean_op(req);
        if (!(flags & REQ_F_FIXED_FILE))
                io_put_file(req->file);
@@ -7198,11 +7198,11 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
        unsigned int entries = io_sqring_entries(ctx);
        int submitted = 0;
 
-       if (!entries)
+       if (unlikely(!entries))
                return 0;
        /* make sure SQ entry isn't read before tail */
        nr = min3(nr, ctx->sq_entries, entries);
-       if (!percpu_ref_tryget_many(&ctx->refs, nr))
+       if (unlikely(!percpu_ref_tryget_many(&ctx->refs, nr)))
                return -EAGAIN;
        io_get_task_refs(nr);
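
Both annotated conditions in this hunk should be cold in normal
operation: the SQ ring is usually non-empty when io_submit_sqes()
runs, and percpu_ref_tryget_many() on ctx->refs typically only fails
once the ring is being torn down, so the unlikely() hints match the
expected branch probabilities.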