io_uring: optimise io_req_init() sqe flags checks
author Pavel Begunkov <asml.silence@gmail.com>
Wed, 15 Sep 2021 11:03:38 +0000 (12:03 +0100)
committer Jens Axboe <axboe@kernel.dk>
Tue, 19 Oct 2021 11:49:53 +0000 (05:49 -0600)
IOSQE_IO_DRAIN is quite marginal and we don't care too much about
IOSQE_BUFFER_SELECT. Save two ifs and hide both of them behind the
SQE_VALID_FLAGS check. Now we first check whether the request uses a
"safe" subset, i.e. without DRAIN and BUFFER_SELECT, and only if that
is not the case do we test the rest of the flags.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/dccfb9ab2ab0969a2d8dc59af88fa0ce44eeb1d5.1631703764.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index 027ca7a..3801f2e 100644 (file)
 
 #define IORING_MAX_REG_BUFFERS (1U << 14)
 
-#define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
-                               IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
-                               IOSQE_BUFFER_SELECT)
+#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
+                         IOSQE_IO_HARDLINK | IOSQE_ASYNC)
+
+#define SQE_VALID_FLAGS        (SQE_COMMON_FLAGS|IOSQE_BUFFER_SELECT|IOSQE_IO_DRAIN)
+
 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
                                REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)
 
@@ -7059,20 +7061,21 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        req->fixed_rsrc_refs = NULL;
        req->task = current;
 
-       /* enforce forwards compatibility on users */
-       if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
-               return -EINVAL;
        if (unlikely(req->opcode >= IORING_OP_LAST))
                return -EINVAL;
+       if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
+               /* enforce forwards compatibility on users */
+               if (sqe_flags & ~SQE_VALID_FLAGS)
+                       return -EINVAL;
+               if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
+                   !io_op_defs[req->opcode].buffer_select)
+                       return -EOPNOTSUPP;
+               if (sqe_flags & IOSQE_IO_DRAIN)
+                       ctx->drain_active = true;
+       }
        if (!io_check_restriction(ctx, req, sqe_flags))
                return -EACCES;
 
-       if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
-           !io_op_defs[req->opcode].buffer_select)
-               return -EOPNOTSUPP;
-       if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
-               ctx->drain_active = true;
-
        personality = READ_ONCE(sqe->personality);
        if (personality) {
                req->creds = xa_load(&ctx->personalities, personality);
@@ -11017,6 +11020,8 @@ static int __init io_uring_init(void)
 
        /* should fit into one byte */
        BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
+       BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
+       BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
 
        BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
        BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));