unsigned sq_thread_idle;
};
+#define IO_IOPOLL_BATCH 8
+
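+/* requests whose completions are collected during submission and flushed as one batch */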
+struct io_comp_state {
+ unsigned int nr;
+ struct list_head list;
+ struct io_ring_ctx *ctx;
+};
+
+struct io_submit_state {
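+ /* block layer plug; batches bio submission across the queued requests */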
+ struct blk_plug plug;
+
+ /*
+ * io_kiocb alloc cache
+ */
+ void *reqs[IO_IOPOLL_BATCH];
+ unsigned int free_reqs;
+
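+ /* true once blk_start_plug() has been called for this submission */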
+ bool plug_started;
+
+ /*
+ * Batch completion logic
+ */
+ struct io_comp_state comp;
+
+ /*
+ * File reference cache
+ */
+ struct file *file;
+ unsigned int fd;
+ unsigned int file_refs;
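+ /* SQEs still to be submitted in this batch; used to decide on plugging */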
+ unsigned int ios_left;
+};
+
struct io_ring_ctx {
struct {
struct percpu_ref refs;
struct work_struct exit_work;
struct io_restriction restrictions;
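+ /* per-ring submission state, previously kept on the submitter's stack */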
+ struct io_submit_state submit_state;
};
/*
u32 seq;
};
-#define IO_IOPOLL_BATCH 8
-
-struct io_comp_state {
- unsigned int nr;
- struct list_head list;
- struct io_ring_ctx *ctx;
-};
-
-struct io_submit_state {
- struct blk_plug plug;
-
- /*
- * io_kiocb alloc cache
- */
- void *reqs[IO_IOPOLL_BATCH];
- unsigned int free_reqs;
-
- bool plug_started;
-
- /*
- * Batch completion logic
- */
- struct io_comp_state comp;
-
- /*
- * File reference cache
- */
- struct file *file;
- unsigned int fd;
- unsigned int file_refs;
- unsigned int ios_left;
-};
-
struct io_op_def {
/* needs req->file assigned */
unsigned needs_file : 1;
return NULL;
}
-static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
- struct io_submit_state *state)
+static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
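+ /* submission state is now embedded in the ring ctx */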
+ struct io_submit_state *state = &ctx->submit_state;
+
if (!state->free_reqs) {
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
size_t sz;
IOSQE_BUFFER_SELECT)
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe,
- struct io_submit_state *state)
+ const struct io_uring_sqe *sqe)
{
+ struct io_submit_state *state;
unsigned int sqe_flags;
int id, ret;
/* same numerical values with corresponding REQ_F_*, safe to copy */
req->flags |= sqe_flags;
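+ /* use the submission state embedded in the ctx */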
+ state = &ctx->submit_state;
/*
* Plug now if we have more than 1 IO left after this, and the target
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
{
- struct io_submit_state state;
struct io_submit_link link;
int i, submitted = 0;
percpu_counter_add(&current->io_uring->inflight, nr);
refcount_add(nr, &current->usage);
- io_submit_state_start(&state, ctx, nr);
+ io_submit_state_start(&ctx->submit_state, ctx, nr);
link.head = NULL;
for (i = 0; i < nr; i++) {
io_consume_sqe(ctx);
break;
}
- req = io_alloc_req(ctx, &state);
+ req = io_alloc_req(ctx);
if (unlikely(!req)) {
if (!submitted)
submitted = -EAGAIN;
/* will complete beyond this point, count as submitted */
submitted++;
- err = io_init_req(ctx, req, sqe, &state);
+ err = io_init_req(ctx, req, sqe);
if (unlikely(err)) {
fail_req:
io_put_req(req);
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
true, ctx->flags & IORING_SETUP_SQPOLL);
- err = io_submit_sqe(req, sqe, &link, &state.comp);
+ err = io_submit_sqe(req, sqe, &link, &ctx->submit_state.comp);
if (err)
goto fail_req;
}
put_task_struct_many(current, unused);
}
if (link.head)
- io_queue_link_head(link.head, &state.comp);
- io_submit_state_end(&state);
+ io_queue_link_head(link.head, &ctx->submit_state.comp);
+ io_submit_state_end(&ctx->submit_state);
/* Commit SQ ring head once we've consumed and submitted all SQEs */
io_commit_sqring(ctx);