/* Per-submission batch of inline-completed requests; flushed to the CQ
 * ring in one locked pass by io_submit_flush_completions(). */
struct io_comp_state {
/* count of requests queued on @list; a flush is forced once it hits the
 * batch limit (32 in the queueing path below) */
unsigned int nr;
struct list_head list;
/* NOTE(review): diff hunk — the cached ring ctx is deleted here; callers
 * now pass the ctx to io_submit_flush_completions() explicitly. Confirm
 * every call site was converted in the same patch. */
- struct io_ring_ctx *ctx;
};
/* NOTE(review): extraction damage — this span fuses the opening line of
 * struct io_submit_state with an unrelated statement (io_put_req(req);)
 * from a different diff hunk; it is not valid C as shown and the real
 * struct body is not visible in this chunk. */
struct io_submit_state {
io_put_req(req);
}
/* NOTE(review): diff hunk — io_submit_flush_completions() stops reading the
 * ring ctx from cs->ctx and instead takes it as an explicit parameter; the
 * local "ctx = cs->ctx" initializer is deleted accordingly. Body is
 * truncated in this chunk (loop and function never close here). */
-static void io_submit_flush_completions(struct io_comp_state *cs)
+static void io_submit_flush_completions(struct io_comp_state *cs,
+ struct io_ring_ctx *ctx)
{
- struct io_ring_ctx *ctx = cs->ctx;
-
spin_lock_irq(&ctx->completion_lock);
while (!list_empty(&cs->list)) {
struct io_kiocb *req;
/* NOTE(review): the lines below come from a DIFFERENT function (the
 * inline-completion queueing path) fused onto this hunk by extraction —
 * do not read the remainder as one function body. The recursive-looking
 * call is that other function invoking the flush with req->ctx once the
 * batch reaches 32 entries. */
if (req->flags & REQ_F_COMPLETE_INLINE) {
list_add_tail(&req->compl.list, &cs->list);
if (++cs->nr >= 32)
- io_submit_flush_completions(cs);
+ io_submit_flush_completions(cs, req->ctx);
req = NULL;
} else {
req = io_put_req_find_next(req);
/*
* Batched submission is done, ensure local IO is flushed out.
*/
/* NOTE(review): diff hunk — io_submit_state_end() now receives the ring ctx
 * explicitly and forwards it to the flush (it can no longer come from
 * state->comp.ctx, which this patch deletes). Body is truncated below:
 * the closing brace is not visible in this chunk. */
-static void io_submit_state_end(struct io_submit_state *state)
+static void io_submit_state_end(struct io_submit_state *state,
+ struct io_ring_ctx *ctx)
{
/* flush any inline completions batched during this submission pass */
if (!list_empty(&state->comp.list))
- io_submit_flush_completions(&state->comp);
+ io_submit_flush_completions(&state->comp, ctx);
/* end the block plug started in io_submit_state_start(), if any */
if (state->plug_started)
blk_finish_plug(&state->plug);
io_state_file_put(state);
* Start submission side cache.
*/
/* NOTE(review): diff hunk — the ctx parameter and the state->comp.ctx cache
 * are both dropped, matching the field deletion in struct io_comp_state.
 * Body is truncated below (closing brace not visible in this chunk). */
static void io_submit_state_start(struct io_submit_state *state,
- struct io_ring_ctx *ctx, unsigned int max_ios)
+ unsigned int max_ios)
{
state->plug_started = false;
/* reset the inline-completion batch for this submission pass */
state->comp.nr = 0;
INIT_LIST_HEAD(&state->comp.list);
- state->comp.ctx = ctx;
state->free_reqs = 0;
state->file_refs = 0;
/* budget of SQEs this submission loop is allowed to consume */
state->ios_left = max_ios;
/* NOTE(review): "¤t" on the next two lines is HTML-entity mojibake —
 * "&current" was mangled via the &curren; entity; the real source reads
 * &current->io_uring->inflight and &current->usage. Fix the encoding when
 * applying this patch. This whole span is the tail of the submit loop
 * (presumably io_submit_sqes — its header is not visible here). */
percpu_counter_add(¤t->io_uring->inflight, nr);
refcount_add(nr, ¤t->usage);
/* diff hunk: ctx is no longer passed into io_submit_state_start()... */
- io_submit_state_start(&ctx->submit_state, ctx, nr);
+ io_submit_state_start(&ctx->submit_state, nr);
link.head = NULL;
/* per-SQE submission loop; body elided by extraction in this chunk */
for (i = 0; i < nr; i++) {
}
if (link.head)
io_queue_link_head(link.head, &ctx->submit_state.comp);
/* ...and is instead passed explicitly to io_submit_state_end() */
- io_submit_state_end(&ctx->submit_state);
+ io_submit_state_end(&ctx->submit_state, ctx);
/* Commit SQ ring head once we've consumed and submitted all SQEs */
io_commit_sqring(ctx);