}
}
-int io_run_local_work(struct io_ring_ctx *ctx)
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked)
{
- bool locked;
struct llist_node *node;
struct llist_node fake;
struct llist_node *current_final = NULL;
return -EEXIST;
}
- locked = mutex_trylock(&ctx->uring_lock);
-
node = io_llist_xchg(&ctx->work_llist, &fake);
ret = 0;
again:
goto again;
}
- if (locked) {
+ if (locked)
io_submit_flush_completions(ctx);
- mutex_unlock(&ctx->uring_lock);
- }
trace_io_uring_local_work_run(ctx, ret, loops);
return ret;
+
+}
+
+/*
+ * Run deferred local task work, opportunistically holding the uring_lock.
+ *
+ * The lock is taken with mutex_trylock() rather than mutex_lock(): holding
+ * it merely lets __io_run_local_work() flush completions inline (see the
+ * "if (locked)" path there); the work itself can proceed without it.
+ * NOTE(review): presumably callers may already hold uring_lock, which is
+ * why trylock is used and __io_run_local_work() takes 'locked' — confirm
+ * against the call sites.
+ *
+ * Returns the value of __io_run_local_work() (negative errno on failure,
+ * otherwise its count/status result).
+ */
+int io_run_local_work(struct io_ring_ctx *ctx)
+{
+ bool locked;
+ int ret;
+
+ locked = mutex_trylock(&ctx->uring_lock);
+ ret = __io_run_local_work(ctx, locked);
+ /* Only drop the lock if the trylock above actually acquired it. */
+ if (locked)
+ mutex_unlock(&ctx->uring_lock);
+
+ return ret;
}
static void io_req_tw_post(struct io_kiocb *req, bool *locked)
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);