static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
-static void io_double_put_req(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static void io_queue_next(struct io_kiocb *req);
io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
}
-static void __io_req_task_cancel(struct io_kiocb *req, int error)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock_irq(&ctx->completion_lock);
- io_cqring_fill_event(req, error);
- io_commit_cqring(ctx);
- spin_unlock_irq(&ctx->completion_lock);
-
- io_cqring_ev_posted(ctx);
- req_set_fail_links(req);
- io_double_put_req(req);
-}
-
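
For reference, the call sites below all switch to a single helper that folds
this open-coded sequence together. A minimal sketch of io_req_complete_failed(),
inferred from the removed body (the helper itself is introduced elsewhere in
this series; io_req_complete_post() is assumed here to fill and commit the CQE
under completion_lock, post the event, and drop the completion reference):

static inline void io_req_complete_failed(struct io_kiocb *req, long res)
{
        /* sketch, not the verbatim kernel implementation */
        /* mark failure so any linked requests get cancelled */
        req_set_fail_links(req);
        /* drop the submit reference */
        io_put_req(req);
        /* fill + commit the CQE, post the event, drop the completion ref */
        io_req_complete_post(req, res, 0);
}
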
static void io_req_task_cancel(struct callback_head *cb)
{
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
struct io_ring_ctx *ctx = req->ctx;

/* ctx is guaranteed to stay alive while we hold uring_lock */
mutex_lock(&ctx->uring_lock);
- __io_req_task_cancel(req, req->result);
+ io_req_complete_failed(req, req->result);
mutex_unlock(&ctx->uring_lock);
}
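
io_req_task_cancel() recovers the request with container_of() because task_work
hands back only the embedded callback_head. Illustrative only, how such a
callback is typically queued to the submitting task (init_task_work(),
task_work_add() and TWA_SIGNAL are the stock kernel task_work API; the exact
fallback plumbing is assumed from the exit_task_work fragment above):

        /* sketch: run the cancel callback in the owning task's context */
        init_task_work(&req->task_work, io_req_task_cancel);
        if (task_work_add(req->task, &req->task_work, TWA_SIGNAL))
                /* the task is exiting, punt to the ctx exit work list */
                io_task_work_add_head(&req->ctx->exit_task_work,
                                      &req->task_work);
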
if (!(current->flags & PF_EXITING) && !current->in_execve)
__io_queue_sqe(req);
else
- __io_req_task_cancel(req, -EFAULT);
+ io_req_complete_failed(req, -EFAULT);
mutex_unlock(&ctx->uring_lock);
}
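
The branch above keeps an exiting or exec'ing task from issuing new SQEs, since
its mm and file tables may already be getting torn down; such requests are
failed with -EFAULT instead. A hypothetical predicate capturing the same check
(not part of the patch):

        /* sketch: hypothetical helper, equivalent to the inline test above */
        static inline bool io_task_can_issue(void)
        {
                return !(current->flags & PF_EXITING) && !current->in_execve;
        }
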
io_free_req_deferred(req);
}
-static void io_double_put_req(struct io_kiocb *req)
-{
- /* drop both submit and complete references */
- if (req_ref_sub_and_test(req, 2))
- io_free_req(req);
-}
-
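
io_double_put_req() combined the submit and completion references into one
atomic drop. A sketch of the helper it wrapped, assuming a plain atomic counter
in struct io_kiocb (the refcount backing varied across versions of this code):

        /* sketch: assumed shape of the wrapped refcount helper */
        static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
        {
                return atomic_sub_and_test(refs, &req->refs);
        }

With io_req_complete_failed() dropping each reference along its own path, the
combined double put has no callers left, hence its removal.
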
static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
/* See comment at the top of this file */
if (!READ_ONCE(apoll->poll.canceled))
__io_req_task_submit(req);
else
- __io_req_task_cancel(req, -ECANCELED);
+ io_req_complete_failed(req, -ECANCELED);
kfree(apoll->double_poll);
kfree(apoll);