	ACCT_PINNED,
};

-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
-					     struct task_struct *task);
-
+static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+					 struct task_struct *task,
+					 struct files_struct *files);
static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
			struct io_ring_ctx *ctx);
	 * as nobody else will be looking for them.
	 */
	do {
-		__io_uring_cancel_task_requests(ctx, NULL);
+		io_uring_try_cancel_requests(ctx, NULL, NULL);
	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
	io_ring_ctx_free(ctx);
}

	}
}
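The new helper lands right after io_cancel_defer_files(). It merges the two cancellation paths behind one interface: a non-NULL files narrows cancellation to requests referencing that files_struct, files == NULL means "everything owned by task", and task == NULL (as in io_ring_exit_work() above) covers the whole ring.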
+static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+					 struct task_struct *task,
+					 struct files_struct *files)
+{
+	struct io_task_cancel cancel = { .task = task, .files = files, };
+
+	while (1) {
+		enum io_wq_cancel cret;
+		bool ret = false;
+
+		if (ctx->io_wq) {
+			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+					       &cancel, true);
+			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+		}
+
+		/* SQPOLL thread does its own polling */
+		if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
+			while (!list_empty_careful(&ctx->iopoll_list)) {
+				io_iopoll_try_reap_events(ctx);
+				ret = true;
+			}
+		}
+
+		ret |= io_poll_remove_all(ctx, task, files);
+		ret |= io_kill_timeouts(ctx, task, files);
+		ret |= io_run_task_work();
+		io_cqring_overflow_flush(ctx, true, task, files);
+		if (!ret)
+			break;
+		cond_resched();
+	}
+}
+
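The body is a fixed-point loop: try every cancellation mechanism once, OR the did-anything results into ret, and repeat until a full pass makes no progress (the CQ overflow flush runs on every pass regardless of ret). A minimal userspace sketch of the same converge-until-idle idiom; struct work and cancel_one() are illustrative stand-ins, not io_uring code:

#include <sched.h>
#include <stdbool.h>
#include <stddef.h>

struct work {
	struct work *next;
	bool cancelled;
};

/*
 * Stand-in for one cancellation mechanism: returns true only if this
 * call made progress (cancelled something that wasn't cancelled yet).
 */
static bool cancel_one(struct work *w)
{
	if (w->cancelled)
		return false;
	w->cancelled = true;
	return true;
}

/*
 * Retry until a complete pass over the list cancels nothing, mirroring
 * the while (1) / ret / cond_resched() shape of the helper above.
 */
static void try_cancel_all(struct work *list)
{
	while (1) {
		bool ret = false;

		for (struct work *w = list; w; w = w->next)
			ret |= cancel_one(w);
		if (!ret)
			break;
		sched_yield();	/* userspace stand-in for cond_resched() */
	}
}

int main(void)
{
	struct work b = { NULL, false };
	struct work a = { &b, false };

	try_cancel_all(&a);	/* pass 1 cancels both, pass 2 terminates */
	return 0;
}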
static int io_uring_count_inflight(struct io_ring_ctx *ctx,
				   struct task_struct *task,
				   struct files_struct *files)

static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct task_struct *task,
				  struct files_struct *files)
{
	while (!list_empty_careful(&ctx->inflight_list)) {
-		struct io_task_cancel cancel = { .task = task, .files = files };
		DEFINE_WAIT(wait);
		int inflight;

		inflight = io_uring_count_inflight(ctx, task, files);
		if (!inflight)
			break;

-		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-		io_poll_remove_all(ctx, task, files);
-		io_kill_timeouts(ctx, task, files);
-		io_cqring_overflow_flush(ctx, true, task, files);
-		/* cancellations _may_ trigger task work */
-		io_run_task_work();
-
+		io_uring_try_cancel_requests(ctx, task, files);
		prepare_to_wait(&task->io_uring->wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (inflight == io_uring_count_inflight(ctx, task, files))
			schedule();
		finish_wait(&task->io_uring->wait, &wait);
	}
}
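The wait in io_uring_cancel_files() is the classic no-lost-wakeup pattern: sample the in-flight count, run a cancellation pass, arm the wait with prepare_to_wait(), and re-sample before committing to schedule(), so a request that completes in the window simply skips the sleep. Userspace gets the same guarantee from a condition variable, whose wait re-checks the predicate after every wakeup; a rough pthreads analogue (inflight and these helpers are illustrative, not io_uring code):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int inflight;	/* requests still outstanding */

/* Completion path: account for one finished request and wake waiters. */
static void complete_one(void)
{
	pthread_mutex_lock(&lock);
	inflight--;
	pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

/*
 * Cancellation path: pthread_cond_wait() atomically publishes the
 * waiter and re-checks the predicate after every wakeup, which is
 * exactly what the open-coded prepare_to_wait()/schedule() dance
 * in the kernel loop above guarantees.
 */
static void wait_until_drained(void)
{
	pthread_mutex_lock(&lock);
	while (inflight)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}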
-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
-					    struct task_struct *task)
-{
-	while (1) {
-		struct io_task_cancel cancel = { .task = task, .files = NULL, };
-		enum io_wq_cancel cret;
-		bool ret = false;
-
-		if (ctx->io_wq) {
-			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
-					       &cancel, true);
-			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
-		}
-
-		/* SQPOLL thread does its own polling */
-		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
-			while (!list_empty_careful(&ctx->iopoll_list)) {
-				io_iopoll_try_reap_events(ctx);
-				ret = true;
-			}
-		}
-
-		ret |= io_poll_remove_all(ctx, task, NULL);
-		ret |= io_kill_timeouts(ctx, task, NULL);
-		ret |= io_run_task_work();
-		if (!ret)
-			break;
-		cond_resched();
-	}
-}
-
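With both callers converted, the old task-only loop can go: its body survives unchanged inside io_uring_try_cancel_requests(), where files == NULL recovers the old matching behaviour, and the per-pass CQ overflow flush is now shared by both paths, which is why the one-shot flush is dropped from the caller below.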
static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);

	}

	io_cancel_defer_files(ctx, task, files);
-	io_cqring_overflow_flush(ctx, true, task, files);
	io_uring_cancel_files(ctx, task, files);
	if (!files)
-		__io_uring_cancel_task_requests(ctx, task);
+		io_uring_try_cancel_requests(ctx, task, NULL);

	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
		atomic_dec(&task->io_uring->in_idle);