if (req->flags & REQ_F_FORCE_ASYNC)
req->work.flags |= IO_WQ_WORK_CONCURRENT;
- if (req->file && !io_req_ffs_set(req))
- req->flags |= io_file_get_flags(req->file) << REQ_F_SUPPORT_NOWAIT_BIT;
+ if (req->file && !(req->flags & REQ_F_FIXED_FILE))
+ req->flags |= io_file_get_flags(req->file);
if (req->file && (req->flags & REQ_F_ISREG)) {
bool should_hash = def->hash_reg_file;
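
This first hunk appears to be io_prep_async_work(). The removed io_req_ffs_set() helper is simply open-coded here; reconstructed from the replacement line, it presumably read:

        /* kernel-context sketch, reconstructed from the open-coded check above */
        static inline bool io_req_ffs_set(struct io_kiocb *req)
        {
                return req->flags & REQ_F_FIXED_FILE;
        }

Fixed files can skip io_file_get_flags() at prep time because their flag bits were captured when the file was registered and are OR'ed back in at lookup (see the io_file_get_fixed() hunk further down). Note also that the `<< REQ_F_SUPPORT_NOWAIT_BIT` shift disappears: io_file_get_flags() now returns REQ_F_* bits directly, so the REQ_F_ISREG test just below keeps working. The next hunk is unrelated; it appears to be the deferred-completion path of io_aux_cqe().
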
struct io_ring_ctx *ctx = req->ctx;
u64 user_data = req->cqe.user_data;
struct io_uring_cqe *cqe;
- unsigned int length;
if (!defer)
return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
- length = ARRAY_SIZE(ctx->submit_state.cqes);
-
lockdep_assert_held(&ctx->uring_lock);
- if (ctx->submit_state.cqes_count == length) {
+ if (ctx->submit_state.cqes_count == ARRAY_SIZE(ctx->submit_state.cqes)) {
__io_cq_lock(ctx);
__io_flush_post_cqes(ctx);
/* no need to flush - flush is deferred */
__io_cq_unlock_post(ctx);
}
}
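
For background: with `defer` set, completions are not posted straight to the CQ ring but accumulate in the small ctx->submit_state.cqes array under uring_lock, and are flushed in one batch once the array fills. The change above only drops the `length` temporary in favor of comparing against ARRAY_SIZE() directly. A toy userspace model of the flush-when-full pattern (names and the array size are illustrative, not the kernel's):

        #include <stdio.h>

        #define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

        /* stand-ins for ctx->submit_state.cqes / cqes_count */
        static int cqes[16];
        static unsigned int cqes_count;

        static void flush_post_cqes(void)
        {
                for (unsigned int i = 0; i < cqes_count; i++)
                        printf("post cqe res=%d\n", cqes[i]);
                cqes_count = 0;
        }

        static void defer_post_cqe(int res)
        {
                /* same shape as the patched check: compare the fill level
                 * against the array size, no cached local needed */
                if (cqes_count == ARRAY_SIZE(cqes))
                        flush_post_cqes();
                cqes[cqes_count++] = res;
        }

        int main(void)
        {
                for (int i = 0; i < 40; i++)
                        defer_post_cqe(i);
                flush_post_cqes();      /* drain the tail */
                return 0;
        }
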
-/*
- * If we tracked the file through the SCM inflight mechanism, we could support
- * any file. For now, just ensure that anything potentially problematic is done
- * inline.
- */
-static bool __io_file_supports_nowait(struct file *file, umode_t mode)
-{
- /* any ->read/write should understand O_NONBLOCK */
- if (file->f_flags & O_NONBLOCK)
- return true;
- return file->f_mode & FMODE_NOWAIT;
-}
-
-/*
- * If we tracked the file through the SCM inflight mechanism, we could support
- * any file. For now, just ensure that anything potentially problematic is done
- * inline.
- */
unsigned int io_file_get_flags(struct file *file)
{
- umode_t mode = file_inode(file)->i_mode;
unsigned int res = 0;
- if (S_ISREG(mode))
- res |= FFS_ISREG;
- if (__io_file_supports_nowait(file, mode))
- res |= FFS_NOWAIT;
+ if (S_ISREG(file_inode(file)->i_mode))
+ res |= REQ_F_ISREG;
+ if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
+ res |= REQ_F_SUPPORT_NOWAIT;
return res;
}
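
Previously io_file_get_flags() returned the low FFS_* bits and every caller shifted them into the request flags; that only worked because the FFS_* layout was kept aligned with the REQ_F_* bits by convention. Returning REQ_F_SUPPORT_NOWAIT / REQ_F_ISREG directly removes the implicit coupling (the `mode` local goes away since it had a single user left). A standalone check of the invariant the old code leaned on; the bit positions here are illustrative, the authoritative enum lives in include/linux/io_uring_types.h:

        #include <assert.h>
        #include <stdio.h>

        /* illustrative positions, not the kernel's authoritative values */
        #define REQ_F_SUPPORT_NOWAIT_BIT        21
        #define REQ_F_SUPPORT_NOWAIT    (1u << REQ_F_SUPPORT_NOWAIT_BIT)
        #define REQ_F_ISREG             (1u << (REQ_F_SUPPORT_NOWAIT_BIT + 1))

        /* the old low-bit values io_file_get_flags() used to return */
        #define FFS_NOWAIT      0x1u
        #define FFS_ISREG       0x2u

        int main(void)
        {
                /* the old `flags << REQ_F_SUPPORT_NOWAIT_BIT` was only
                 * correct because the two layouts lined up bit for bit */
                assert((FFS_NOWAIT << REQ_F_SUPPORT_NOWAIT_BIT) == REQ_F_SUPPORT_NOWAIT);
                assert((FFS_ISREG << REQ_F_SUPPORT_NOWAIT_BIT) == REQ_F_ISREG);
                puts("FFS_* and REQ_F_* layouts agree");
                return 0;
        }

The fixed-file lookup hunk that follows consumes the same bits through the new io_slot_flags() helper.
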
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
+ struct io_fixed_file *slot;
struct file *file = NULL;
- unsigned long file_ptr;
io_ring_submit_lock(ctx, issue_flags);
if (unlikely((unsigned int)fd >= ctx->nr_user_files))
goto out;
fd = array_index_nospec(fd, ctx->nr_user_files);
- file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
- file = (struct file *) (file_ptr & FFS_MASK);
- file_ptr &= ~FFS_MASK;
- /* mask in overlapping REQ_F and FFS bits */
- req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
+ slot = io_fixed_file_slot(&ctx->file_table, fd);
+ file = io_slot_file(slot);
+ req->flags |= io_slot_flags(slot);
io_req_set_rsrc_node(req, ctx, 0);
out:
io_ring_submit_unlock(ctx, issue_flags);
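
io_slot_file() and io_slot_flags() encapsulate the pointer tagging that was open-coded here: a slot's file_ptr packs the flag bits into the low, alignment-guaranteed bits of the struct file pointer. A minimal userspace model of that packing (struct and helper names are stand-ins, not the kernel's; the real helpers live in io_uring/filetable.h):

        #include <assert.h>
        #include <stdio.h>

        #define FFS_NOWAIT      0x1UL
        #define FFS_ISREG       0x2UL
        #define FFS_MASK        (~(FFS_NOWAIT | FFS_ISREG))

        struct fixed_slot { unsigned long file_ptr; };  /* cf. io_fixed_file */

        static void slot_set(struct fixed_slot *slot, void *file, unsigned long flags)
        {
                /* pointers to suitably aligned objects have their low two
                 * bits clear, so those bits can carry the per-file flags */
                slot->file_ptr = (unsigned long)file | (flags & ~FFS_MASK);
        }

        static void *slot_file(struct fixed_slot *slot)
        {
                return (void *)(slot->file_ptr & FFS_MASK);
        }

        static unsigned long slot_flags(struct fixed_slot *slot)
        {
                return slot->file_ptr & ~FFS_MASK;
        }

        int main(void)
        {
                static long dummy;      /* aligned well past 4 bytes */
                struct fixed_slot slot;

                slot_set(&slot, &dummy, FFS_NOWAIT | FFS_ISREG);
                assert(slot_file(&slot) == (void *)&dummy);
                printf("flags=%#lx\n", slot_flags(&slot));
                return 0;
        }

The win is that callers no longer need to know the encoding (contrast the removed "mask in overlapping REQ_F and FFS bits" shift above); any change to it is now confined to the two helpers. The remaining hunks move on to ring teardown in io_ring_exit_work().
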
/* there is little hope left, don't run it too often */
interval = HZ * 60;
}
- } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
+ /*
+ * This is really an uninterruptible wait, as it has to be
+ * complete. But it's also run from a kworker, which doesn't
+ * take signals, so it's fine to make it interruptible. This
+ * avoids scenarios where we knowingly can wait much longer
+ * on completions, for example if someone does a SIGSTOP on
+ * a task that needs to finish task_work to make this loop
+ * complete. That's a synthetic situation that should not
+ * cause a stuck task backtrace, and hence a potential panic
+ * on stuck tasks if that is enabled.
+ */
+ } while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
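
The loop condition only distinguishes zero from non-zero, which still does the right thing with the new call: per the completion API (kernel/sched/completion.c), wait_for_completion_interruptible_timeout() returns 0 on timeout, a positive number of remaining jiffies on completion, and -ERESTARTSYS if a signal is pending. A kworker never hits the signal case, so the practical change is that the sleep is now TASK_INTERRUPTIBLE, which the hung-task watchdog (CONFIG_DETECT_HUNG_TASK, and any panic-on-hung-task setting) does not sample. A decoding sketch of the return values:

        long ret = wait_for_completion_interruptible_timeout(&ctx->ref_comp,
                                                             interval);
        if (ret > 0) {
                /* completed; ret is the number of jiffies left */
        } else if (!ret) {
                /* timed out: requests still hold ctx references, retry */
        } else {
                /* -ERESTARTSYS: signal pending; cannot happen in a
                 * kworker, and would exit the loop like completion does */
        }
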
init_completion(&exit.completion);
init_task_work(&exit.task_work, io_tctx_exit_cb);
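
For context, `exit` pairs a task_work item with a completion: io_ring_exit_work() queues the callback to each task that still has this ring in its tctx list, then blocks until the callback has run in that task's context. Its shape in io_uring.c is roughly:

        struct io_tctx_exit {
                struct callback_head    task_work;
                struct completion       completion;
                struct io_ring_ctx      *ctx;
        };

io_tctx_exit_cb() drops the task's tctx node for this ring and then calls complete(&work->completion), which is what the wait below pairs with.
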
continue;
mutex_unlock(&ctx->uring_lock);
- wait_for_completion(&exit.completion);
+ /*
+ * See comment above for
+ * wait_for_completion_interruptible_timeout() on why this
+ * wait is marked as interruptible.
+ */
+ wait_for_completion_interruptible(&exit.completion);
mutex_lock(&ctx->uring_lock);
}
mutex_unlock(&ctx->uring_lock);
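
Same reasoning as above, minus the timeout: wait_for_completion_interruptible() returns 0 once the callback has completed, or -ERESTARTSYS on a pending signal. Ignoring the return value is deliberate here; a sketch of the assumption being made, not new logic:

        /* kworker: no signals can be pending, so this effectively
         * behaves like wait_for_completion(), but sleeps in
         * TASK_INTERRUPTIBLE and is invisible to the hung-task check */
        wait_for_completion_interruptible(&exit.completion);
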