REQ_F_CUR_POS_BIT,
REQ_F_NOWAIT_BIT,
REQ_F_LINK_TIMEOUT_BIT,
- REQ_F_ISREG_BIT,
REQ_F_NEED_CLEANUP_BIT,
REQ_F_POLLED_BIT,
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_LTIMEOUT_ACTIVE_BIT,
REQ_F_COMPLETE_INLINE_BIT,
REQ_F_REISSUE_BIT,
+ /* keep async read/write and isreg together and in order */
+ /*
+  * NOTE: the following three bits must stay contiguous and in this exact
+  * order: io_file_get() ORs the slot's FFS_* tag bits, shifted left by
+  * REQ_F_ASYNC_READ_BIT, straight into req->flags, so FFS_ASYNC_READ,
+  * FFS_ASYNC_WRITE and FFS_ISREG map 1:1 onto the three flags below.
+  */
+ REQ_F_ASYNC_READ_BIT,
+ REQ_F_ASYNC_WRITE_BIT,
+ REQ_F_ISREG_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
/* has or had linked timeout */
REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
- /* regular file */
- REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
/* needs cleanup */
REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
/* already went through poll handler */
REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
/* caller should reissue async */
REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
+ /* supports async reads */
+ REQ_F_ASYNC_READ = BIT(REQ_F_ASYNC_READ_BIT),
+ /* supports async writes */
+ REQ_F_ASYNC_WRITE = BIT(REQ_F_ASYNC_WRITE_BIT),
+ /* regular file */
+ REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
};
struct async_poll {
* any file. For now, just ensure that anything potentially problematic is done
* inline.
*/
-static bool io_file_supports_async(struct file *file, int rw)
+/*
+ * Renamed with a __ prefix: this is now the slow path that inspects the
+ * file itself (inode mode, f_op); the new io_file_supports_async() wrapper
+ * consults the cached REQ_F_ASYNC_* request flags first.  NOTE(review):
+ * this hunk elides most of the body — only the final write_iter check is
+ * visible here; presumably the omitted context handles the READ case.
+ */
+static bool __io_file_supports_async(struct file *file, int rw)
{
umode_t mode = file_inode(file)->i_mode;
return file->f_op->write_iter != NULL;
}
+/*
+ * Does the file support non-blocking reads/writes?  For registered (fixed)
+ * files the answer was computed once at registration time and cached in
+ * req->flags (REQ_F_ASYNC_READ/WRITE) by io_file_get(), letting the fast
+ * path skip the per-I/O __io_file_supports_async() file inspection.
+ */
+static bool io_file_supports_async(struct io_kiocb *req, int rw)
+{
+ if (rw == READ && (req->flags & REQ_F_ASYNC_READ))
+ return true;
+ else if (rw == WRITE && (req->flags & REQ_F_ASYNC_WRITE))
+ return true;
+
+ return __io_file_supports_async(req->file, rw);
+}
+
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned ioprio;
int ret;
+ /*
+  * REQ_F_ISREG may already have been set from the slot's FFS_ISREG tag
+  * bit in io_file_get(); only dereference the inode when it wasn't
+  * (e.g. non-fixed files, or 32-bit where FFS_ISREG is 0).
+  */
- if (S_ISREG(file_inode(file)->i_mode))
+ if (!(req->flags & REQ_F_ISREG) && S_ISREG(file_inode(file)->i_mode))
req->flags |= REQ_F_ISREG;
kiocb->ki_pos = READ_ONCE(sqe->off);
kiocb->ki_flags |= IOCB_NOWAIT;
/* If the file doesn't support async, just async punt */
+ /* pass the req so the cached REQ_F_ASYNC_READ flag can short-circuit */
- if (force_nonblock && !io_file_supports_async(req->file, READ)) {
+ if (force_nonblock && !io_file_supports_async(req, READ)) {
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
return ret ?: -EAGAIN;
}
kiocb->ki_flags |= IOCB_NOWAIT;
/* If the file doesn't support async, just async punt */
- if (force_nonblock && !io_file_supports_async(req->file, WRITE))
+ if (force_nonblock && !io_file_supports_async(req, WRITE))
goto copy_iov;
/* file path doesn't support NOWAIT for non-direct_IO */
else
return false;
/* if we can't nonblock try, then no point in arming a poll handler */
- if (!io_file_supports_async(req->file, rw))
+ if (!io_file_supports_async(req, rw))
return false;
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
}
}
+/*
+ * Tag bits stored in the low bits of each fixed-file table slot pointer.
+ * struct file pointers have at least 4-byte alignment, so bits 0-1 are
+ * always free; bit 2 is only usable under CONFIG_64BIT (8-byte
+ * alignment), hence FFS_ISREG degrades to 0 on 32-bit and REQ_F_ISREG is
+ * recomputed lazily in io_prep_rw() instead.  FFS_MASK strips the tag
+ * bits to recover the real pointer.
+ */
+#define FFS_ASYNC_READ 0x1UL
+#define FFS_ASYNC_WRITE 0x2UL
+#ifdef CONFIG_64BIT
+#define FFS_ISREG 0x4UL
+#else
+#define FFS_ISREG 0x0UL
+#endif
+#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
+
+
static inline struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
unsigned i)
{
static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
int index)
{
- return *io_fixed_file_slot(ctx->file_data, index);
+ /* the slot now holds a tagged pointer; mask off the FFS_* bits */
+ struct file **file_slot = io_fixed_file_slot(ctx->file_data, index);
+
+ return (struct file *) ((unsigned long) *file_slot & FFS_MASK);
}
static struct file *io_file_get(struct io_submit_state *state,
struct file *file;
if (fixed) {
+ unsigned long file_ptr;
+
if (unlikely((unsigned int)fd >= ctx->nr_user_files))
return NULL;
fd = array_index_nospec(fd, ctx->nr_user_files);
- file = io_file_from_index(ctx, fd);
+ file_ptr = (unsigned long) *io_fixed_file_slot(ctx->file_data, fd);
+ file = (struct file *) (file_ptr & FFS_MASK);
+ file_ptr &= ~FFS_MASK;
+ /* mask in overlapping REQ_F and FFS bits */
+ /*
+  * FFS_ASYNC_READ/WRITE/ISREG are defined in the same relative order
+  * as REQ_F_ASYNC_READ/WRITE/ISREG, so one shift by
+  * REQ_F_ASYNC_READ_BIT converts the tag bits into request flags.
+  */
+ req->flags |= (file_ptr << REQ_F_ASYNC_READ_BIT);
io_set_resource_node(req);
} else {
trace_io_uring_file_get(ctx, fd);
goto out_free;
for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
+ unsigned long file_ptr;
+
if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
ret = -EFAULT;
goto out_fput;
fput(file);
goto out_fput;
}
- *io_fixed_file_slot(file_data, i) = file;
+ /*
+  * Compute async/isreg capability once at registration time and stash
+  * the results as FFS_* tag bits in the slot pointer, so the I/O fast
+  * path never has to touch f_op or the inode for fixed files.
+  */
+ file_ptr = (unsigned long) file;
+ if (__io_file_supports_async(file, READ))
+ file_ptr |= FFS_ASYNC_READ;
+ if (__io_file_supports_async(file, WRITE))
+ file_ptr |= FFS_ASYNC_WRITE;
+ if (S_ISREG(file_inode(file)->i_mode))
+ file_ptr |= FFS_ISREG;
+ *io_fixed_file_slot(file_data, i) = (struct file *) file_ptr;
}
ret = io_sqe_files_scm(ctx);
file_slot = io_fixed_file_slot(ctx->file_data, i);
if (*file_slot) {
- err = io_queue_file_removal(data, *file_slot);
+ /* strip the FFS_* tag bits before handing the file off for removal */
+ file = (struct file *) ((unsigned long) *file_slot & FFS_MASK);
+ err = io_queue_file_removal(data, file);
if (err)
break;
*file_slot = NULL;
seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
- struct file *f = *io_fixed_file_slot(ctx->file_data, i);
+ /* go through the accessor so the FFS_* tag bits are masked out */
+ struct file *f = io_file_from_index(ctx, i);
if (f)
seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);