// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

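/*
 * Per-request state for the open and close opcodes; both live in the
 * io_kiocb command area and are recovered via io_kiocb_to_cmd().
 */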
struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

static bool io_openat_force_async(struct io_open *open)
{
	/*
	 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
	 * it'll always -EAGAIN. Note that we test for __O_TMPFILE because
	 * O_TMPFILE includes O_DIRECTORY, which isn't a flag we need to
	 * force async for.
	 */
	return open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE);
}

static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	if (io_openat_force_async(open))
		req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

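/* IORING_OP_OPENAT: open flags and mode are passed inline in the SQE. */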
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

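/*
 * IORING_OP_OPENAT2: userspace passes a struct open_how by pointer,
 * copied with extensible-struct (copy_struct_from_user) semantics.
 */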
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

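/*
 * Issue the open. A nonblocking attempt restricts path lookup to the
 * dcache (LOOKUP_CACHED); on -EAGAIN the request falls back to async
 * context for a blocking retry.
 */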
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		WARN_ON_ONCE(io_openat_force_async(open));
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

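	/*
	 * A fixed-slot install bypasses the normal fd table, so an unused
	 * fd is reserved only for the fd_install() path.
	 */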
	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
					  open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

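/* IORING_OP_OPENAT shares the issue path with openat2. */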
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

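/* Drop the filename if the request is torn down before io_openat2() ran. */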
void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

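/* Remove a fixed-file slot; the ring submit lock serializes table updates. */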
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

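/* The SQE file_index is 1-based: slot 0 means "not a fixed file". */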
static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

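/* A close targets either a regular fd or a fixed slot, never both. */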
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

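/*
 * Close a regular fd or a fixed slot. Files with a ->flush() op may
 * block on close, so those are punted to async context rather than
 * being closed from the nonblocking issue path.
 */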
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct fdtable *fdt;
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = rcu_dereference_protected(fdt->fd[close->fd],
			lockdep_is_held(&files->file_lock));
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = __close_fd_get_file(close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}