// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

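/*
 * Per-request state for the open and close opcodes, stored in the
 * io_kiocb command area and retrieved via io_kiocb_to_cmd().
 */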
struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

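/*
 * Common prep for IORING_OP_OPENAT and IORING_OP_OPENAT2: validates the
 * SQE, copies the pathname from userspace with getname(), and records the
 * target fixed-file slot (if any). open->how must already have been filled
 * in by the opcode-specific caller.
 */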
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

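/*
 * IORING_OP_OPENAT: sqe->fd is the directory fd, sqe->addr the pathname,
 * sqe->open_flags the open(2) flags and sqe->len the create mode. They
 * are folded into a struct open_how before the common prep runs.
 */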
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

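/*
 * IORING_OP_OPENAT2: sqe->addr2 points to a user struct open_how of
 * sqe->len bytes. copy_struct_from_user() accepts both older (smaller)
 * and newer (larger, provided the tail is zero-filled) struct layouts.
 */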
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

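/*
 * Issue path for both open variants. In nonblocking context the lookup is
 * first attempted with LOOKUP_CACHED and O_NONBLOCK; on -EAGAIN the request
 * is retried from a context that may block, unless the application itself
 * asked for RESOLVE_CACHED semantics.
 */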
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		/*
		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
		 * it'll always return -EAGAIN.
		 */
		if (open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
			return -EAGAIN;
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	/* a fixed-slot open installs into the ring's file table, not an fd */
	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;
	fsnotify_open(file);

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
					  open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

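/* IORING_OP_OPENAT is issued as an openat2 with a pre-built open_how */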
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

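/* drop the getname() reference if the request never made it through issue */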
void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

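/*
 * Remove a file from the ring's fixed file table; the submit lock
 * serializes this against other updates to the table.
 */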
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

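/*
 * IORING_OP_CLOSE takes either a regular fd (sqe->fd) or a fixed file
 * slot (sqe->file_index, offset by one); specifying both is rejected.
 */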
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

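/*
 * Closing a file whose ->flush() might block cannot be done from the
 * nonblocking issue path, so such files are punted to async context.
 * io_uring's own files are rejected with -EBADF, so a ring cannot close
 * its own fd from within a submission.
 */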
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct fdtable *fdt;
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = rcu_dereference_protected(fdt->fd[close->fd],
					 lockdep_is_held(&files->file_lock));
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = __close_fd_get_file(close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}