// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

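/*
 * Opens that must create, truncate, or make a tmpfile cannot complete
 * in the nonblocking fast path, so they are always punted to async.
 */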
static bool io_openat_force_async(struct io_open *open)
{
	/*
	 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
	 * it will always return -EAGAIN.
	 */
	return open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE);
}

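/*
 * Common prep for OPENAT and OPENAT2: validate the SQE, copy the
 * filename from userspace, and note whether the result should be
 * installed into a fixed file slot rather than a normal fd.
 */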
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	if (io_openat_force_async(open))
		req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

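/*
 * IORING_OP_OPENAT carries open(2)-style flags and mode directly in
 * the SQE; convert them to a struct open_how before the common prep.
 */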
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

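/*
 * IORING_OP_OPENAT2 instead passes a pointer to an extensible
 * struct open_how, copied with copy_struct_from_user() so both
 * smaller (older) and larger (newer) userspace layouts are handled.
 */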
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

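/*
 * Issue path shared by OPENAT and OPENAT2. A nonblocking attempt
 * restricts the lookup to the dcache via LOOKUP_CACHED; if that
 * fails with -EAGAIN, the request is retried from the async worker.
 */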
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		WARN_ON_ONCE(io_openat_force_async(open));
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but it seems like
		 * a marginal gain for something that is now known to be a
		 * slower path. So just put it, and we'll get a new one when
		 * we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
					  open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

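/* OPENAT is issued exactly like OPENAT2 once prep has built open->how */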
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

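/*
 * Called if the request is torn down with REQ_F_NEED_CLEANUP still
 * set, i.e. before the issue path dropped the filename reference.
 */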
void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

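/*
 * Removing a fixed file mutates the ring's file table, so it is done
 * under the ring submission lock.
 */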
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

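/* file_slot in the SQE is 1-based; 0 means "not a fixed-file close" */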
static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

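/*
 * IORING_OP_CLOSE takes either a regular fd or a fixed file slot,
 * never both.
 */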
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

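/*
 * Closing a file whose ->flush() might block can't be done from the
 * nonblocking path, and closing the ring's own fd is rejected with
 * -EBADF.
 */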
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct fdtable *fdt;
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = rcu_dereference_protected(fdt->fd[close->fd],
					 lockdep_is_held(&files->file_lock));
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = __close_fd_get_file(close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}