io_uring: kill extra io_uring_types.h includes
[platform/kernel/linux-starfive.git] io_uring/rw.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
        u64                             addr;
        u32                             len;
        rwf_t                           flags;
};

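/* REQ_F_SUPPORT_NOWAIT is set when the file is resolved, via io_file_get_flags() */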
static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
        return req->flags & REQ_F_SUPPORT_NOWAIT;
}

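/*
 * Prep for all read/write variants: pull the offset, buffer index, ioprio,
 * address, length, and rw flags out of the SQE. For the fixed-buffer
 * variants, also resolve the registered buffer and grab a rsrc node
 * reference up front.
 */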
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);
        unsigned ioprio;
        int ret;

        rw->kiocb.ki_pos = READ_ONCE(sqe->off);
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);

        if (req->opcode == IORING_OP_READ_FIXED ||
            req->opcode == IORING_OP_WRITE_FIXED) {
                struct io_ring_ctx *ctx = req->ctx;
                u16 index;

                if (unlikely(req->buf_index >= ctx->nr_user_bufs))
                        return -EFAULT;
                index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
                req->imu = ctx->user_bufs[index];
                io_req_set_rsrc_node(req, ctx, 0);
        }

        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
                if (ret)
                        return ret;

                rw->kiocb.ki_ioprio = ioprio;
        } else {
                rw->kiocb.ki_ioprio = get_current_ioprio();
        }

        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);
        return 0;
}

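/* opdef ->cleanup hook: free any iovec allocated for an async readv/writev */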
void io_readv_writev_cleanup(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;

        kfree(io->free_iovec);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
        switch (ret) {
        case -EIOCBQUEUED:
                break;
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /*
                 * We can't just restart the syscall, since previously
                 * submitted sqes may already be in progress. Just fail this
                 * IO with EINTR.
                 */
                ret = -EINTR;
                fallthrough;
        default:
                kiocb->ki_complete(kiocb, ret);
        }
}

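/*
 * Pick the file position to use for this IO: the explicit offset from the
 * SQE, the file's current position if -1 was passed (non-stream files
 * only), or no position at all for stream-like files.
 */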
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);

        if (rw->kiocb.ki_pos != -1)
                return &rw->kiocb.ki_pos;

        if (!(req->file->f_mode & FMODE_STREAM)) {
                req->flags |= REQ_F_CUR_POS;
                rw->kiocb.ki_pos = req->file->f_pos;
                return &rw->kiocb.ki_pos;
        }

        rw->kiocb.ki_pos = 0;
        return NULL;
}

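/* punt the request to io-wq via task_work for a blocking retry */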
static void io_req_task_queue_reissue(struct io_kiocb *req)
{
        req->io_task_work.func = io_queue_iowq;
        io_req_task_work_add(req);
}

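/*
 * The block layer can return -EAGAIN even on async submission, e.g. when
 * request tags are temporarily exhausted. The helpers below decide whether
 * such a request can safely be re-imported and reissued.
 */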
#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;

        if (!req_has_async_data(req))
                return !io_req_prep_async(req);
        iov_iter_restore(&io->s.iter, &io->s.iter_state);
        return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
        umode_t mode = file_inode(req->file)->i_mode;
        struct io_ring_ctx *ctx = req->ctx;

        if (!S_ISBLK(mode) && !S_ISREG(mode))
                return false;
        if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
            !(ctx->flags & IORING_SETUP_IOPOLL)))
                return false;
        /*
         * If ref is dying, we might be running poll reap from the exit work.
         * Don't attempt to reissue from that path, just let it fail with
         * -EAGAIN.
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return false;
        /*
         * Play it safe and assume it's not safe to re-import and reissue if
         * we're not in the original thread group (or not in task context).
         */
        if (!same_thread_group(req->task, current) || !in_task())
                return false;
        return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
        return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
        return false;
}
#endif

static void kiocb_end_write(struct io_kiocb *req)
{
        /*
         * Tell lockdep we inherited freeze protection from submission
         * thread.
         */
        if (req->flags & REQ_F_ISREG) {
                struct super_block *sb = file_inode(req->file)->i_sb;

                __sb_writers_acquired(sb, SB_FREEZE_WRITE);
                sb_end_write(sb);
        }
}

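/*
 * Common completion handling: release write freeze protection, fire the
 * fsnotify event, and deal with short or failed IO. Returns true if the
 * request was marked for reissue, in which case the caller must not
 * complete it.
 */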
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);

        if (rw->kiocb.ki_flags & IOCB_WRITE) {
                kiocb_end_write(req);
                fsnotify_modify(req->file);
        } else {
                fsnotify_access(req->file);
        }
        if (unlikely(res != req->cqe.res)) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
                        req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
                        return true;
                }
                req_set_fail(req);
                req->cqe.res = res;
        }
        return false;
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (__io_complete_rw_common(req, res))
                return;
        io_req_set_res(req, res, 0);
        req->io_task_work.func = io_req_task_complete;
        io_req_task_prio_work_add(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
        if (unlikely(res != req->cqe.res)) {
                if (res == -EAGAIN && io_rw_should_reissue(req)) {
                        req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
                        return;
                }
                req->cqe.res = res;
        }

        /* order with io_iopoll_complete() checking ->iopoll_completed */
        smp_store_release(&req->iopoll_completed, 1);
}

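/*
 * Finish a request after the lower layer returned: fold in any previously
 * completed partial IO, update f_pos if needed, then either post the
 * completion or queue the request for reissue.
 */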
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                      unsigned int issue_flags)
{
        struct io_async_rw *io = req->async_data;
        struct io_rw *rw = io_kiocb_to_cmd(req);

        /* add previously done IO, if any */
        if (req_has_async_data(req) && io->bytes_done > 0) {
                if (ret < 0)
                        ret = io->bytes_done;
                else
                        ret += io->bytes_done;
        }

        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
        if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
                if (!__io_complete_rw_common(req, ret)) {
                        io_req_set_res(req, req->cqe.res,
                                       io_put_kbuf(req, issue_flags));
                        return IOU_OK;
                }
        } else {
                io_rw_done(&rw->kiocb, ret);
        }

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req))
                        io_req_task_queue_reissue(req);
                else
                        io_req_task_queue_fail(req, ret);
        }
        return IOU_ISSUE_SKIP_COMPLETE;
}

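/*
 * Set up a bvec iterator over a fixed (registered) buffer. The pages were
 * pinned at registration time, so this only validates the requested range
 * and positions the iterator; no user memory is touched here.
 */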
static int __io_import_fixed(struct io_kiocb *req, int ddir,
                             struct iov_iter *iter, struct io_mapped_ubuf *imu)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);
        size_t len = rw->len;
        u64 buf_end, buf_addr = rw->addr;
        size_t offset;

        if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
                return -EFAULT;
        /* not inside the mapped region */
        if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
                return -EFAULT;

        /*
         * May not be a start of buffer, set size appropriately
         * and advance us to the beginning.
         */
        offset = buf_addr - imu->ubuf;
        iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

        if (offset) {
                /*
                 * Don't use iov_iter_advance() here, as it's really slow for
                 * using the latter parts of a big fixed buffer - it iterates
                 * over each segment manually. We can cheat a bit here, because
                 * we know that:
                 *
                 * 1) it's a BVEC iter, we set it up
                 * 2) all bvecs are PAGE_SIZE in size, except potentially the
                 *    first and last bvec
                 *
                 * So just find our index, and adjust the iterator afterwards.
                 * If the offset is within the first bvec (or covers the whole
                 * first bvec), just use iov_iter_advance(). This makes it
                 * easier since we can just skip the first segment, which may
                 * not be PAGE_SIZE aligned.
                 */
                const struct bio_vec *bvec = imu->bvec;

                if (offset <= bvec->bv_len) {
                        iov_iter_advance(iter, offset);
                } else {
                        unsigned long seg_skip;

                        /* skip first vec */
                        offset -= bvec->bv_len;
                        seg_skip = 1 + (offset >> PAGE_SHIFT);

                        iter->bvec = bvec + seg_skip;
                        iter->nr_segs -= seg_skip;
                        iter->count -= bvec->bv_len + offset;
                        iter->iov_offset = offset & ~PAGE_MASK;
                }
        }

        return 0;
}

static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
                           unsigned int issue_flags)
{
        if (WARN_ON_ONCE(!req->imu))
                return -EFAULT;
        return __io_import_fixed(req, rw, iter, req->imu);
}

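/*
 * Provided-buffer import when the submitter is a 32-bit compat task: the
 * single iovec pointed to by the SQE is in compat layout, so read it
 * manually before selecting a buffer.
 */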
#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
                                unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);
        struct compat_iovec __user *uiov;
        compat_ssize_t clen;
        void __user *buf;
        size_t len;

        uiov = u64_to_user_ptr(rw->addr);
        if (!access_ok(uiov, sizeof(*uiov)))
                return -EFAULT;
        if (__get_user(clen, &uiov->iov_len))
                return -EFAULT;
        if (clen < 0)
                return -EINVAL;

        len = clen;
        buf = io_buffer_select(req, &len, issue_flags);
        if (!buf)
                return -ENOBUFS;
        rw->addr = (unsigned long) buf;
        iov[0].iov_base = buf;
        rw->len = iov[0].iov_len = (compat_size_t) len;
        return 0;
}
#endif

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
                                      unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);
        struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
        void __user *buf;
        ssize_t len;

        if (copy_from_user(iov, uiov, sizeof(*uiov)))
                return -EFAULT;

        len = iov[0].iov_len;
        if (len < 0)
                return -EINVAL;
        buf = io_buffer_select(req, &len, issue_flags);
        if (!buf)
                return -ENOBUFS;
        rw->addr = (unsigned long) buf;
        iov[0].iov_base = buf;
        rw->len = iov[0].iov_len = len;
        return 0;
}

static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
                                    unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);

        if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
                iov[0].iov_base = u64_to_user_ptr(rw->addr);
                iov[0].iov_len = rw->len;
                return 0;
        }
        if (rw->len != 1)
                return -EINVAL;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                return io_compat_import(req, iov, issue_flags);
#endif

        return __io_iov_buffer_select(req, iov, issue_flags);
}

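/*
 * Import the request's buffer description into an iov_iter. Returns an
 * allocated iovec the caller must free, NULL if the fixed-buffer,
 * single-range, or fast_iov path was taken, or an ERR_PTR on failure.
 */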
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
                                       struct io_rw_state *s,
                                       unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);
        struct iov_iter *iter = &s->iter;
        u8 opcode = req->opcode;
        struct iovec *iovec;
        void __user *buf;
        size_t sqe_len;
        ssize_t ret;

        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
                ret = io_import_fixed(req, ddir, iter, issue_flags);
                if (ret)
                        return ERR_PTR(ret);
                return NULL;
        }

        buf = u64_to_user_ptr(rw->addr);
        sqe_len = rw->len;

        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
                if (io_do_buffer_select(req)) {
                        buf = io_buffer_select(req, &sqe_len, issue_flags);
                        if (!buf)
                                return ERR_PTR(-ENOBUFS);
                        rw->addr = (unsigned long) buf;
                        rw->len = sqe_len;
                }

                ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
                if (ret)
                        return ERR_PTR(ret);
                return NULL;
        }

        iovec = s->fast_iov;
        if (req->flags & REQ_F_BUFFER_SELECT) {
                ret = io_iov_buffer_select(req, iovec, issue_flags);
                if (ret)
                        return ERR_PTR(ret);
                iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len);
                return NULL;
        }

        ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
                              req->ctx->compat);
        if (unlikely(ret < 0))
                return ERR_PTR(ret);
        return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
                                  struct iovec **iovec, struct io_rw_state *s,
                                  unsigned int issue_flags)
{
        *iovec = __io_import_iovec(rw, req, s, issue_flags);
        if (unlikely(IS_ERR(*iovec)))
                return PTR_ERR(*iovec);

        iov_iter_save_state(&s->iter, &s->iter_state);
        return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
        return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
        struct kiocb *kiocb = &rw->kiocb;
        struct file *file = kiocb->ki_filp;
        ssize_t ret = 0;
        loff_t *ppos;

        /*
         * Don't support polled IO through this interface, and we can't
         * support non-blocking either. For the latter, this just causes
         * the kiocb to be handled from an async context.
         */
        if (kiocb->ki_flags & IOCB_HIPRI)
                return -EOPNOTSUPP;
        if ((kiocb->ki_flags & IOCB_NOWAIT) &&
            !(kiocb->ki_filp->f_flags & O_NONBLOCK))
                return -EAGAIN;

        ppos = io_kiocb_ppos(kiocb);

        while (iov_iter_count(iter)) {
                struct iovec iovec;
                ssize_t nr;

                if (!iov_iter_is_bvec(iter)) {
                        iovec = iov_iter_iovec(iter);
                } else {
                        iovec.iov_base = u64_to_user_ptr(rw->addr);
                        iovec.iov_len = rw->len;
                }

                if (ddir == READ) {
                        nr = file->f_op->read(file, iovec.iov_base,
                                              iovec.iov_len, ppos);
                } else {
                        nr = file->f_op->write(file, iovec.iov_base,
                                               iovec.iov_len, ppos);
                }

                if (nr < 0) {
                        if (!ret)
                                ret = nr;
                        break;
                }
                ret += nr;
                if (!iov_iter_is_bvec(iter)) {
                        iov_iter_advance(iter, nr);
                } else {
                        rw->addr += nr;
                        rw->len -= nr;
                        if (!rw->len)
                                break;
                }
                if (nr != iovec.iov_len)
                        break;
        }

        return ret;
}

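/*
 * Copy the current iterator (and backing iovec, if any) into the request's
 * async_data, so the IO can be retried later from a context where the
 * original on-stack state is gone.
 */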
static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
                          const struct iovec *fast_iov, struct iov_iter *iter)
{
        struct io_async_rw *io = req->async_data;

        memcpy(&io->s.iter, iter, sizeof(*iter));
        io->free_iovec = iovec;
        io->bytes_done = 0;
        /* can only be fixed buffers, no need to do anything */
        if (iov_iter_is_bvec(iter))
                return;
        if (!iovec) {
                unsigned iov_off = 0;

                io->s.iter.iov = io->s.fast_iov;
                if (iter->iov != fast_iov) {
                        iov_off = iter->iov - fast_iov;
                        io->s.iter.iov += iov_off;
                }
                if (io->s.fast_iov != fast_iov)
                        memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
                               sizeof(struct iovec) * iter->nr_segs);
        } else {
                req->flags |= REQ_F_NEED_CLEANUP;
        }
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
                             struct io_rw_state *s, bool force)
{
        if (!force && !io_op_defs[req->opcode].prep_async)
                return 0;
        if (!req_has_async_data(req)) {
                struct io_async_rw *iorw;

                if (io_alloc_async_data(req)) {
                        kfree(iovec);
                        return -ENOMEM;
                }

                io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
                iorw = req->async_data;
                /* we've copied and mapped the iter, ensure state is saved */
                iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
        }
        return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
        struct io_async_rw *iorw = req->async_data;
        struct iovec *iov;
        int ret;

        /* submission path, ->uring_lock should already be taken */
        ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
        if (unlikely(ret < 0))
                return ret;

        iorw->bytes_done = 0;
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
        return io_rw_prep_async(req, READ);
}

int io_writev_prep_async(struct io_kiocb *req)
{
        return io_rw_prep_async(req, WRITE);
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO and armed our waitqueue through the
 * iocb. This gets called when the page is unlocked, and we generally expect
 * that to happen when the page IO is completed and the page is now uptodate.
 * This will queue a task_work based retry of the operation, attempting to
 * copy the data again. If the latter fails because the page was NOT uptodate,
 * then we will do a thread based blocking retry of the operation. That's the
 * unexpected slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
                             int sync, void *arg)
{
        struct wait_page_queue *wpq;
        struct io_kiocb *req = wait->private;
        struct io_rw *rw = io_kiocb_to_cmd(req);
        struct wait_page_key *key = arg;

        wpq = container_of(wait, struct wait_page_queue, wait);

        if (!wake_page_match(wpq, key))
                return 0;

        rw->kiocb.ki_flags &= ~IOCB_WAITQ;
        list_del_init(&wait->entry);
        io_req_task_queue(req);
        return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;
        struct wait_page_queue *wait = &io->wpq;
        struct io_rw *rw = io_kiocb_to_cmd(req);
        struct kiocb *kiocb = &rw->kiocb;

        /* never retry for NOWAIT, we just complete with -EAGAIN */
        if (req->flags & REQ_F_NOWAIT)
                return false;

        /* Only for buffered IO */
        if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
                return false;

        /*
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
         */
        if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
                return false;

        wait->wait.func = io_async_buf_func;
        wait->wait.private = req;
        wait->wait.flags = 0;
        INIT_LIST_HEAD(&wait->wait.entry);
        kiocb->ki_flags |= IOCB_WAITQ;
        kiocb->ki_flags &= ~IOCB_NOWAIT;
        kiocb->ki_waitq = wait;
        return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
        struct file *file = rw->kiocb.ki_filp;

        if (likely(file->f_op->read_iter))
                return call_read_iter(file, &rw->kiocb, iter);
        else if (file->f_op->read)
                return loop_rw_iter(READ, rw, iter);
        else
                return -EINVAL;
}

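/* short reads on regular files and block devices should be retried in full */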
static bool need_read_all(struct io_kiocb *req)
{
        return req->flags & REQ_F_ISREG ||
                S_ISBLK(file_inode(req->file)->i_mode);
}

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
        return req->flags & REQ_F_FIXED_FILE;
}

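/*
 * Per-issue kiocb setup: validate the file mode, apply the rw flags, and
 * select the completion handler based on whether the ring uses IOPOLL.
 */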
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);
        struct kiocb *kiocb = &rw->kiocb;
        struct io_ring_ctx *ctx = req->ctx;
        struct file *file = req->file;
        int ret;

        if (unlikely(!file || !(file->f_mode & mode)))
                return -EBADF;

        if (!io_req_ffs_set(req))
                req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;

        kiocb->ki_flags = iocb_flags(file);
        ret = kiocb_set_rw_flags(kiocb, rw->flags);
        if (unlikely(ret))
                return ret;

        /*
         * If the file is marked O_NONBLOCK, still allow retry for it if it
         * supports async. Otherwise it's impossible to use O_NONBLOCK files
         * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
         */
        if ((kiocb->ki_flags & IOCB_NOWAIT) ||
            ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
                req->flags |= REQ_F_NOWAIT;

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
                        return -EOPNOTSUPP;

                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
                kiocb->ki_complete = io_complete_rw_iopoll;
                req->iopoll_completed = 0;
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
                kiocb->ki_complete = io_complete_rw;
        }

        return 0;
}

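/*
 * Issue a read. May run more than once for the same request: first with
 * IO_URING_F_NONBLOCK from the submission path, then again from io-wq or
 * task_work context if the nonblocking attempt or a short read forces a
 * retry.
 */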
int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);
        struct io_rw_state __s, *s = &__s;
        struct iovec *iovec;
        struct kiocb *kiocb = &rw->kiocb;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_async_rw *io;
        ssize_t ret, ret2;
        loff_t *ppos;

        if (!req_has_async_data(req)) {
                ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
                if (unlikely(ret < 0))
                        return ret;
        } else {
                io = req->async_data;
                s = &io->s;

                /*
                 * Safe and required to re-import if we're using provided
                 * buffers, as we dropped the selected one before retry.
                 */
                if (io_do_buffer_select(req)) {
                        ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
                        if (unlikely(ret < 0))
                                return ret;
                }

                /*
                 * We come here from an earlier attempt, restore our state to
                 * match in case it doesn't. It's cheap enough that we don't
                 * need to make this conditional.
                 */
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_READ);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }
        req->cqe.res = iov_iter_count(&s->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req))) {
                        ret = io_setup_async_rw(req, iovec, s, true);
                        return ret ?: -EAGAIN;
                }
                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }

        ret = io_iter_do_read(rw, &s->iter);

        if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
                req->flags &= ~REQ_F_REISSUE;
                /* if we can poll, just do that */
                if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
                /* no retry on NONBLOCK or RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                if (iovec)
                        kfree(iovec);
                return IOU_ISSUE_SKIP_COMPLETE;
        } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
                /* read all, failed, already did sync, or don't want to retry */
                goto done;
        }

        /*
         * Don't depend on the iter state matching what was consumed, or being
         * untouched in case of error. Restore it and we'll advance it
         * manually if we need to.
         */
        iov_iter_restore(&s->iter, &s->iter_state);

        ret2 = io_setup_async_rw(req, iovec, s, true);
        if (ret2)
                return ret2;

        iovec = NULL;
        io = req->async_data;
        s = &io->s;
        /*
         * Now use our persistent iterator and state, if we aren't already.
         * We've restored and mapped the iter to match.
         */

        do {
                /*
                 * We end up here because of a partial read, either from
                 * above or inside this loop. Advance the iter by the bytes
                 * that were consumed.
                 */
                iov_iter_advance(&s->iter, ret);
                if (!iov_iter_count(&s->iter))
                        break;
                io->bytes_done += ret;
                iov_iter_save_state(&s->iter, &s->iter_state);

                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
                        return -EAGAIN;
                }

                /*
                 * Now retry the read with the IOCB_WAITQ parts set in the
                 * iocb. If we get -EIOCBQUEUED, then we'll get a notification
                 * when the desired page gets unlocked. We can also get a
                 * partial read here, and if we do, then just retry at the new
                 * offset.
                 */
                ret = io_iter_do_read(rw, &s->iter);
                if (ret == -EIOCBQUEUED)
                        return IOU_ISSUE_SKIP_COMPLETE;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
                iov_iter_restore(&s->iter, &s->iter_state);
        } while (ret > 0);
done:
        /* it's faster to check here than to delegate to kfree */
        if (iovec)
                kfree(iovec);
        return kiocb_done(req, ret, issue_flags);
}

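/*
 * Issue a write. As with reads, a nonblocking attempt that comes back with
 * -EAGAIN gets its iovec state copied into async_data and is punted to
 * io-wq; writes to regular files additionally take freeze protection here.
 */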
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req);
        struct io_rw_state __s, *s = &__s;
        struct iovec *iovec;
        struct kiocb *kiocb = &rw->kiocb;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        ssize_t ret, ret2;
        loff_t *ppos;

        if (!req_has_async_data(req)) {
                ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
                if (unlikely(ret < 0))
                        return ret;
        } else {
                struct io_async_rw *io = req->async_data;

                s = &io->s;
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_WRITE);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }
        req->cqe.res = iov_iter_count(&s->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req)))
                        goto copy_iov;

                /* file path doesn't support NOWAIT for non-direct IO */
                if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
                    (req->flags & REQ_F_ISREG))
                        goto copy_iov;

                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }

        /*
         * Open-code file_start_write here to grab freeze protection,
         * which will be released by another thread in
         * io_complete_rw().  Fool lockdep by telling it the lock got
         * released so that it doesn't complain about the held lock when
         * we return to userspace.
         */
        if (req->flags & REQ_F_ISREG) {
                sb_start_write(file_inode(req->file)->i_sb);
                __sb_writers_release(file_inode(req->file)->i_sb,
                                        SB_FREEZE_WRITE);
        }
        kiocb->ki_flags |= IOCB_WRITE;

        if (likely(req->file->f_op->write_iter))
                ret2 = call_write_iter(req->file, kiocb, &s->iter);
        else if (req->file->f_op->write)
                ret2 = loop_rw_iter(WRITE, rw, &s->iter);
        else
                ret2 = -EINVAL;

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                ret2 = -EAGAIN;
        }

        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
         */
        if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
                ret2 = -EAGAIN;
        /* no retry on NONBLOCK or RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto copy_iov;
done:
                ret = kiocb_done(req, ret2, issue_flags);
        } else {
copy_iov:
                iov_iter_restore(&s->iter, &s->iter_state);
                ret = io_setup_async_rw(req, iovec, s, false);
                return ret ?: -EAGAIN;
        }
        /* it's reportedly faster than delegating the null check to kfree() */
        if (iovec)
                kfree(iovec);
        return ret;
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
        if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
                     ctx->has_evfd))
                __io_commit_cqring_flush(ctx);

        if (ctx->flags & IORING_SETUP_SQPOLL)
                io_cqring_wake(ctx);
}

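/*
 * Reap completions for requests on the ring's ->iopoll_list. Called with
 * ctx->uring_lock held; returns the number of events posted, or an error.
 */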
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
        struct io_wq_work_node *pos, *start, *prev;
        unsigned int poll_flags = BLK_POLL_NOSLEEP;
        DEFINE_IO_COMP_BATCH(iob);
        int nr_events = 0;

        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list.
         */
        if (ctx->poll_multi_queue || force_nonspin)
                poll_flags |= BLK_POLL_ONESHOT;

        wq_list_for_each(pos, start, &ctx->iopoll_list) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
                struct io_rw *rw = io_kiocb_to_cmd(req);
                int ret;

                /*
                 * Move completed and retryable entries to our local lists.
                 * If we find a request that requires polling, break out
                 * and complete those lists first, if we have entries there.
                 */
                if (READ_ONCE(req->iopoll_completed))
                        break;

                ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
                        poll_flags |= BLK_POLL_ONESHOT;

                /* iopoll may have completed current req */
                if (!rq_list_empty(iob.req_list) ||
                    READ_ONCE(req->iopoll_completed))
                        break;
        }

        if (!rq_list_empty(iob.req_list))
                iob.complete(&iob);
        else if (!pos)
                return 0;

        prev = start;
        wq_list_for_each_resume(pos, prev) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

                /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                if (!smp_load_acquire(&req->iopoll_completed))
                        break;
                nr_events++;
                if (unlikely(req->flags & REQ_F_CQE_SKIP))
                        continue;

                req->cqe.flags = io_put_kbuf(req, 0);
                __io_fill_cqe_req(req->ctx, req);
        }

        if (unlikely(!nr_events))
                return 0;

        io_commit_cqring(ctx);
        io_cqring_ev_posted_iopoll(ctx);
        pos = start ? start->next : ctx->iopoll_list.first;
        wq_list_cut(&ctx->iopoll_list, prev, start);
        io_free_batch_list(ctx, pos);
        return nr_events;
}