// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
        u64                             addr;
        u32                             len;
        rwf_t                           flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
        return req->flags & REQ_F_SUPPORT_NOWAIT;
}

#ifdef CONFIG_COMPAT
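/*
 * For a vectored request using buffer selection from a compat task, pull
 * in the single compat iovec and use its length to cap the size of the
 * buffer that will be selected at issue time.
 */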
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
        struct compat_iovec __user *uiov;
        compat_ssize_t clen;

        uiov = u64_to_user_ptr(rw->addr);
        if (!access_ok(uiov, sizeof(*uiov)))
                return -EFAULT;
        if (__get_user(clen, &uiov->iov_len))
                return -EFAULT;
        if (clen < 0)
                return -EINVAL;

        rw->len = clen;
        return 0;
}
#endif

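/*
 * With IOSQE_BUFFER_SELECT, a vectored request must describe exactly one
 * iovec: copy it in and cap rw->len with its length, so buffer selection
 * at issue time knows the maximum size to hand out.
 */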
static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
        struct iovec __user *uiov;
        struct iovec iov;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->len != 1)
                return -EINVAL;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                return io_iov_compat_buffer_select_prep(rw);
#endif

        uiov = u64_to_user_ptr(rw->addr);
        if (copy_from_user(&iov, uiov, sizeof(*uiov)))
                return -EFAULT;
        rw->len = iov.iov_len;
        return 0;
}

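/*
 * Prepare a read/write style request from its SQE: read the position,
 * buffer index, address, length and rw flags, resolve the registered
 * buffer for the fixed opcodes, and apply any ioprio override.
 */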
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned ioprio;
        int ret;

        rw->kiocb.ki_pos = READ_ONCE(sqe->off);
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);

        if (req->opcode == IORING_OP_READ_FIXED ||
            req->opcode == IORING_OP_WRITE_FIXED) {
                struct io_ring_ctx *ctx = req->ctx;
                u16 index;

                if (unlikely(req->buf_index >= ctx->nr_user_bufs))
                        return -EFAULT;
                index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
                req->imu = ctx->user_bufs[index];
                io_req_set_rsrc_node(req, ctx, 0);
        }

        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
                if (ret)
                        return ret;

                rw->kiocb.ki_ioprio = ioprio;
        } else {
                rw->kiocb.ki_ioprio = get_current_ioprio();
        }

        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);

        /*
         * Have to do this validation here, as by the time this runs in
         * io_read() rw->len might have changed due to buffer selection.
         */
        if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
                ret = io_iov_buffer_select_prep(req);
                if (ret)
                        return ret;
        }

        return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;

        kfree(io->free_iovec);
}

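/*
 * Complete a kiocb that was issued synchronously. -EIOCBQUEUED means the
 * IO went async and ->ki_complete() will be invoked later; anything else
 * is completed here.
 */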
static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
        switch (ret) {
        case -EIOCBQUEUED:
                break;
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /*
                 * We can't just restart the syscall, since previously
                 * submitted sqes may already be in progress. Just fail this
                 * IO with EINTR.
                 */
                ret = -EINTR;
                fallthrough;
        default:
                kiocb->ki_complete(kiocb, ret);
        }
}

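/*
 * Resolve the position for this request: -1 means "use the file position",
 * which is only valid for non-stream files and marks the request to write
 * the position back on completion. Stream files get a NULL position.
 */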
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_pos != -1)
                return &rw->kiocb.ki_pos;

        if (!(req->file->f_mode & FMODE_STREAM)) {
                req->flags |= REQ_F_CUR_POS;
                rw->kiocb.ki_pos = req->file->f_pos;
                return &rw->kiocb.ki_pos;
        }

        rw->kiocb.ki_pos = 0;
        return NULL;
}

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
        req->io_task_work.func = io_queue_iowq;
        io_req_task_work_add(req);
}

#ifdef CONFIG_BLOCK
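/*
 * Ready a request for reissue after a failed nowait attempt: make sure
 * async data exists and rewind the iterator to its saved state so the
 * transfer can be replayed from the start.
 */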
static bool io_resubmit_prep(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;

        if (!req_has_async_data(req))
                return !io_req_prep_async(req);
        iov_iter_restore(&io->s.iter, &io->s.iter_state);
        return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
        umode_t mode = file_inode(req->file)->i_mode;
        struct io_ring_ctx *ctx = req->ctx;

        if (!S_ISBLK(mode) && !S_ISREG(mode))
                return false;
        if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
            !(ctx->flags & IORING_SETUP_IOPOLL)))
                return false;
        /*
         * If ref is dying, we might be running poll reap from the exit work.
         * Don't attempt to reissue from that path, just let it fail with
         * -EAGAIN.
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return false;
        /*
         * Play it safe and assume not safe to re-import and reissue if we're
         * not in the original thread group (or in task context).
         */
        if (!same_thread_group(req->task, current) || !in_task())
                return false;
        return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
        return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
        return false;
}
#endif

static void kiocb_end_write(struct io_kiocb *req)
{
        /*
         * Tell lockdep we inherited freeze protection from submission
         * thread.
         */
        if (req->flags & REQ_F_ISREG) {
                struct super_block *sb = file_inode(req->file)->i_sb;

                __sb_writers_acquired(sb, SB_FREEZE_WRITE);
                sb_end_write(sb);
        }
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_flags & IOCB_WRITE) {
                kiocb_end_write(req);
                fsnotify_modify(req->file);
        } else {
                fsnotify_access(req->file);
        }
}

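/*
 * Common completion checks: on a short or failed result, either flag the
 * request for reissue (returning true, deferring completion) or mark it
 * failed and record the result.
 */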
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
        if (unlikely(res != req->cqe.res)) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
                        /*
                         * Reissue will start accounting again, finish the
                         * current cycle.
                         */
                        io_req_io_end(req);
                        req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
                        return true;
                }
                req_set_fail(req);
                req->cqe.res = res;
        }
        return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
        struct io_async_rw *io = req->async_data;

        /* add previously done IO, if any */
        if (req_has_async_data(req) && io->bytes_done > 0) {
                if (res < 0)
                        res = io->bytes_done;
                else
                        res += io->bytes_done;
        }
        return res;
}

static void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
        io_req_io_end(req);

        if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
                unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

                req->cqe.flags |= io_put_kbuf(req, issue_flags);
        }
        io_req_task_complete(req, ts);
}

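/*
 * ->ki_complete() for the regular (non-IOPOLL) path. May run from any
 * context the lower layers complete in, so the final completion is punted
 * to task_work.
 */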
static void io_complete_rw(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (__io_complete_rw_common(req, res))
                return;
        io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        req->io_task_work.func = io_req_rw_complete;
        __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

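/*
 * ->ki_complete() for the IOPOLL path: no CQE is posted here, just record
 * the result and mark the request completed for io_do_iopoll() to reap.
 */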
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
        if (unlikely(res != req->cqe.res)) {
                if (res == -EAGAIN && io_rw_should_reissue(req)) {
                        req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
                        return;
                }
                req->cqe.res = res;
        }

        /* order with io_iopoll_complete() checking ->iopoll_completed */
        smp_store_release(&req->iopoll_completed, 1);
}

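/*
 * Finalize a request after issue. If it completed synchronously on the
 * non-IOPOLL path, complete it inline; otherwise hand the result to
 * io_rw_done(). A flagged reissue is queued from here as well.
 */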
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                       unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned final_ret = io_fixup_rw_res(req, ret);

        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
        if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
                if (!__io_complete_rw_common(req, ret)) {
                        /*
                         * Safe to call io_end from here as we're inline
                         * from the submission path.
                         */
                        io_req_io_end(req);
                        io_req_set_res(req, final_ret,
                                       io_put_kbuf(req, issue_flags));
                        return IOU_OK;
                }
        } else {
                io_rw_done(&rw->kiocb, ret);
        }

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req))
                        io_req_task_queue_reissue(req);
                else
                        io_req_task_queue_fail(req, final_ret);
        }
        return IOU_ISSUE_SKIP_COMPLETE;
}

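/*
 * Set up the iov_iter for this request: fixed opcodes map the registered
 * buffer, READ/WRITE and provided buffers import a single user buffer,
 * and vectored requests import the full iovec array. Returns a separately
 * allocated iovec to be freed later, NULL if none was needed, or an
 * ERR_PTR on failure.
 */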
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
                                       struct io_rw_state *s,
                                       unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct iov_iter *iter = &s->iter;
        u8 opcode = req->opcode;
        struct iovec *iovec;
        void __user *buf;
        size_t sqe_len;
        ssize_t ret;

        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
                ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
                if (ret)
                        return ERR_PTR(ret);
                return NULL;
        }

        buf = u64_to_user_ptr(rw->addr);
        sqe_len = rw->len;

        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
            (req->flags & REQ_F_BUFFER_SELECT)) {
                if (io_do_buffer_select(req)) {
                        buf = io_buffer_select(req, &sqe_len, issue_flags);
                        if (!buf)
                                return ERR_PTR(-ENOBUFS);
                        rw->addr = (unsigned long) buf;
                        rw->len = sqe_len;
                }

                ret = import_ubuf(ddir, buf, sqe_len, iter);
                if (ret)
                        return ERR_PTR(ret);
                return NULL;
        }

        iovec = s->fast_iov;
        ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
                              req->ctx->compat);
        if (unlikely(ret < 0))
                return ERR_PTR(ret);
        return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
                                  struct iovec **iovec, struct io_rw_state *s,
                                  unsigned int issue_flags)
{
        *iovec = __io_import_iovec(rw, req, s, issue_flags);
        if (IS_ERR(*iovec))
                return PTR_ERR(*iovec);

        iov_iter_save_state(&s->iter, &s->iter_state);
        return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
        return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
        struct kiocb *kiocb = &rw->kiocb;
        struct file *file = kiocb->ki_filp;
        ssize_t ret = 0;
        loff_t *ppos;

        /*
         * Don't support polled IO through this interface, and we can't
         * support non-blocking either. For the latter, this just causes
         * the kiocb to be handled from an async context.
         */
        if (kiocb->ki_flags & IOCB_HIPRI)
                return -EOPNOTSUPP;
        if ((kiocb->ki_flags & IOCB_NOWAIT) &&
            !(kiocb->ki_filp->f_flags & O_NONBLOCK))
                return -EAGAIN;

        ppos = io_kiocb_ppos(kiocb);

        while (iov_iter_count(iter)) {
                void __user *addr;
                size_t len;
                ssize_t nr;

                if (iter_is_ubuf(iter)) {
                        addr = iter->ubuf + iter->iov_offset;
                        len = iov_iter_count(iter);
                } else if (!iov_iter_is_bvec(iter)) {
                        addr = iter_iov_addr(iter);
                        len = iter_iov_len(iter);
                } else {
                        addr = u64_to_user_ptr(rw->addr);
                        len = rw->len;
                }

                if (ddir == READ)
                        nr = file->f_op->read(file, addr, len, ppos);
                else
                        nr = file->f_op->write(file, addr, len, ppos);

                if (nr < 0) {
                        if (!ret)
                                ret = nr;
                        break;
                }
                ret += nr;
                if (!iov_iter_is_bvec(iter)) {
                        iov_iter_advance(iter, nr);
                } else {
                        rw->addr += nr;
                        rw->len -= nr;
                        if (!rw->len)
                                break;
                }
                if (nr != len)
                        break;
        }

        return ret;
}

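/*
 * Copy the current iterator into the request's async data so the request
 * can safely be retried from a different context. If the iovec still
 * lives in the submitter's stack-based fast_iov, duplicate it; a
 * heap-allocated iovec is kept and flagged for cleanup instead.
 */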
static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
                          const struct iovec *fast_iov, struct iov_iter *iter)
{
        struct io_async_rw *io = req->async_data;

        memcpy(&io->s.iter, iter, sizeof(*iter));
        io->free_iovec = iovec;
        io->bytes_done = 0;
        /* can only be fixed buffers, no need to do anything */
        if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
                return;
        if (!iovec) {
                unsigned iov_off = 0;

                io->s.iter.__iov = io->s.fast_iov;
                if (iter->__iov != fast_iov) {
                        iov_off = iter_iov(iter) - fast_iov;
                        io->s.iter.__iov += iov_off;
                }
                if (io->s.fast_iov != fast_iov)
                        memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
                               sizeof(struct iovec) * iter->nr_segs);
        } else {
                req->flags |= REQ_F_NEED_CLEANUP;
        }
}

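/*
 * Stash the iterator in async data so the request can be retried out of
 * line, unless the opcode never needs async data and we aren't forced to.
 * On allocation failure, the passed-in iovec is freed.
 */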
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
                             struct io_rw_state *s, bool force)
{
        if (!force && !io_cold_defs[req->opcode].prep_async)
                return 0;
        if (!req_has_async_data(req)) {
                struct io_async_rw *iorw;

                if (io_alloc_async_data(req)) {
                        kfree(iovec);
                        return -ENOMEM;
                }

                io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
                iorw = req->async_data;
                /* we've copied and mapped the iter, ensure state is saved */
                iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
        }
        return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
        struct io_async_rw *iorw = req->async_data;
        struct iovec *iov;
        int ret;

        /* submission path, ->uring_lock should already be taken */
        ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
        if (unlikely(ret < 0))
                return ret;

        iorw->bytes_done = 0;
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
        return io_rw_prep_async(req, ITER_DEST);
}

int io_writev_prep_async(struct io_kiocb *req)
{
        return io_rw_prep_async(req, ITER_SOURCE);
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
                             int sync, void *arg)
{
        struct wait_page_queue *wpq;
        struct io_kiocb *req = wait->private;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct wait_page_key *key = arg;

        wpq = container_of(wait, struct wait_page_queue, wait);

        if (!wake_page_match(wpq, key))
                return 0;

        rw->kiocb.ki_flags &= ~IOCB_WAITQ;
        list_del_init(&wait->entry);
        io_req_task_queue(req);
        return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;
        struct wait_page_queue *wait = &io->wpq;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        /* never retry for NOWAIT, we just complete with -EAGAIN */
        if (req->flags & REQ_F_NOWAIT)
                return false;

        /* Only for buffered IO */
        if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
                return false;

        /*
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
         */
        if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
                return false;

        wait->wait.func = io_async_buf_func;
        wait->wait.private = req;
        wait->wait.flags = 0;
        INIT_LIST_HEAD(&wait->wait.entry);
        kiocb->ki_flags |= IOCB_WAITQ;
        kiocb->ki_flags &= ~IOCB_NOWAIT;
        kiocb->ki_waitq = wait;
        return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
        struct file *file = rw->kiocb.ki_filp;

        if (likely(file->f_op->read_iter))
                return call_read_iter(file, &rw->kiocb, iter);
        else if (file->f_op->read)
                return loop_rw_iter(READ, rw, iter);
        else
                return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
        return req->flags & REQ_F_ISREG ||
                S_ISBLK(file_inode(req->file)->i_mode);
}

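/*
 * Per-issue kiocb setup: validate the file mode, apply the RWF_* flags
 * from the SQE, work out whether nonblocking issue can be used, and pick
 * the completion handler depending on IOPOLL mode.
 */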
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;
        struct io_ring_ctx *ctx = req->ctx;
        struct file *file = req->file;
        int ret;

        if (unlikely(!file || !(file->f_mode & mode)))
                return -EBADF;

        if (!io_req_ffs_set(req))
                req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;

        kiocb->ki_flags = file->f_iocb_flags;
        ret = kiocb_set_rw_flags(kiocb, rw->flags);
        if (unlikely(ret))
                return ret;
        kiocb->ki_flags |= IOCB_ALLOC_CACHE;

        /*
         * If the file is marked O_NONBLOCK, still allow retry for it if it
         * supports async. Otherwise it's impossible to use O_NONBLOCK files
         * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
         */
        if ((kiocb->ki_flags & IOCB_NOWAIT) ||
            ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
                req->flags |= REQ_F_NOWAIT;

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
                        return -EOPNOTSUPP;

                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI;
                kiocb->ki_complete = io_complete_rw_iopoll;
                req->iopoll_completed = 0;
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
                kiocb->ki_complete = io_complete_rw;
        }

        return 0;
}

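/*
 * Issue a read. A nonblocking attempt that can't proceed is punted for
 * async retry; short buffered reads are continued inline, using
 * IOCB_WAITQ based retry when the file supports it.
 */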
int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_rw_state __s, *s = &__s;
        struct iovec *iovec;
        struct kiocb *kiocb = &rw->kiocb;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_async_rw *io;
        ssize_t ret, ret2;
        loff_t *ppos;

        if (!req_has_async_data(req)) {
                ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
                if (unlikely(ret < 0))
                        return ret;
        } else {
                io = req->async_data;
                s = &io->s;

                /*
                 * Safe and required to re-import if we're using provided
                 * buffers, as we dropped the selected one before retry.
                 */
                if (io_do_buffer_select(req)) {
                        ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
                        if (unlikely(ret < 0))
                                return ret;
                }

                /*
                 * We come here from an earlier attempt, restore our state to
                 * match in case it doesn't. It's cheap enough that we don't
                 * need to make this conditional.
                 */
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_READ);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }
        req->cqe.res = iov_iter_count(&s->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req))) {
                        ret = io_setup_async_rw(req, iovec, s, true);
                        return ret ?: -EAGAIN;
                }
                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }

        ret = io_iter_do_read(rw, &s->iter);

        if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
                req->flags &= ~REQ_F_REISSUE;
                /* if we can poll, just do that */
                if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                if (iovec)
                        kfree(iovec);
                return IOU_ISSUE_SKIP_COMPLETE;
        } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }

        /*
         * Don't depend on the iter state matching what was consumed, or being
         * untouched in case of error. Restore it and we'll advance it
         * manually if we need to.
         */
        iov_iter_restore(&s->iter, &s->iter_state);

        ret2 = io_setup_async_rw(req, iovec, s, true);
        iovec = NULL;
        if (ret2) {
                ret = ret > 0 ? ret : ret2;
                goto done;
        }

        io = req->async_data;
        s = &io->s;
        /*
         * Now use our persistent iterator and state, if we aren't already.
         * We've restored and mapped the iter to match.
         */

        do {
                /*
                 * We end up here because of a partial read, either from
                 * above or inside this loop. Advance the iter by the bytes
                 * that were consumed.
                 */
                iov_iter_advance(&s->iter, ret);
                if (!iov_iter_count(&s->iter))
                        break;
                io->bytes_done += ret;
                iov_iter_save_state(&s->iter, &s->iter_state);

                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
                        return -EAGAIN;
                }

                req->cqe.res = iov_iter_count(&s->iter);
                /*
                 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
                 * we get -EIOCBQUEUED, then we'll get a notification when the
                 * desired page gets unlocked. We can also get a partial read
                 * here, and if we do, then just retry at the new offset.
                 */
                ret = io_iter_do_read(rw, &s->iter);
                if (ret == -EIOCBQUEUED)
                        return IOU_ISSUE_SKIP_COMPLETE;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
                iov_iter_restore(&s->iter, &s->iter_state);
        } while (ret > 0);
done:
        /* it's faster to check here than to delegate to kfree */
        if (iovec)
                kfree(iovec);
        return kiocb_done(req, ret, issue_flags);
}

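/*
 * Issue a write. A nonblocking attempt that can't proceed copies the
 * iterator into async data and punts to io-wq; a short write on a regular
 * file or block device is also finished from the worker, with bytes_done
 * accounting for what already completed.
 */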
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_rw_state __s, *s = &__s;
        struct iovec *iovec;
        struct kiocb *kiocb = &rw->kiocb;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        ssize_t ret, ret2;
        loff_t *ppos;

        if (!req_has_async_data(req)) {
                ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
                if (unlikely(ret < 0))
                        return ret;
        } else {
                struct io_async_rw *io = req->async_data;

                s = &io->s;
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_WRITE);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }
        req->cqe.res = iov_iter_count(&s->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req)))
                        goto copy_iov;

                /*
                 * Buffered (non-direct) writes on a regular file can only
                 * use NOWAIT if the fs supports async buffered writes;
                 * block devices always can.
                 */
                if (!(kiocb->ki_flags & IOCB_DIRECT) &&
                        !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
                        (req->flags & REQ_F_ISREG))
                        goto copy_iov;

                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }

        /*
         * Open-code file_start_write here to grab freeze protection,
         * which will be released by another thread in
         * io_complete_rw().  Fool lockdep by telling it the lock got
         * released so that it doesn't complain about the held lock when
         * we return to userspace.
         */
        if (req->flags & REQ_F_ISREG) {
                sb_start_write(file_inode(req->file)->i_sb);
                __sb_writers_release(file_inode(req->file)->i_sb,
                                        SB_FREEZE_WRITE);
        }
        kiocb->ki_flags |= IOCB_WRITE;

        if (likely(req->file->f_op->write_iter))
                ret2 = call_write_iter(req->file, kiocb, &s->iter);
        else if (req->file->f_op->write)
                ret2 = loop_rw_iter(WRITE, rw, &s->iter);
        else
                ret2 = -EINVAL;

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                ret2 = -EAGAIN;
        }

        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
         */
        if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
                ret2 = -EAGAIN;
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto copy_iov;

                if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
                        struct io_async_rw *io;

                        trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
                                                req->cqe.res, ret2);

                        /* This is a partial write. The file pos has already been
                         * updated, setup the async struct to complete the request
                         * in the worker. Also update bytes_done to account for
                         * the bytes already written.
                         */
                        iov_iter_save_state(&s->iter, &s->iter_state);
                        ret = io_setup_async_rw(req, iovec, s, true);

                        io = req->async_data;
                        if (io)
                                io->bytes_done += ret2;

                        if (kiocb->ki_flags & IOCB_WRITE)
                                kiocb_end_write(req);
                        return ret ? ret : -EAGAIN;
                }
done:
                ret = kiocb_done(req, ret2, issue_flags);
        } else {
copy_iov:
                iov_iter_restore(&s->iter, &s->iter_state);
                ret = io_setup_async_rw(req, iovec, s, false);
                if (!ret) {
                        if (kiocb->ki_flags & IOCB_WRITE)
                                kiocb_end_write(req);
                        return -EAGAIN;
                }
                return ret;
        }
        /* it's reportedly faster than delegating the null check to kfree() */
        if (iovec)
                kfree(iovec);
        return ret;
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
        io_commit_cqring_flush(ctx);
        if (ctx->flags & IORING_SETUP_SQPOLL)
                io_cqring_wake(ctx);
}

void io_rw_fail(struct io_kiocb *req)
{
        int res;

        res = io_fixup_rw_res(req, req->cqe.res);
        io_req_set_res(req, res, req->cqe.flags);
}

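/*
 * Reap completions for IOPOLL requests: poll each pending kiocb (or
 * uring_cmd) until something completes, then post CQEs for everything
 * that finished, preserving submission order.
 */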
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
        struct io_wq_work_node *pos, *start, *prev;
        unsigned int poll_flags = 0;
        DEFINE_IO_COMP_BATCH(iob);
        int nr_events = 0;

        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list.
         */
        if (ctx->poll_multi_queue || force_nonspin)
                poll_flags |= BLK_POLL_ONESHOT;

        wq_list_for_each(pos, start, &ctx->iopoll_list) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
                struct file *file = req->file;
                int ret;

                /*
                 * Move completed and retryable entries to our local lists.
                 * If we find a request that requires polling, break out
                 * and complete those lists first, if we have entries there.
                 */
                if (READ_ONCE(req->iopoll_completed))
                        break;

                if (req->opcode == IORING_OP_URING_CMD) {
                        struct io_uring_cmd *ioucmd;

                        ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
                        ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
                                                                poll_flags);
                } else {
                        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                        ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
                }
                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
                        poll_flags |= BLK_POLL_ONESHOT;

                /* iopoll may have completed current req */
                if (!rq_list_empty(iob.req_list) ||
                    READ_ONCE(req->iopoll_completed))
                        break;
        }

        if (!rq_list_empty(iob.req_list))
                iob.complete(&iob);
        else if (!pos)
                return 0;

        prev = start;
        wq_list_for_each_resume(pos, prev) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

                /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                if (!smp_load_acquire(&req->iopoll_completed))
                        break;
                nr_events++;
                if (unlikely(req->flags & REQ_F_CQE_SKIP))
                        continue;

                req->cqe.flags = io_put_kbuf(req, 0);
                if (unlikely(!__io_fill_cqe_req(ctx, req))) {
                        spin_lock(&ctx->completion_lock);
                        io_req_cqe_overflow(req);
                        spin_unlock(&ctx->completion_lock);
                }
        }

        if (unlikely(!nr_events))
                return 0;

        io_commit_cqring(ctx);
        io_cqring_ev_posted_iopoll(ctx);
        pos = start ? start->next : ctx->iopoll_list.first;
        wq_list_cut(&ctx->iopoll_list, prev, start);
        io_free_batch_list(ctx, pos);
        return nr_events;
}