// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
        u64                             addr;
        u32                             len;
        rwf_t                           flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
        return req->flags & REQ_F_SUPPORT_NOWAIT;

int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        rw->kiocb.ki_pos = READ_ONCE(sqe->off);
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);

        if (req->opcode == IORING_OP_READ_FIXED ||
            req->opcode == IORING_OP_WRITE_FIXED) {
                struct io_ring_ctx *ctx = req->ctx;

                if (unlikely(req->buf_index >= ctx->nr_user_bufs))
                index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
                req->imu = ctx->user_bufs[index];
                io_req_set_rsrc_node(req, ctx, 0);

        ioprio = READ_ONCE(sqe->ioprio);
                ret = ioprio_check_cap(ioprio);
                rw->kiocb.ki_ioprio = ioprio;
                rw->kiocb.ki_ioprio = get_current_ioprio();

        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);
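
/*
 * Illustrative only (not part of the kernel source): from userspace, the
 * fixed-buffer path prepared above corresponds to liburing submissions along
 * the lines of
 *
 *        io_uring_register_buffers(&ring, iovecs, nr_iovecs);
 *        io_uring_prep_read_fixed(sqe, fd, iovecs[idx].iov_base,
 *                                 iovecs[idx].iov_len, offset, idx);
 *
 * where idx ends up in sqe->buf_index and is validated against
 * ctx->nr_user_bufs in io_prep_rw().
 */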

void io_readv_writev_cleanup(struct io_kiocb *req)
        struct io_async_rw *io = req->async_data;

        kfree(io->free_iovec);

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
        case -ERESTART_RESTARTBLOCK:
                /*
                 * We can't just restart the syscall, since previously
                 * submitted sqes may already be in progress. Just fail this
                 * IO with EINTR.
                 */
                kiocb->ki_complete(kiocb, ret);

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_pos != -1)
                return &rw->kiocb.ki_pos;

        if (!(req->file->f_mode & FMODE_STREAM)) {
                req->flags |= REQ_F_CUR_POS;
                rw->kiocb.ki_pos = req->file->f_pos;
                return &rw->kiocb.ki_pos;

        rw->kiocb.ki_pos = 0;
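
/*
 * Illustrative only: submitting a read with the SQE offset set to -1 (e.g.
 * io_uring_prep_read(sqe, fd, buf, len, -1) in liburing) leaves ki_pos at -1,
 * so a non-stream file takes the REQ_F_CUR_POS path above and the request
 * uses and then advances the file's f_pos, much like a plain read(2).
 */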

static void io_req_task_queue_reissue(struct io_kiocb *req)
        req->io_task_work.func = io_queue_iowq;
        io_req_task_work_add(req);

static bool io_resubmit_prep(struct io_kiocb *req)
        struct io_async_rw *io = req->async_data;

        if (!req_has_async_data(req))
                return !io_req_prep_async(req);
        iov_iter_restore(&io->s.iter, &io->s.iter_state);

static bool io_rw_should_reissue(struct io_kiocb *req)
        umode_t mode = file_inode(req->file)->i_mode;
        struct io_ring_ctx *ctx = req->ctx;

        if (!S_ISBLK(mode) && !S_ISREG(mode))
        if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
            !(ctx->flags & IORING_SETUP_IOPOLL)))
        /*
         * If ref is dying, we might be running poll reap from the exit work.
         * Don't attempt to reissue from that path, just let it fail with
         * -EAGAIN.
         */
        if (percpu_ref_is_dying(&ctx->refs))
        /*
         * Play it safe and assume it's not safe to re-import and reissue if
         * we're not in the original thread group or not in task context.
         */
        if (!same_thread_group(req->task, current) || !in_task())
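
/*
 * The stubs below are the !CONFIG_BLOCK fallbacks: without block support
 * there is no safe reissue path, so both report false and a short or -EAGAIN
 * completion is surfaced as-is instead of being retried.
 */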
static bool io_resubmit_prep(struct io_kiocb *req)
static bool io_rw_should_reissue(struct io_kiocb *req)

static void kiocb_end_write(struct io_kiocb *req)
        /*
         * Tell lockdep we inherited freeze protection from the submission
         * thread.
         */
        if (req->flags & REQ_F_ISREG) {
                struct super_block *sb = file_inode(req->file)->i_sb;

                __sb_writers_acquired(sb, SB_FREEZE_WRITE);

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_flags & IOCB_WRITE) {
                kiocb_end_write(req);
                fsnotify_modify(req->file);
                fsnotify_access(req->file);

        if (unlikely(res != req->cqe.res)) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
                        req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
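
/*
 * A request that already transferred some bytes before being punted keeps
 * that count in the async data's bytes_done; io_fixup_rw_res() folds it back
 * into the final result so the CQE reflects the total transfer, not just the
 * last attempt.
 */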
static inline unsigned io_fixup_rw_res(struct io_kiocb *req, unsigned res)
        struct io_async_rw *io = req->async_data;

        /* add previously done IO, if any */
        if (req_has_async_data(req) && io->bytes_done > 0) {
                        res = io->bytes_done;
                        res += io->bytes_done;

static void io_complete_rw(struct kiocb *kiocb, long res)
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (__io_complete_rw_common(req, res))
        io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        req->io_task_work.func = io_req_task_complete;
        io_req_task_work_add(req);

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
        if (unlikely(res != req->cqe.res)) {
                if (res == -EAGAIN && io_rw_should_reissue(req)) {
                        req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;

        /* order with io_iopoll_complete() checking ->iopoll_completed */
        smp_store_release(&req->iopoll_completed, 1);

static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                      unsigned int issue_flags)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned final_ret = io_fixup_rw_res(req, ret);

        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
        if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
                if (!__io_complete_rw_common(req, ret)) {
                        io_req_set_res(req, final_ret,
                                       io_put_kbuf(req, issue_flags));
                io_rw_done(&rw->kiocb, ret);

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req))
                        io_req_task_queue_reissue(req);
                        io_req_task_queue_fail(req, final_ret);
        return IOU_ISSUE_SKIP_COMPLETE;
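
/*
 * Provided ("select") buffer import for 32-bit compat tasks: the user iovec
 * is laid out as a struct compat_iovec, so its length is read with the compat
 * layout before a buffer is picked via io_buffer_select().
 */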
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
                                unsigned int issue_flags)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct compat_iovec __user *uiov;

        uiov = u64_to_user_ptr(rw->addr);
        if (!access_ok(uiov, sizeof(*uiov)))
        if (__get_user(clen, &uiov->iov_len))

        buf = io_buffer_select(req, &len, issue_flags);
        rw->addr = (unsigned long) buf;
        iov[0].iov_base = buf;
        rw->len = iov[0].iov_len = (compat_size_t) len;

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
                                      unsigned int issue_flags)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct iovec __user *uiov = u64_to_user_ptr(rw->addr);

        if (copy_from_user(iov, uiov, sizeof(*uiov)))
        len = iov[0].iov_len;
        buf = io_buffer_select(req, &len, issue_flags);
        rw->addr = (unsigned long) buf;
        iov[0].iov_base = buf;
        rw->len = iov[0].iov_len = len;

static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
                                    unsigned int issue_flags)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
                iov[0].iov_base = u64_to_user_ptr(rw->addr);
                iov[0].iov_len = rw->len;

        if (req->ctx->compat)
                return io_compat_import(req, iov, issue_flags);

        return __io_iov_buffer_select(req, iov, issue_flags);
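
/*
 * Central import helper: map the SQE's buffer description onto the request's
 * iov_iter. Registered buffers go through io_import_fixed(), plain READ/WRITE
 * use a single range (optionally picked from a provided-buffer group), and
 * vectored READV/WRITEV copy in the user iovec array (or select a provided
 * buffer when IOSQE_BUFFER_SELECT is set).
 */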
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
                                       struct io_rw_state *s,
                                       unsigned int issue_flags)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct iov_iter *iter = &s->iter;
        u8 opcode = req->opcode;

        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
                ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);

        buf = u64_to_user_ptr(rw->addr);

        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
                if (io_do_buffer_select(req)) {
                        buf = io_buffer_select(req, &sqe_len, issue_flags);
                                return ERR_PTR(-ENOBUFS);
                        rw->addr = (unsigned long) buf;

                ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);

        if (req->flags & REQ_F_BUFFER_SELECT) {
                ret = io_iov_buffer_select(req, iovec, issue_flags);
                iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len);

        ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
        if (unlikely(ret < 0))

static inline int io_import_iovec(int rw, struct io_kiocb *req,
                                  struct iovec **iovec, struct io_rw_state *s,
                                  unsigned int issue_flags)
        *iovec = __io_import_iovec(rw, req, s, issue_flags);
        if (unlikely(IS_ERR(*iovec)))
                return PTR_ERR(*iovec);

        iov_iter_save_state(&s->iter, &s->iter_state);
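
/*
 * Illustrative only: the vectored path above is what e.g.
 * io_uring_prep_readv(sqe, fd, iovecs, nr_vecs, offset) in liburing ends up
 * exercising, with the iovec array copied in by __import_iovec() and stashed
 * in async data if the request has to be retried.
 */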

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
        return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
        struct kiocb *kiocb = &rw->kiocb;
        struct file *file = kiocb->ki_filp;

        /*
         * Don't support polled IO through this interface, and we can't
         * support non-blocking either. For the latter, this just causes
         * the kiocb to be handled from an async context.
         */
        if (kiocb->ki_flags & IOCB_HIPRI)
        if ((kiocb->ki_flags & IOCB_NOWAIT) &&
            !(kiocb->ki_filp->f_flags & O_NONBLOCK))

        ppos = io_kiocb_ppos(kiocb);

        while (iov_iter_count(iter)) {
                if (!iov_iter_is_bvec(iter)) {
                        iovec = iov_iter_iovec(iter);
                        iovec.iov_base = u64_to_user_ptr(rw->addr);
                        iovec.iov_len = rw->len;

                        nr = file->f_op->read(file, iovec.iov_base,
                                              iovec.iov_len, ppos);
                        nr = file->f_op->write(file, iovec.iov_base,
                                               iovec.iov_len, ppos);

                if (!iov_iter_is_bvec(iter)) {
                        iov_iter_advance(iter, nr);

                if (nr != iovec.iov_len)
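
/*
 * io_req_map_rw() snapshots the current iovec/iter into the request's async
 * data so a punted or retried request keeps a stable copy once the on-stack
 * state is gone; REQ_F_NEED_CLEANUP marks the copied iovec for freeing in
 * io_readv_writev_cleanup().
 */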
static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
                          const struct iovec *fast_iov, struct iov_iter *iter)
        struct io_async_rw *io = req->async_data;

        memcpy(&io->s.iter, iter, sizeof(*iter));
        io->free_iovec = iovec;
        /* can only be fixed buffers, no need to do anything */
        if (iov_iter_is_bvec(iter))
                unsigned iov_off = 0;

                io->s.iter.iov = io->s.fast_iov;
                if (iter->iov != fast_iov) {
                        iov_off = iter->iov - fast_iov;
                        io->s.iter.iov += iov_off;
                if (io->s.fast_iov != fast_iov)
                        memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
                               sizeof(struct iovec) * iter->nr_segs);
                req->flags |= REQ_F_NEED_CLEANUP;

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
                             struct io_rw_state *s, bool force)
        if (!force && !io_op_defs[req->opcode].prep_async)
        if (!req_has_async_data(req)) {
                struct io_async_rw *iorw;

                if (io_alloc_async_data(req)) {

                io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
                iorw = req->async_data;
                /* we've copied and mapped the iter, ensure state is saved */
                iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        struct io_async_rw *iorw = req->async_data;

        /* submission path, ->uring_lock should already be taken */
        ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
        if (unlikely(ret < 0))

        iorw->bytes_done = 0;
        iorw->free_iovec = iov;
                req->flags |= REQ_F_NEED_CLEANUP;

int io_readv_prep_async(struct io_kiocb *req)
        return io_rw_prep_async(req, READ);

int io_writev_prep_async(struct io_kiocb *req)
        return io_rw_prep_async(req, WRITE);

/*
 * This is our waitqueue callback handler, registered through
 * __folio_lock_async() when we initially tried to do the IO with the iocb
 * armed with our waitqueue. This gets called when the page is unlocked, and
 * we generally expect that to happen when the page IO is completed and the
 * page is now uptodate. This will queue a task_work based retry of the
 * operation, attempting to copy the data again. If the latter fails because
 * the page was NOT uptodate, then we will do a thread based blocking retry of
 * the operation. That's the unexpected slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
        struct wait_page_queue *wpq;
        struct io_kiocb *req = wait->private;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct wait_page_key *key = arg;

        wpq = container_of(wait, struct wait_page_queue, wait);

        if (!wake_page_match(wpq, key))

        rw->kiocb.ki_flags &= ~IOCB_WAITQ;
        list_del_init(&wait->entry);
        io_req_task_queue(req);

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to set up a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
        struct io_async_rw *io = req->async_data;
        struct wait_page_queue *wait = &io->wpq;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        /* never retry for NOWAIT, we just complete with -EAGAIN */
        if (req->flags & REQ_F_NOWAIT)

        /* Only for buffered IO */
        if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))

        /*
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
         */
        if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))

        wait->wait.func = io_async_buf_func;
        wait->wait.private = req;
        wait->wait.flags = 0;
        INIT_LIST_HEAD(&wait->wait.entry);
        kiocb->ki_flags |= IOCB_WAITQ;
        kiocb->ki_flags &= ~IOCB_NOWAIT;
        kiocb->ki_waitq = wait;

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
        struct file *file = rw->kiocb.ki_filp;

        if (likely(file->f_op->read_iter))
                return call_read_iter(file, &rw->kiocb, iter);
        else if (file->f_op->read)
                return loop_rw_iter(READ, rw, iter);

static bool need_complete_io(struct io_kiocb *req)
        return req->flags & REQ_F_ISREG ||
                S_ISBLK(file_inode(req->file)->i_mode);
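
/*
 * io_rw_init_file() stamps the kiocb with the per-file IO flags, applies the
 * SQE's RWF_* flags, decides whether the request must run in NOWAIT mode, and
 * picks the completion handler: io_complete_rw_iopoll() on
 * IORING_SETUP_IOPOLL rings (which require IOCB_DIRECT and an ->iopoll file
 * operation), io_complete_rw() otherwise.
 */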
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;
        struct io_ring_ctx *ctx = req->ctx;
        struct file *file = req->file;

        if (unlikely(!file || !(file->f_mode & mode)))

        if (!io_req_ffs_set(req))
                req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;

        kiocb->ki_flags = file->f_iocb_flags;
        ret = kiocb_set_rw_flags(kiocb, rw->flags);

        /*
         * If the file is marked O_NONBLOCK, still allow retry for it if it
         * supports async. Otherwise it's impossible to use O_NONBLOCK files
         * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
         */
        if ((kiocb->ki_flags & IOCB_NOWAIT) ||
            ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
                req->flags |= REQ_F_NOWAIT;

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)

                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
                kiocb->ki_complete = io_complete_rw_iopoll;
                req->iopoll_completed = 0;
                if (kiocb->ki_flags & IOCB_HIPRI)
                kiocb->ki_complete = io_complete_rw;
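
/*
 * io_read() first attempts the request with IOCB_NOWAIT when issued from the
 * submission path. On -EAGAIN it copies the iovec/iter into async data and
 * punts, unless the read can be driven by poll or retried inline with
 * IOCB_WAITQ armed for buffered IO. Partial reads bump bytes_done and retry
 * from the new offset so the final CQE reflects the full transfer.
 */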
int io_read(struct io_kiocb *req, unsigned int issue_flags)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_rw_state __s, *s = &__s;
        struct kiocb *kiocb = &rw->kiocb;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_async_rw *io;

        if (!req_has_async_data(req)) {
                ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
                if (unlikely(ret < 0))
                io = req->async_data;

                /*
                 * Safe and required to re-import if we're using provided
                 * buffers, as we dropped the selected one before retry.
                 */
                if (io_do_buffer_select(req)) {
                        ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
                        if (unlikely(ret < 0))

                /*
                 * We come here from an earlier attempt, restore our state to
                 * match in case it doesn't. It's cheap enough that we don't
                 * need to make this conditional.
                 */
                iov_iter_restore(&s->iter, &s->iter_state);
        ret = io_rw_init_file(req, FMODE_READ);
        req->cqe.res = iov_iter_count(&s->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req))) {
                        ret = io_setup_async_rw(req, iovec, s, true);
                        return ret ?: -EAGAIN;
                kiocb->ki_flags |= IOCB_NOWAIT;
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);

        ret = io_iter_do_read(rw, &s->iter);

        if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
                req->flags &= ~REQ_F_REISSUE;
                /* if we can poll, just do that */
                if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
        } else if (ret == -EIOCBQUEUED) {
                return IOU_ISSUE_SKIP_COMPLETE;
        } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
                /* read all, failed, already did sync or don't want to retry */

        /*
         * Don't depend on the iter state matching what was consumed, or being
         * untouched in case of error. Restore it and we'll advance it
         * manually if we need to.
         */
        iov_iter_restore(&s->iter, &s->iter_state);

        ret2 = io_setup_async_rw(req, iovec, s, true);

        io = req->async_data;
        /*
         * Now use our persistent iterator and state, if we aren't already.
         * We've restored and mapped the iter to match.
         */

                /*
                 * We end up here because of a partial read, either from
                 * above or inside this loop. Advance the iter by the bytes
                 * that were consumed.
                 */
                iov_iter_advance(&s->iter, ret);
                if (!iov_iter_count(&s->iter))
                io->bytes_done += ret;
                iov_iter_save_state(&s->iter, &s->iter_state);

                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;

                /*
                 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
                 * we get -EIOCBQUEUED, then we'll get a notification when the
                 * desired page gets unlocked. We can also get a partial read
                 * here, and if we do, then just retry at the new offset.
                 */
                ret = io_iter_do_read(rw, &s->iter);
                if (ret == -EIOCBQUEUED)
                        return IOU_ISSUE_SKIP_COMPLETE;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
                iov_iter_restore(&s->iter, &s->iter_state);
        /* it's faster to check here than it is to delegate to kfree */
        return kiocb_done(req, ret, issue_flags);
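
/*
 * io_write() mirrors io_read(): it is tried with IOCB_NOWAIT first when
 * possible, takes sb_start_write() freeze protection for regular files
 * (released again in kiocb_end_write()), and on -EAGAIN or a short write
 * saves the iter state plus bytes_done so the remainder can be completed
 * from io-wq.
 */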
int io_write(struct io_kiocb *req, unsigned int issue_flags)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_rw_state __s, *s = &__s;
        struct kiocb *kiocb = &rw->kiocb;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

        if (!req_has_async_data(req)) {
                ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
                if (unlikely(ret < 0))
                struct io_async_rw *io = req->async_data;

                iov_iter_restore(&s->iter, &s->iter_state);
        ret = io_rw_init_file(req, FMODE_WRITE);
        req->cqe.res = iov_iter_count(&s->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req)))

                /*
                 * For non-direct IO, the file path only supports NOWAIT on
                 * block devices; buffered writes to regular files also need
                 * FMODE_BUF_WASYNC, otherwise punt to io-wq.
                 */
                if (!(kiocb->ki_flags & IOCB_DIRECT) &&
                    !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
                    (req->flags & REQ_F_ISREG))

                kiocb->ki_flags |= IOCB_NOWAIT;
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);

        /*
         * Open-code file_start_write here to grab freeze protection,
         * which will be released by another thread in
         * io_complete_rw(). Fool lockdep by telling it the lock got
         * released so that it doesn't complain about the held lock when
         * we return to userspace.
         */
        if (req->flags & REQ_F_ISREG) {
                sb_start_write(file_inode(req->file)->i_sb);
                __sb_writers_release(file_inode(req->file)->i_sb,
        kiocb->ki_flags |= IOCB_WRITE;

        if (likely(req->file->f_op->write_iter))
                ret2 = call_write_iter(req->file, kiocb, &s->iter);
        else if (req->file->f_op->write)
                ret2 = loop_rw_iter(WRITE, rw, &s->iter);

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;

        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
         */
        if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))

                if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
                        struct io_async_rw *rw;

                        trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,

                        /* This is a partial write. The file pos has already been
                         * updated, set up the async struct to complete the request
                         * in the worker. Also update bytes_done to account for
                         * the bytes already written.
                         */
                        iov_iter_save_state(&s->iter, &s->iter_state);
                        ret = io_setup_async_rw(req, iovec, s, true);

                        rw = req->async_data;
                        rw->bytes_done += ret2;

                        if (kiocb->ki_flags & IOCB_WRITE)
                                kiocb_end_write(req);
                        return ret ? ret : -EAGAIN;
                ret = kiocb_done(req, ret2, issue_flags);

                iov_iter_restore(&s->iter, &s->iter_state);
                ret = io_setup_async_rw(req, iovec, s, false);
                if (kiocb->ki_flags & IOCB_WRITE)
                        kiocb_end_write(req);

        /* it's reportedly faster than delegating the null check to kfree() */

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
        io_commit_cqring_flush(ctx);

        if (ctx->flags & IORING_SETUP_SQPOLL)
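
/*
 * io_do_iopoll() drives completion polling for IORING_SETUP_IOPOLL rings: it
 * walks ctx->iopoll_list, calls ->iopoll() on requests that have not completed
 * yet (spinning only when a single queue is involved and force_nonspin isn't
 * set), then posts CQEs for everything that has reached iopoll_completed and
 * returns the number of events found.
 */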
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
        struct io_wq_work_node *pos, *start, *prev;
        unsigned int poll_flags = BLK_POLL_NOSLEEP;
        DEFINE_IO_COMP_BATCH(iob);

        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list.
         */
        if (ctx->poll_multi_queue || force_nonspin)
                poll_flags |= BLK_POLL_ONESHOT;

        wq_list_for_each(pos, start, &ctx->iopoll_list) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
                struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                /*
                 * Move completed and retryable entries to our local lists.
                 * If we find a request that requires polling, break out
                 * and complete those lists first, if we have entries there.
                 */
                if (READ_ONCE(req->iopoll_completed))

                ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
                if (unlikely(ret < 0))
                        poll_flags |= BLK_POLL_ONESHOT;

                /* iopoll may have completed current req */
                if (!rq_list_empty(iob.req_list) ||
                    READ_ONCE(req->iopoll_completed))

        if (!rq_list_empty(iob.req_list))

        wq_list_for_each_resume(pos, prev) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

                /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                if (!smp_load_acquire(&req->iopoll_completed))

                if (unlikely(req->flags & REQ_F_CQE_SKIP))

                req->cqe.flags = io_put_kbuf(req, 0);
                __io_fill_cqe_req(req->ctx, req);

        if (unlikely(!nr_events))

        io_commit_cqring(ctx);
        io_cqring_ev_posted_iopoll(ctx);
        pos = start ? start->next : ctx->iopoll_list.first;
        wq_list_cut(&ctx->iopoll_list, prev, start);
        io_free_batch_list(ctx, pos);