/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"
/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem
 * specific locking per page. Instead, all the operations are amortised over
 * the entire range of pages. It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call.
 */
static loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
                const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
        struct iomap iomap = { 0 };
        loff_t written = 0, ret;

        /*
         * Need to map a range from start position for length bytes. This can
         * span multiple pages - it is only guaranteed to return a range of a
         * single type of pages (e.g. all into a hole, all mapped or all
         * unwritten). Failure at this point has nothing to undo.
         *
         * If allocation is required for this range, reserve the space now so
         * that the allocation is guaranteed to succeed later on. Once we copy
         * the data into the page cache pages, then we cannot fail otherwise we
         * expose transient stale data. If the reserve fails, we can safely
         * back out at this point as there is nothing to undo.
         */
        ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
        if (ret)
                return ret;
        if (WARN_ON(iomap.offset > pos))
                return -EIO;

        /*
         * Cut down the length to the one actually provided by the filesystem,
         * as it might not be able to give us the whole size that we requested.
         */
        if (iomap.offset + iomap.length < pos + length)
                length = iomap.offset + iomap.length - pos;

        /*
         * Now that we have guaranteed that the space allocation will succeed,
         * we can do the copy-in page by page without having to worry about
         * failures exposing transient data.
         */
        written = actor(inode, pos, length, data, &iomap);

        /*
         * Now the data has been copied, commit the range we've copied. This
         * should not fail unless the filesystem has had a fatal error.
         */
        if (ops->iomap_end) {
                ret = ops->iomap_end(inode, pos, length,
                                written > 0 ? written : 0,
                                flags, &iomap);
        }

        return written ? written : ret;
}
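
/*
 * Illustrative sketch (not part of the original source): the exported helpers
 * below each pair iomap_apply() with a suitable actor, so a filesystem only
 * has to provide the iomap_ops. A hypothetical, minimal ->iomap_begin for a
 * filesystem whose data happens to be laid out 1:1 with the file offset might
 * look like this; "myfs_iomap_begin" and the trivial layout are assumptions
 * made purely for the example:
 *
 *        static int myfs_iomap_begin(struct inode *inode, loff_t pos,
 *                        loff_t length, unsigned flags, struct iomap *iomap)
 *        {
 *                iomap->offset = pos;
 *                iomap->length = length;
 *                iomap->addr = pos;
 *                iomap->bdev = inode->i_sb->s_bdev;
 *                iomap->type = IOMAP_MAPPED;
 *                return 0;
 *        }
 *
 * The actor then sees one mapped extent at a time and never has to take any
 * filesystem locks of its own.
 */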
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
        loff_t i_size = i_size_read(inode);

        /*
         * Only truncate newly allocated pages beyond EOF, even if the
         * write started inside the existing inode size.
         */
        if (pos + len > i_size)
                truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, struct iomap *iomap)
        pgoff_t index = pos >> PAGE_SHIFT;

        BUG_ON(pos + len > iomap->offset + iomap->length);

        if (fatal_signal_pending(current))

        page = grab_cache_page_write_begin(inode->i_mapping, index, flags);

        status = __block_write_begin_int(page, pos, len, NULL, iomap);
        if (unlikely(status)) {
                iomap_write_failed(inode, pos, len);

iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
                unsigned copied, struct page *page)
        ret = generic_write_end(NULL, inode->i_mapping, pos, len,
        iomap_write_failed(inode, pos, len);

iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        struct iov_iter *i = data;
        unsigned int flags = AOP_FLAG_NOFS;

                unsigned long offset;   /* Offset into pagecache page */
                unsigned long bytes;    /* Bytes to write to page */
                size_t copied;          /* Bytes copied from user */

                offset = (pos & (PAGE_SIZE - 1));
                bytes = min_t(unsigned long, PAGE_SIZE - offset,
                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 *
                 * Not only is this an optimisation, but it is also required
                 * to check that the address is actually valid, when atomic
                 * usercopies are used, below.
                 */
                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {

                status = iomap_write_begin(inode, pos, bytes, flags, &page,
                if (unlikely(status))

                if (mapping_writably_mapped(inode->i_mapping))
                        flush_dcache_page(page);

                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

                flush_dcache_page(page);

                status = iomap_write_end(inode, pos, bytes, copied, page);
                if (unlikely(status < 0))

                iov_iter_advance(i, copied);
                if (unlikely(copied == 0)) {
                        /*
                         * If we were unable to copy any data at all, we must
                         * fall back to a single segment length write.
                         *
                         * If we didn't fallback here, we could livelock
                         * because not all segments in the iov can be copied at
                         * once without a pagefault.
                         */
                        bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                        iov_iter_single_seg_count(i));

                balance_dirty_pages_ratelimited(inode->i_mapping);
        } while (iov_iter_count(i) && length);

        return written ? written : status;
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops)
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        loff_t pos = iocb->ki_pos, ret = 0, written = 0;

        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter),
                                IOMAP_WRITE, ops, iter, iomap_write_actor);

        return written ? written : ret;
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
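
/*
 * Illustrative sketch (assumption, not from this file): a filesystem would
 * normally call iomap_file_buffered_write() from its ->write_iter method,
 * roughly as follows. "myfs_file_write_iter" and "myfs_iomap_ops" are
 * hypothetical names:
 *
 *        static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *                        struct iov_iter *from)
 *        {
 *                struct inode *inode = file_inode(iocb->ki_filp);
 *                ssize_t ret;
 *
 *                inode_lock(inode);
 *                ret = generic_write_checks(iocb, from);
 *                if (ret > 0)
 *                        ret = iomap_file_buffered_write(iocb, from,
 *                                        &myfs_iomap_ops);
 *                inode_unlock(inode);
 *                if (ret > 0)
 *                        ret = generic_write_sync(iocb, ret);
 *                return ret;
 *        }
 */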
__iomap_read_page(struct inode *inode, loff_t offset)
        struct address_space *mapping = inode->i_mapping;

        page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
        if (!PageUptodate(page)) {
                return ERR_PTR(-EIO);

iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct page *page, *rpage;
                unsigned long offset;   /* Offset into pagecache page */
                unsigned long bytes;    /* Bytes to write to page */

                offset = (pos & (PAGE_SIZE - 1));
                bytes = min_t(loff_t, PAGE_SIZE - offset, length);

                rpage = __iomap_read_page(inode, pos);
                        return PTR_ERR(rpage);

                status = iomap_write_begin(inode, pos, bytes,
                                AOP_FLAG_NOFS, &page, iomap);
                if (unlikely(status))

                WARN_ON_ONCE(!PageUptodate(page));

                status = iomap_write_end(inode, pos, bytes, bytes, page);
                if (unlikely(status <= 0)) {
                        if (WARN_ON_ONCE(status == 0))

                balance_dirty_pages_ratelimited(inode->i_mapping);

iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops)
        ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
EXPORT_SYMBOL_GPL(iomap_file_dirty);
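
/*
 * Illustrative sketch (assumption, not from this file): a copy-on-write
 * filesystem that has just remapped the blocks backing an already-cached
 * range might redirty that range through the new mapping with:
 *
 *        ret = iomap_file_dirty(inode, pos, len, &myfs_iomap_ops);
 *
 * where "myfs_iomap_ops" is a hypothetical iomap_ops instance.
 */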
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
                unsigned bytes, struct iomap *iomap)
        status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,

        zero_user(page, offset, bytes);
        mark_page_accessed(page);

        return iomap_write_end(inode, pos, bytes, bytes, page);

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
        sector_t sector = (iomap->addr +
                        (pos & PAGE_MASK) - iomap->offset) >> 9;

        return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,

iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
                void *data, struct iomap *iomap)
        bool *did_zero = data;

        /* already zeroed? we're done. */
        if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)

                unsigned offset, bytes;

                offset = pos & (PAGE_SIZE - 1); /* Within page */
                bytes = min_t(loff_t, PAGE_SIZE - offset, count);

                        status = iomap_dax_zero(pos, offset, bytes, iomap);
                        status = iomap_zero(inode, pos, offset, bytes, iomap);

iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
                const struct iomap_ops *ops)
        ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
                        ops, did_zero, iomap_zero_range_actor);
EXPORT_SYMBOL_GPL(iomap_zero_range);
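
/*
 * Illustrative sketch (assumption, not from this file): when a filesystem
 * extends a file with truncate(), it typically has to zero the bytes between
 * the old and the new EOF so stale block contents are never exposed. With a
 * hypothetical "myfs_iomap_ops" that could look like:
 *
 *        if (newsize > oldsize) {
 *                bool did_zero = false;
 *
 *                error = iomap_zero_range(inode, oldsize, newsize - oldsize,
 *                                &did_zero, &myfs_iomap_ops);
 *        }
 */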
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
                const struct iomap_ops *ops)
        unsigned int blocksize = i_blocksize(inode);
        unsigned int off = pos & (blocksize - 1);

        /* Block boundary? Nothing to do */

        return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
EXPORT_SYMBOL_GPL(iomap_truncate_page);
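
/*
 * Illustrative sketch (assumption, not from this file): on a truncate that
 * shrinks the file, the caller zeroes the tail of the new last block, e.g.
 * with a 4096-byte block size and a new size of 10000, bytes 10000..12287 of
 * the file are zeroed:
 *
 *        error = iomap_truncate_page(inode, newsize, &did_zero,
 *                        &myfs_iomap_ops);
 *
 * "myfs_iomap_ops" is a hypothetical iomap_ops instance.
 */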
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
                void *data, struct iomap *iomap)
        struct page *page = data;

        ret = __block_write_begin_int(page, pos, length, NULL, iomap);

        block_commit_write(page, 0, length);

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        unsigned long length;

        size = i_size_read(inode);
        if ((page->mapping != inode->i_mapping) ||
            (page_offset(page) > size)) {
                /* We overload EFAULT to mean page got truncated */

        /* page is wholly or partially inside EOF */
        if (((page->index + 1) << PAGE_SHIFT) > size)
                length = size & ~PAGE_MASK;

        offset = page_offset(page);

        ret = iomap_apply(inode, offset, length,
                        IOMAP_WRITE | IOMAP_FAULT, ops, page,
                        iomap_page_mkwrite_actor);
        if (unlikely(ret <= 0))

        set_page_dirty(page);
        wait_for_stable_page(page);
        return VM_FAULT_LOCKED;

        return block_page_mkwrite_return(ret);
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
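
/*
 * Illustrative sketch (assumption, not from this file): a filesystem hooks
 * this up through its vm_operations_struct, with a trivial wrapper supplying
 * its iomap_ops. The "myfs_*" names are hypothetical:
 *
 *        static int myfs_page_mkwrite(struct vm_fault *vmf)
 *        {
 *                return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *        }
 *
 *        static const struct vm_operations_struct myfs_file_vm_ops = {
 *                .fault          = filemap_fault,
 *                .map_pages      = filemap_map_pages,
 *                .page_mkwrite   = myfs_page_mkwrite,
 *        };
 */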
        struct fiemap_extent_info *fi;

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
                struct iomap *iomap, u32 flags)
        switch (iomap->type) {
                flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
        case IOMAP_UNWRITTEN:
                flags |= FIEMAP_EXTENT_UNWRITTEN;

        if (iomap->flags & IOMAP_F_MERGED)
                flags |= FIEMAP_EXTENT_MERGED;
        if (iomap->flags & IOMAP_F_SHARED)
                flags |= FIEMAP_EXTENT_SHARED;
        if (iomap->flags & IOMAP_F_DATA_INLINE)
                flags |= FIEMAP_EXTENT_DATA_INLINE;

        return fiemap_fill_next_extent(fi, iomap->offset,
                        iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
                        iomap->length, flags);

iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        struct fiemap_ctx *ctx = data;

        if (iomap->type == IOMAP_HOLE)

        ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
        case 0: /* success */
        case 1: /* extent array full */

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
                loff_t start, loff_t len, const struct iomap_ops *ops)
        struct fiemap_ctx ctx;

        memset(&ctx, 0, sizeof(ctx));
        ctx.prev.type = IOMAP_HOLE;

        ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);

        if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
                ret = filemap_write_and_wait(inode->i_mapping);

                ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
                /* inode with no (attribute) mapping will give ENOENT */

        if (ctx.prev.type != IOMAP_HOLE) {
                ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
EXPORT_SYMBOL_GPL(iomap_fiemap);
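
/*
 * Illustrative sketch (assumption, not from this file): a filesystem's
 * ->fiemap inode operation can be implemented directly on top of this helper.
 * The "myfs_*" names are hypothetical:
 *
 *        static int myfs_fiemap(struct inode *inode,
 *                        struct fiemap_extent_info *fieinfo, u64 start, u64 len)
 *        {
 *                return iomap_fiemap(inode, fieinfo, start, len,
 *                                &myfs_iomap_ops);
 *        }
 */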
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
                void *data, struct iomap *iomap)
        switch (iomap->type) {
        case IOMAP_UNWRITTEN:
                offset = page_cache_seek_hole_data(inode, offset, length,
                *(loff_t *)data = offset;

iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
        loff_t size = i_size_read(inode);
        loff_t length = size - offset;

        /* Nothing to be found before or beyond the end of the file. */
        if (offset < 0 || offset >= size)

                ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
                                &offset, iomap_seek_hole_actor);
EXPORT_SYMBOL_GPL(iomap_seek_hole);

iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
                void *data, struct iomap *iomap)
        switch (iomap->type) {
        case IOMAP_UNWRITTEN:
                offset = page_cache_seek_hole_data(inode, offset, length,
                *(loff_t *)data = offset;

iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
        loff_t size = i_size_read(inode);
        loff_t length = size - offset;

        /* Nothing to be found before or beyond the end of the file. */
        if (offset < 0 || offset >= size)

                ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
                                &offset, iomap_seek_data_actor);
EXPORT_SYMBOL_GPL(iomap_seek_data);
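
/*
 * Illustrative sketch (assumption, not from this file): SEEK_HOLE/SEEK_DATA
 * support in a filesystem's ->llseek method can be built on the two helpers
 * above. The "myfs_*" names are hypothetical:
 *
 *        static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
 *        {
 *                struct inode *inode = file_inode(file);
 *
 *                switch (whence) {
 *                case SEEK_HOLE:
 *                        offset = iomap_seek_hole(inode, offset,
 *                                        &myfs_iomap_ops);
 *                        break;
 *                case SEEK_DATA:
 *                        offset = iomap_seek_data(inode, offset,
 *                                        &myfs_iomap_ops);
 *                        break;
 *                default:
 *                        return generic_file_llseek(file, offset, whence);
 *                }
 *                if (offset < 0)
 *                        return offset;
 *                return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *        }
 */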
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE (1 << 30)
#define IOMAP_DIO_DIRTY (1 << 31)

        iomap_dio_end_io_t *end_io;

                /* used during submission and for synchronous completion: */
                struct iov_iter *iter;
                struct task_struct *waiter;
                struct request_queue *last_queue;

                /* used for aio completion: */
                struct work_struct work;
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
        struct kiocb *iocb = dio->iocb;
        struct inode *inode = file_inode(iocb->ki_filp);
        loff_t offset = iocb->ki_pos;

                ret = dio->end_io(iocb,
                                dio->error ? dio->error : dio->size,

                /* check for short read */
                if (offset + ret > dio->i_size &&
                    !(dio->flags & IOMAP_DIO_WRITE))
                        ret = dio->i_size - offset;

        /*
         * Try again to invalidate clean pages which might have been cached by
         * non-direct readahead, or faulted in by get_user_pages() if the source
         * of the write was an mmap'ed region of the file we're writing. Either
         * one is a pretty crazy thing to do, so we don't support it 100%. If
         * this invalidation fails, tough, the write still worked...
         *
         * And this page cache invalidation has to be after dio->end_io(), as
         * some filesystems convert unwritten extents to real allocations in
         * end_io() when necessary, otherwise a racing buffer read would cache
         * zeros from unwritten extents.
         */
            (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
                err = invalidate_inode_pages2_range(inode->i_mapping,
                                offset >> PAGE_SHIFT,
                                (offset + dio->size - 1) >> PAGE_SHIFT);

        inode_dio_end(file_inode(iocb->ki_filp));

static void iomap_dio_complete_work(struct work_struct *work)
        struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
        struct kiocb *iocb = dio->iocb;
        bool is_write = (dio->flags & IOMAP_DIO_WRITE);

        ret = iomap_dio_complete(dio);
        if (is_write && ret > 0)
                ret = generic_write_sync(iocb, ret);
        iocb->ki_complete(iocb, ret, 0);
/*
 * Set an error in the dio if none is set yet. We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update it.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
        cmpxchg(&dio->error, 0, ret);
static void iomap_dio_bio_end_io(struct bio *bio)
        struct iomap_dio *dio = bio->bi_private;
        bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

                iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

        if (atomic_dec_and_test(&dio->ref)) {
                if (is_sync_kiocb(dio->iocb)) {
                        struct task_struct *waiter = dio->submit.waiter;

                        WRITE_ONCE(dio->submit.waiter, NULL);
                        wake_up_process(waiter);
                } else if (dio->flags & IOMAP_DIO_WRITE) {
                        struct inode *inode = file_inode(dio->iocb->ki_filp);

                        INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
                        queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
                        iomap_dio_complete_work(&dio->aio.work);

                bio_check_pages_dirty(bio);
                struct bio_vec *bvec;

                bio_for_each_segment_all(bvec, bio, i)
                        put_page(bvec->bv_page);
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
        struct page *page = ZERO_PAGE(0);

        bio = bio_alloc(GFP_KERNEL, 1);
        bio_set_dev(bio, iomap->bdev);
        bio->bi_iter.bi_sector =
                (iomap->addr + pos - iomap->offset) >> 9;
        bio->bi_private = dio;
        bio->bi_end_io = iomap_dio_bio_end_io;

        if (bio_add_page(bio, page, len, 0) != len)
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

        atomic_inc(&dio->ref);
        return submit_bio(bio);
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
                void *data, struct iomap *iomap)
        struct iomap_dio *dio = data;
        unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
        unsigned int fs_block_size = i_blocksize(inode), pad;
        unsigned int align = iov_iter_alignment(dio->submit.iter);
        struct iov_iter iter;
        bool need_zeroout = false;

        if ((pos | length | align) & ((1 << blkbits) - 1))

        switch (iomap->type) {
                if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
        case IOMAP_UNWRITTEN:
                if (!(dio->flags & IOMAP_DIO_WRITE)) {
                        length = iov_iter_zero(length, dio->submit.iter);

                dio->flags |= IOMAP_DIO_UNWRITTEN;
                if (iomap->flags & IOMAP_F_SHARED)
                        dio->flags |= IOMAP_DIO_COW;
                if (iomap->flags & IOMAP_F_NEW)

        /*
         * Operate on a partial iter trimmed to the extent we were called for.
         * We'll update the iter in the dio once we're done with this extent.
         */
        iter = *dio->submit.iter;
        iov_iter_truncate(&iter, length);

        nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

                /* zero out from the start of the block to the write offset */
                pad = pos & (fs_block_size - 1);
                        iomap_dio_zero(dio, iomap, pos - pad, pad);

                        iov_iter_revert(dio->submit.iter, copied);

                bio = bio_alloc(GFP_KERNEL, nr_pages);
                bio_set_dev(bio, iomap->bdev);
                bio->bi_iter.bi_sector =
                        (iomap->addr + pos - iomap->offset) >> 9;
                bio->bi_write_hint = dio->iocb->ki_hint;
                bio->bi_private = dio;
                bio->bi_end_io = iomap_dio_bio_end_io;

                ret = bio_iov_iter_get_pages(bio, &iter);
                        return copied ? copied : ret;

                n = bio->bi_iter.bi_size;
                if (dio->flags & IOMAP_DIO_WRITE) {
                        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
                        task_io_account_write(n);
                        bio_set_op_attrs(bio, REQ_OP_READ, 0);
                        if (dio->flags & IOMAP_DIO_DIRTY)
                                bio_set_pages_dirty(bio);

                iov_iter_advance(dio->submit.iter, n);

                nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

                atomic_inc(&dio->ref);

                dio->submit.last_queue = bdev_get_queue(iomap->bdev);
                dio->submit.cookie = submit_bio(bio);

                /* zero out from the end of the write to the end of the block */
                pad = pos & (fs_block_size - 1);
                        iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = file_inode(iocb->ki_filp);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos, start = pos;
        loff_t end = iocb->ki_pos + count - 1, ret = 0;
        unsigned int flags = IOMAP_DIRECT;
        struct blk_plug plug;
        struct iomap_dio *dio;

        lockdep_assert_held(&inode->i_rwsem);

        dio = kmalloc(sizeof(*dio), GFP_KERNEL);

        atomic_set(&dio->ref, 1);
        dio->i_size = i_size_read(inode);
        dio->end_io = end_io;

        dio->submit.iter = iter;
        if (is_sync_kiocb(iocb)) {
                dio->submit.waiter = current;
                dio->submit.cookie = BLK_QC_T_NONE;
                dio->submit.last_queue = NULL;

        if (iov_iter_rw(iter) == READ) {
                if (pos >= dio->i_size)

                if (iter->type == ITER_IOVEC)
                        dio->flags |= IOMAP_DIO_DIRTY;
                dio->flags |= IOMAP_DIO_WRITE;
                flags |= IOMAP_WRITE;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (filemap_range_has_page(mapping, start, end)) {
                flags |= IOMAP_NOWAIT;

        ret = filemap_write_and_wait_range(mapping, start, end);

        ret = invalidate_inode_pages2_range(mapping,
                        start >> PAGE_SHIFT, end >> PAGE_SHIFT);

        if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
            !inode->i_sb->s_dio_done_wq) {
                ret = sb_init_dio_done_wq(inode->i_sb);

        inode_dio_begin(inode);

        blk_start_plug(&plug);
                ret = iomap_apply(inode, pos, count, flags, ops, dio,
                /* magic error code to fall back to buffered I/O */
                if (ret == -ENOTBLK)

                if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
        } while ((count = iov_iter_count(iter)) > 0);
        blk_finish_plug(&plug);

                iomap_dio_set_error(dio, ret);

        if (!atomic_dec_and_test(&dio->ref)) {
                if (!is_sync_kiocb(iocb))
                        return -EIOCBQUEUED;

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!READ_ONCE(dio->submit.waiter))

                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
                            !dio->submit.last_queue ||
                            !blk_poll(dio->submit.last_queue,
                                        dio->submit.cookie))
                __set_current_state(TASK_RUNNING);

        ret = iomap_dio_complete(dio);
EXPORT_SYMBOL_GPL(iomap_dio_rw);
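
/*
 * Illustrative sketch (assumption, not from this file): direct I/O reads and
 * writes are typically routed through iomap_dio_rw() from a filesystem's
 * ->read_iter/->write_iter methods, with an optional end_io callback for
 * post-processing such as unwritten extent conversion. The "myfs_*" names are
 * hypothetical:
 *
 *        static int myfs_dio_end_io(struct kiocb *iocb, ssize_t size,
 *                        unsigned flags)
 *        {
 *                return 0;
 *        }
 *
 *        static ssize_t myfs_direct_read_iter(struct kiocb *iocb,
 *                        struct iov_iter *to)
 *        {
 *                struct inode *inode = file_inode(iocb->ki_filp);
 *                ssize_t ret;
 *
 *                inode_lock_shared(inode);
 *                ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops,
 *                                myfs_dio_end_io);
 *                inode_unlock_shared(inode);
 *                return ret;
 *        }
 *
 * iomap_dio_rw() asserts that the caller already holds inode->i_rwsem, which
 * is why the wrapper takes the lock around the call.
 */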