1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2010 Red Hat, Inc.
4 * Copyright (c) 2016-2018 Christoph Hellwig.
6 #include <linux/module.h>
7 #include <linux/compiler.h>
9 #include <linux/iomap.h>
10 #include <linux/uaccess.h>
11 #include <linux/gfp.h>
12 #include <linux/migrate.h>
14 #include <linux/mm_inline.h>
15 #include <linux/swap.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/file.h>
19 #include <linux/uio.h>
20 #include <linux/backing-dev.h>
21 #include <linux/buffer_head.h>
22 #include <linux/task_io_accounting_ops.h>
23 #include <linux/dax.h>
24 #include <linux/sched/signal.h>
29 * Execute an iomap write on a segment of the mapping that spans a
30 * contiguous range of pages that have identical block mapping state.
32 * This avoids the need to map pages individually, do individual allocations
33 * for each page and most importantly avoids the need for filesystem specific
34 * locking per page. Instead, all the operations are amortised over the entire
35 * range of pages. It is assumed that the filesystems will lock whatever
36 * resources they require in the iomap_begin call, and release them in the
37 * iomap_end call.
40 iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
41 const struct iomap_ops *ops, void *data, iomap_actor_t actor)
43 struct iomap iomap = { 0 };
44 loff_t written = 0, ret;
47 * Need to map a range from start position for length bytes. This can
48 * span multiple pages - it is only guaranteed to return a range of a
49 * single type of pages (e.g. all into a hole, all mapped or all
50 * unwritten). Failure at this point has nothing to undo.
52 * If allocation is required for this range, reserve the space now so
53 * that the allocation is guaranteed to succeed later on. Once we copy
54 * the data into the page cache pages, then we cannot fail otherwise we
55 * expose transient stale data. If the reserve fails, we can safely
56 * back out at this point as there is nothing to undo.
58 ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
61 if (WARN_ON(iomap.offset > pos))
63 if (WARN_ON(iomap.length == 0))
67 * Cut down the length to the one actually provided by the filesystem,
68 * as it might not be able to give us the whole size that we requested.
70 if (iomap.offset + iomap.length < pos + length)
71 length = iomap.offset + iomap.length - pos;
74 * Now that we have guaranteed that the space allocation will succeed,
75 * we can do the copy-in page by page without having to worry about
76 * failures exposing transient data.
78 written = actor(inode, pos, length, data, &iomap);
81 * Now the data has been copied, commit the range we've copied. This
82 * should not fail unless the filesystem has had a fatal error.
85 ret = ops->iomap_end(inode, pos, length,
86 written > 0 ? written : 0,
90 return written ? written : ret;
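/*
 * Illustrative sketch, not part of the original source: a minimal actor plus
 * the loop that callers typically wrap around iomap_apply().  The names
 * example_count_actor() and iomap_example_count() are hypothetical; the
 * exported helpers further down (iomap_zero_range, iomap_seek_hole, ...)
 * follow exactly this pattern.
 */
static loff_t
example_count_actor(struct inode *inode, loff_t pos, loff_t length,
        void *data, struct iomap *iomap)
{
        loff_t *bytes_seen = data;

        /* consume the whole extent and report how far we got */
        *bytes_seen += length;
        return length;
}

static loff_t
iomap_example_count(struct inode *inode, loff_t pos, loff_t len,
        const struct iomap_ops *ops)
{
        loff_t seen = 0, ret;

        while (len > 0) {
                ret = iomap_apply(inode, pos, len, IOMAP_REPORT, ops, &seen,
                                example_count_actor);
                if (ret <= 0)
                        return ret;
                pos += ret;
                len -= ret;
        }
        return seen;
}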
94 iomap_sector(struct iomap *iomap, loff_t pos)
96 return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
99 static struct iomap_page *
100 iomap_page_create(struct inode *inode, struct page *page)
102 struct iomap_page *iop = to_iomap_page(page);
104 if (iop || i_blocksize(inode) == PAGE_SIZE)
107 iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
108 atomic_set(&iop->read_count, 0);
109 atomic_set(&iop->write_count, 0);
110 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
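/*
 * The bitmap above is sized for the smallest supported block size
 * (SECTOR_SIZE, 512 bytes), i.e. 8 bits for a 4k page; filesystems with
 * larger sub-page blocks simply use fewer bits and leave the rest zero.
 */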
113 * migrate_page_move_mapping() assumes that pages with private data have
114 * their count elevated by 1.
117 set_page_private(page, (unsigned long)iop);
118 SetPagePrivate(page);
123 iomap_page_release(struct page *page)
125 struct iomap_page *iop = to_iomap_page(page);
129 WARN_ON_ONCE(atomic_read(&iop->read_count));
130 WARN_ON_ONCE(atomic_read(&iop->write_count));
131 ClearPagePrivate(page);
132 set_page_private(page, 0);
138 * Calculate the range inside the page that we actually need to read.
141 iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
142 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
144 loff_t orig_pos = *pos;
145 loff_t isize = i_size_read(inode);
146 unsigned block_bits = inode->i_blkbits;
147 unsigned block_size = (1 << block_bits);
148 unsigned poff = offset_in_page(*pos);
149 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
150 unsigned first = poff >> block_bits;
151 unsigned last = (poff + plen - 1) >> block_bits;
154 * If the block size is smaller than the page size we need to check the
155 * per-block uptodate status and adjust the offset and length if needed
156 * to avoid reading in already uptodate ranges.
161 /* move forward for each leading block marked uptodate */
162 for (i = first; i <= last; i++) {
163 if (!test_bit(i, iop->uptodate))
171 /* truncate len if we find any trailing uptodate block(s) */
172 for ( ; i <= last; i++) {
173 if (test_bit(i, iop->uptodate)) {
174 plen -= (last - i + 1) * block_size;
182 * If the extent spans the block that contains the i_size we need to
183 * handle both halves separately so that we properly zero data in the
184 * page cache for blocks that are entirely outside of i_size.
186 if (orig_pos <= isize && orig_pos + length > isize) {
187 unsigned end = offset_in_page(isize - 1) >> block_bits;
189 if (first <= end && last > end)
190 plen -= (last - end) * block_size;
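/*
 * Worked example: on a 4k page with 1k blocks where blocks 0 and 3 are
 * already uptodate, a read of the whole page is trimmed to poff = 1024 and
 * plen = 2048, so only blocks 1 and 2 are actually read.
 */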
198 iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
200 struct iomap_page *iop = to_iomap_page(page);
201 struct inode *inode = page->mapping->host;
202 unsigned first = off >> inode->i_blkbits;
203 unsigned last = (off + len - 1) >> inode->i_blkbits;
205 bool uptodate = true;
208 for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
209 if (i >= first && i <= last)
210 set_bit(i, iop->uptodate);
211 else if (!test_bit(i, iop->uptodate))
216 if (uptodate && !PageError(page))
217 SetPageUptodate(page);
221 iomap_read_finish(struct iomap_page *iop, struct page *page)
223 if (!iop || atomic_dec_and_test(&iop->read_count))
228 iomap_read_page_end_io(struct bio_vec *bvec, int error)
230 struct page *page = bvec->bv_page;
231 struct iomap_page *iop = to_iomap_page(page);
233 if (unlikely(error)) {
234 ClearPageUptodate(page);
237 iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
240 iomap_read_finish(iop, page);
244 iomap_read_end_io(struct bio *bio)
246 int error = blk_status_to_errno(bio->bi_status);
247 struct bio_vec *bvec;
248 struct bvec_iter_all iter_all;
250 bio_for_each_segment_all(bvec, bio, iter_all)
251 iomap_read_page_end_io(bvec, error);
255 struct iomap_readpage_ctx {
256 struct page *cur_page;
257 bool cur_page_in_bio;
260 struct list_head *pages;
264 iomap_read_inline_data(struct inode *inode, struct page *page,
267 size_t size = i_size_read(inode);
270 if (PageUptodate(page))
274 BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
276 addr = kmap_atomic(page);
277 memcpy(addr, iomap->inline_data, size);
278 memset(addr + size, 0, PAGE_SIZE - size);
280 SetPageUptodate(page);
284 iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
287 struct iomap_readpage_ctx *ctx = data;
288 struct page *page = ctx->cur_page;
289 struct iomap_page *iop = iomap_page_create(inode, page);
290 bool same_page = false, is_contig = false;
291 loff_t orig_pos = pos;
295 if (iomap->type == IOMAP_INLINE) {
297 iomap_read_inline_data(inode, page, iomap);
301 /* zero post-eof blocks as the page may be mapped */
302 iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
306 if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
307 zero_user(page, poff, plen);
308 iomap_set_range_uptodate(page, poff, plen);
312 ctx->cur_page_in_bio = true;
315 * Try to merge into a previous segment if we can.
317 sector = iomap_sector(iomap, pos);
318 if (ctx->bio && bio_end_sector(ctx->bio) == sector)
322 __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
323 if (!same_page && iop)
324 atomic_inc(&iop->read_count);
329 * If we start a new segment we need to increase the read count, and we
330 * need to do so before submitting any previous full bio to make sure
331 * that we don't prematurely unlock the page.
334 atomic_inc(&iop->read_count);
336 if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
337 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
338 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
341 submit_bio(ctx->bio);
343 if (ctx->is_readahead) /* same as readahead_gfp_mask */
344 gfp |= __GFP_NORETRY | __GFP_NOWARN;
345 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
346 ctx->bio->bi_opf = REQ_OP_READ;
347 if (ctx->is_readahead)
348 ctx->bio->bi_opf |= REQ_RAHEAD;
349 ctx->bio->bi_iter.bi_sector = sector;
350 bio_set_dev(ctx->bio, iomap->bdev);
351 ctx->bio->bi_end_io = iomap_read_end_io;
354 bio_add_page(ctx->bio, page, plen, poff);
357 * Move the caller beyond our range so that it keeps making progress.
358 * For that we have to include any leading non-uptodate ranges, but
359 * we can skip trailing ones as they will be handled in the next
360 * iteration.
362 return pos - orig_pos + plen;
366 iomap_readpage(struct page *page, const struct iomap_ops *ops)
368 struct iomap_readpage_ctx ctx = { .cur_page = page };
369 struct inode *inode = page->mapping->host;
373 for (poff = 0; poff < PAGE_SIZE; poff += ret) {
374 ret = iomap_apply(inode, page_offset(page) + poff,
375 PAGE_SIZE - poff, 0, ops, &ctx,
376 iomap_readpage_actor);
378 WARN_ON_ONCE(ret == 0);
386 WARN_ON_ONCE(!ctx.cur_page_in_bio);
388 WARN_ON_ONCE(ctx.cur_page_in_bio);
393 * Just like mpage_readpages and block_read_full_page we always
394 * return 0 and just mark the page as PageError on errors. This
395 * should be cleaned up all through the stack eventually.
399 EXPORT_SYMBOL_GPL(iomap_readpage);
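/*
 * Illustrative sketch (hypothetical filesystem "examplefs" with a hypothetical
 * ops table examplefs_iomap_ops): ->readpage is normally just a thin wrapper
 * that hands the filesystem's iomap_ops to iomap_readpage().
 */
static int examplefs_readpage(struct file *unused, struct page *page)
{
        return iomap_readpage(page, &examplefs_iomap_ops);
}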
402 iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
403 loff_t length, loff_t *done)
405 while (!list_empty(pages)) {
406 struct page *page = lru_to_page(pages);
408 if (page_offset(page) >= (u64)pos + length)
411 list_del(&page->lru);
412 if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
417 * If we already have a page in the page cache at index we are
418 * done. Upper layers don't care if it is uptodate after the
419 * readpages call itself as every page gets checked again once
420 * actually needed (e.g. when reading a page fault).
430 iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
431 void *data, struct iomap *iomap)
433 struct iomap_readpage_ctx *ctx = data;
436 for (done = 0; done < length; done += ret) {
437 if (ctx->cur_page && offset_in_page(pos + done) == 0) {
438 if (!ctx->cur_page_in_bio)
439 unlock_page(ctx->cur_page);
440 put_page(ctx->cur_page);
441 ctx->cur_page = NULL;
443 if (!ctx->cur_page) {
444 ctx->cur_page = iomap_next_page(inode, ctx->pages,
448 ctx->cur_page_in_bio = false;
450 ret = iomap_readpage_actor(inode, pos + done, length - done,
458 iomap_readpages(struct address_space *mapping, struct list_head *pages,
459 unsigned nr_pages, const struct iomap_ops *ops)
461 struct iomap_readpage_ctx ctx = {
463 .is_readahead = true,
465 loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
466 loff_t last = page_offset(list_entry(pages->next, struct page, lru));
467 loff_t length = last - pos + PAGE_SIZE, ret = 0;
470 ret = iomap_apply(mapping->host, pos, length, 0, ops,
471 &ctx, iomap_readpages_actor);
473 WARN_ON_ONCE(ret == 0);
484 if (!ctx.cur_page_in_bio)
485 unlock_page(ctx.cur_page);
486 put_page(ctx.cur_page);
490 * Check that we didn't lose a page due to the arcane calling
491 * conventions.
493 WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
496 EXPORT_SYMBOL_GPL(iomap_readpages);
499 * iomap_is_partially_uptodate checks whether blocks within a page are
500 * uptodate or not.
502 * Returns true if all blocks which correspond to a file portion
503 * we want to read within the page are uptodate.
506 iomap_is_partially_uptodate(struct page *page, unsigned long from,
509 struct iomap_page *iop = to_iomap_page(page);
510 struct inode *inode = page->mapping->host;
511 unsigned len, first, last;
514 /* Limit range to one page */
515 len = min_t(unsigned, PAGE_SIZE - from, count);
517 /* First and last blocks in range within page */
518 first = from >> inode->i_blkbits;
519 last = (from + len - 1) >> inode->i_blkbits;
522 for (i = first; i <= last; i++)
523 if (!test_bit(i, iop->uptodate))
530 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
533 iomap_releasepage(struct page *page, gfp_t gfp_mask)
536 * mm accommodates an old ext3 case where clean pages might not have had
537 * the dirty bit cleared. Thus, it can send actual dirty pages to
538 * ->releasepage() via shrink_active_list(), skip those here.
540 if (PageDirty(page) || PageWriteback(page))
542 iomap_page_release(page);
545 EXPORT_SYMBOL_GPL(iomap_releasepage);
548 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
551 * If we are invalidating the entire page, clear the dirty state from it
552 * and release it to avoid unnecessary buildup of the LRU.
554 if (offset == 0 && len == PAGE_SIZE) {
555 WARN_ON_ONCE(PageWriteback(page));
556 cancel_dirty_page(page);
557 iomap_page_release(page);
560 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
562 #ifdef CONFIG_MIGRATION
564 iomap_migrate_page(struct address_space *mapping, struct page *newpage,
565 struct page *page, enum migrate_mode mode)
569 ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
570 if (ret != MIGRATEPAGE_SUCCESS)
573 if (page_has_private(page)) {
574 ClearPagePrivate(page);
576 set_page_private(newpage, page_private(page));
577 set_page_private(page, 0);
579 SetPagePrivate(newpage);
582 if (mode != MIGRATE_SYNC_NO_COPY)
583 migrate_page_copy(newpage, page);
585 migrate_page_states(newpage, page);
586 return MIGRATEPAGE_SUCCESS;
588 EXPORT_SYMBOL_GPL(iomap_migrate_page);
589 #endif /* CONFIG_MIGRATION */
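/*
 * Illustrative sketch (hypothetical "examplefs"): the exported page helpers
 * above are normally wired straight into the filesystem's
 * address_space_operations; examplefs_readpage is the hypothetical wrapper
 * sketched after iomap_readpage().
 */
static const struct address_space_operations examplefs_aops = {
        .readpage               = examplefs_readpage,
        .releasepage            = iomap_releasepage,
        .invalidatepage         = iomap_invalidatepage,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
#ifdef CONFIG_MIGRATION
        .migratepage            = iomap_migrate_page,
#endif
};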
592 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
594 loff_t i_size = i_size_read(inode);
597 * Only truncate newly allocated pages beyond EOF, even if the
598 * write started inside the existing inode size.
600 if (pos + len > i_size)
601 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
605 iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
606 unsigned poff, unsigned plen, unsigned from, unsigned to,
612 if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
613 zero_user_segments(page, poff, from, to, poff + plen);
614 iomap_set_range_uptodate(page, poff, plen);
618 bio_init(&bio, &bvec, 1);
619 bio.bi_opf = REQ_OP_READ;
620 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
621 bio_set_dev(&bio, iomap->bdev);
622 __bio_add_page(&bio, page, plen, poff);
623 return submit_bio_wait(&bio);
627 __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
628 struct page *page, struct iomap *iomap)
630 struct iomap_page *iop = iomap_page_create(inode, page);
631 loff_t block_size = i_blocksize(inode);
632 loff_t block_start = pos & ~(block_size - 1);
633 loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
634 unsigned from = offset_in_page(pos), to = from + len, poff, plen;
637 if (PageUptodate(page))
641 iomap_adjust_read_range(inode, iop, &block_start,
642 block_end - block_start, &poff, &plen);
646 if ((from > poff && from < poff + plen) ||
647 (to > poff && to < poff + plen)) {
648 status = iomap_read_page_sync(inode, block_start, page,
649 poff, plen, from, to, iomap);
654 } while ((block_start += plen) < block_end);
660 iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
661 struct page **pagep, struct iomap *iomap)
663 const struct iomap_page_ops *page_ops = iomap->page_ops;
664 pgoff_t index = pos >> PAGE_SHIFT;
668 BUG_ON(pos + len > iomap->offset + iomap->length);
670 if (fatal_signal_pending(current))
673 if (page_ops && page_ops->page_prepare) {
674 status = page_ops->page_prepare(inode, pos, len, iomap);
679 page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
685 if (iomap->type == IOMAP_INLINE)
686 iomap_read_inline_data(inode, page, iomap);
687 else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
688 status = __block_write_begin_int(page, pos, len, NULL, iomap);
690 status = __iomap_write_begin(inode, pos, len, page, iomap);
692 if (unlikely(status))
701 iomap_write_failed(inode, pos, len);
704 if (page_ops && page_ops->page_done)
705 page_ops->page_done(inode, pos, 0, NULL, iomap);
710 iomap_set_page_dirty(struct page *page)
712 struct address_space *mapping = page_mapping(page);
715 if (unlikely(!mapping))
716 return !TestSetPageDirty(page);
719 * Lock out page->mem_cgroup migration to keep PageDirty
720 * synchronized with per-memcg dirty page counters.
722 lock_page_memcg(page);
723 newly_dirty = !TestSetPageDirty(page);
725 __set_page_dirty(page, mapping, 0);
726 unlock_page_memcg(page);
729 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
732 EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
735 __iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
736 unsigned copied, struct page *page, struct iomap *iomap)
738 flush_dcache_page(page);
741 * The blocks that were entirely written will now be uptodate, so we
742 * don't have to worry about a readpage reading them and overwriting a
743 * partial write. However if we have encountered a short write and only
744 * partially written into a block, it will not be marked uptodate, so a
745 * readpage might come in and destroy our partial write.
747 * Do the simplest thing, and just treat any short write to a non
748 * uptodate page as a zero-length write, and force the caller to redo
749 * the whole thing.
751 if (unlikely(copied < len && !PageUptodate(page)))
753 iomap_set_range_uptodate(page, offset_in_page(pos), len);
754 iomap_set_page_dirty(page);
759 iomap_write_end_inline(struct inode *inode, struct page *page,
760 struct iomap *iomap, loff_t pos, unsigned copied)
764 WARN_ON_ONCE(!PageUptodate(page));
765 BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
767 addr = kmap_atomic(page);
768 memcpy(iomap->inline_data + pos, addr + pos, copied);
771 mark_inode_dirty(inode);
776 iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
777 unsigned copied, struct page *page, struct iomap *iomap)
779 const struct iomap_page_ops *page_ops = iomap->page_ops;
782 if (iomap->type == IOMAP_INLINE) {
783 ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
784 } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
785 ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
788 ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
791 __generic_write_end(inode, pos, ret, page);
792 if (page_ops && page_ops->page_done)
793 page_ops->page_done(inode, pos, copied, page, iomap);
797 iomap_write_failed(inode, pos, len);
802 iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
805 struct iov_iter *i = data;
808 unsigned int flags = AOP_FLAG_NOFS;
812 unsigned long offset; /* Offset into pagecache page */
813 unsigned long bytes; /* Bytes to write to page */
814 size_t copied; /* Bytes copied from user */
816 offset = offset_in_page(pos);
817 bytes = min_t(unsigned long, PAGE_SIZE - offset,
824 * Bring in the user page that we will copy from _first_.
825 * Otherwise there's a nasty deadlock on copying from the
826 * same page as we're writing to, without it being marked
827 * up-to-date.
829 * Not only is this an optimisation, but it is also required
830 * to check that the address is actually valid, when atomic
831 * usercopies are used, below.
833 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
838 status = iomap_write_begin(inode, pos, bytes, flags, &page,
840 if (unlikely(status))
843 if (mapping_writably_mapped(inode->i_mapping))
844 flush_dcache_page(page);
846 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
848 flush_dcache_page(page);
850 status = iomap_write_end(inode, pos, bytes, copied, page,
852 if (unlikely(status < 0))
858 iov_iter_advance(i, copied);
859 if (unlikely(copied == 0)) {
861 * If we were unable to copy any data at all, we must
862 * fall back to a single segment length write.
864 * If we didn't fall back here, we could livelock
865 * because not all segments in the iov can be copied at
866 * once without a pagefault.
868 bytes = min_t(unsigned long, PAGE_SIZE - offset,
869 iov_iter_single_seg_count(i));
876 balance_dirty_pages_ratelimited(inode->i_mapping);
877 } while (iov_iter_count(i) && length);
879 return written ? written : status;
883 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
884 const struct iomap_ops *ops)
886 struct inode *inode = iocb->ki_filp->f_mapping->host;
887 loff_t pos = iocb->ki_pos, ret = 0, written = 0;
889 while (iov_iter_count(iter)) {
890 ret = iomap_apply(inode, pos, iov_iter_count(iter),
891 IOMAP_WRITE, ops, iter, iomap_write_actor);
898 return written ? written : ret;
900 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
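/*
 * Illustrative sketch (hypothetical "examplefs"): a buffered ->write_iter
 * typically takes the inode lock, runs generic_write_checks() and then hands
 * the iterator to iomap_file_buffered_write(); the caller is responsible for
 * advancing ki_pos and for the O_SYNC handling.
 */
static ssize_t examplefs_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock(inode);
        ret = generic_write_checks(iocb, from);
        if (ret > 0) {
                ret = iomap_file_buffered_write(iocb, from,
                                &examplefs_iomap_ops);
                if (ret > 0)
                        iocb->ki_pos += ret;
        }
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}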
903 __iomap_read_page(struct inode *inode, loff_t offset)
905 struct address_space *mapping = inode->i_mapping;
908 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
911 if (!PageUptodate(page)) {
913 return ERR_PTR(-EIO);
919 iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
926 struct page *page, *rpage;
927 unsigned long offset; /* Offset into pagecache page */
928 unsigned long bytes; /* Bytes to write to page */
930 offset = offset_in_page(pos);
931 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
933 rpage = __iomap_read_page(inode, pos);
935 return PTR_ERR(rpage);
937 status = iomap_write_begin(inode, pos, bytes,
938 AOP_FLAG_NOFS, &page, iomap);
940 if (unlikely(status))
943 WARN_ON_ONCE(!PageUptodate(page));
945 status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
946 if (unlikely(status <= 0)) {
947 if (WARN_ON_ONCE(status == 0))
958 balance_dirty_pages_ratelimited(inode->i_mapping);
965 iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
966 const struct iomap_ops *ops)
971 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
981 EXPORT_SYMBOL_GPL(iomap_file_dirty);
983 static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
984 unsigned bytes, struct iomap *iomap)
989 status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
994 zero_user(page, offset, bytes);
995 mark_page_accessed(page);
997 return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
1000 static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
1001 struct iomap *iomap)
1003 return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
1004 iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
1008 iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
1009 void *data, struct iomap *iomap)
1011 bool *did_zero = data;
1015 /* already zeroed? we're done. */
1016 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1020 unsigned offset, bytes;
1022 offset = offset_in_page(pos);
1023 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
1026 status = iomap_dax_zero(pos, offset, bytes, iomap);
1028 status = iomap_zero(inode, pos, offset, bytes, iomap);
1037 } while (count > 0);
1043 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1044 const struct iomap_ops *ops)
1049 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
1050 ops, did_zero, iomap_zero_range_actor);
1060 EXPORT_SYMBOL_GPL(iomap_zero_range);
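/*
 * Worked example for iomap_truncate_page() below: with a 4k block size and a
 * new i_size of 6144, off is 2048 and the remaining 2048 bytes of that block
 * (6144..8191) are zeroed; a size that is already block aligned does nothing.
 */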
1063 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1064 const struct iomap_ops *ops)
1066 unsigned int blocksize = i_blocksize(inode);
1067 unsigned int off = pos & (blocksize - 1);
1069 /* Block boundary? Nothing to do */
1072 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1074 EXPORT_SYMBOL_GPL(iomap_truncate_page);
1077 iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
1078 void *data, struct iomap *iomap)
1080 struct page *page = data;
1083 if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
1084 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
1087 block_commit_write(page, 0, length);
1089 WARN_ON_ONCE(!PageUptodate(page));
1090 iomap_page_create(inode, page);
1091 set_page_dirty(page);
1097 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1099 struct page *page = vmf->page;
1100 struct inode *inode = file_inode(vmf->vma->vm_file);
1101 unsigned long length;
1102 loff_t offset, size;
1106 size = i_size_read(inode);
1107 if ((page->mapping != inode->i_mapping) ||
1108 (page_offset(page) > size)) {
1109 /* We overload EFAULT to mean page got truncated */
1114 /* page is wholly or partially inside EOF */
1115 if (((page->index + 1) << PAGE_SHIFT) > size)
1116 length = offset_in_page(size);
1120 offset = page_offset(page);
1121 while (length > 0) {
1122 ret = iomap_apply(inode, offset, length,
1123 IOMAP_WRITE | IOMAP_FAULT, ops, page,
1124 iomap_page_mkwrite_actor);
1125 if (unlikely(ret <= 0))
1131 wait_for_stable_page(page);
1132 return VM_FAULT_LOCKED;
1135 return block_page_mkwrite_return(ret);
1137 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
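/*
 * Illustrative sketch (hypothetical "examplefs"): the ->page_mkwrite vm
 * operation usually brackets iomap_page_mkwrite() with sb_start_pagefault()
 * so that a filesystem freeze waits for the in-flight fault to finish.
 */
static vm_fault_t examplefs_page_mkwrite(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret;

        sb_start_pagefault(inode->i_sb);
        file_update_time(vmf->vma->vm_file);
        ret = iomap_page_mkwrite(vmf, &examplefs_iomap_ops);
        sb_end_pagefault(inode->i_sb);

        return ret;
}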
1140 struct fiemap_extent_info *fi;
1144 static int iomap_to_fiemap(struct fiemap_extent_info *fi,
1145 struct iomap *iomap, u32 flags)
1147 switch (iomap->type) {
1151 case IOMAP_DELALLOC:
1152 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
1156 case IOMAP_UNWRITTEN:
1157 flags |= FIEMAP_EXTENT_UNWRITTEN;
1160 flags |= FIEMAP_EXTENT_DATA_INLINE;
1164 if (iomap->flags & IOMAP_F_MERGED)
1165 flags |= FIEMAP_EXTENT_MERGED;
1166 if (iomap->flags & IOMAP_F_SHARED)
1167 flags |= FIEMAP_EXTENT_SHARED;
1169 return fiemap_fill_next_extent(fi, iomap->offset,
1170 iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
1171 iomap->length, flags);
1175 iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1176 struct iomap *iomap)
1178 struct fiemap_ctx *ctx = data;
1179 loff_t ret = length;
1181 if (iomap->type == IOMAP_HOLE)
1184 ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
1187 case 0: /* success */
1189 case 1: /* extent array full */
1196 int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
1197 loff_t start, loff_t len, const struct iomap_ops *ops)
1199 struct fiemap_ctx ctx;
1202 memset(&ctx, 0, sizeof(ctx));
1204 ctx.prev.type = IOMAP_HOLE;
1206 ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
1210 if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
1211 ret = filemap_write_and_wait(inode->i_mapping);
1217 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
1218 iomap_fiemap_actor);
1219 /* inode with no (attribute) mapping will give ENOENT */
1231 if (ctx.prev.type != IOMAP_HOLE) {
1232 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
1239 EXPORT_SYMBOL_GPL(iomap_fiemap);
1242 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
1243 * Returns true if found and updates @lastoff to the offset in file.
1246 page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
1249 const struct address_space_operations *ops = inode->i_mapping->a_ops;
1250 unsigned int bsize = i_blocksize(inode), off;
1251 bool seek_data = whence == SEEK_DATA;
1252 loff_t poff = page_offset(page);
1254 if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
1257 if (*lastoff < poff) {
1259 * Last offset smaller than the start of the page means we found
1260 * a hole:
1262 if (whence == SEEK_HOLE)
1268 * Just check the page unless we can and should check block ranges:
1270 if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
1271 return PageUptodate(page) == seek_data;
1274 if (unlikely(page->mapping != inode->i_mapping))
1275 goto out_unlock_not_found;
1277 for (off = 0; off < PAGE_SIZE; off += bsize) {
1278 if (offset_in_page(*lastoff) >= off + bsize)
1280 if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
1284 *lastoff = poff + off + bsize;
1287 out_unlock_not_found:
1293 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
1295 * Within unwritten extents, the page cache determines which parts are holes
1296 * and which are data: uptodate buffer heads count as data; everything else
1297 * counts as a hole.
1299 * Returns the resulting offset on success, and -ENOENT otherwise.
1302 page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
1305 pgoff_t index = offset >> PAGE_SHIFT;
1306 pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1307 loff_t lastoff = offset;
1308 struct pagevec pvec;
1313 pagevec_init(&pvec);
1316 unsigned nr_pages, i;
1318 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
1323 for (i = 0; i < nr_pages; i++) {
1324 struct page *page = pvec.pages[i];
1326 if (page_seek_hole_data(inode, page, &lastoff, whence))
1328 lastoff = page_offset(page) + PAGE_SIZE;
1330 pagevec_release(&pvec);
1331 } while (index < end);
1333 /* When no page at lastoff and we are not done, we found a hole. */
1334 if (whence != SEEK_HOLE)
1338 if (lastoff < offset + length)
1343 pagevec_release(&pvec);
1349 iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
1350 void *data, struct iomap *iomap)
1352 switch (iomap->type) {
1353 case IOMAP_UNWRITTEN:
1354 offset = page_cache_seek_hole_data(inode, offset, length,
1360 *(loff_t *)data = offset;
1368 iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1370 loff_t size = i_size_read(inode);
1371 loff_t length = size - offset;
1374 /* Nothing to be found before or beyond the end of the file. */
1375 if (offset < 0 || offset >= size)
1378 while (length > 0) {
1379 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1380 &offset, iomap_seek_hole_actor);
1392 EXPORT_SYMBOL_GPL(iomap_seek_hole);
1395 iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
1396 void *data, struct iomap *iomap)
1398 switch (iomap->type) {
1401 case IOMAP_UNWRITTEN:
1402 offset = page_cache_seek_hole_data(inode, offset, length,
1408 *(loff_t *)data = offset;
1414 iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1416 loff_t size = i_size_read(inode);
1417 loff_t length = size - offset;
1420 /* Nothing to be found before or beyond the end of the file. */
1421 if (offset < 0 || offset >= size)
1424 while (length > 0) {
1425 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1426 &offset, iomap_seek_data_actor);
1440 EXPORT_SYMBOL_GPL(iomap_seek_data);
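/*
 * Illustrative sketch (hypothetical "examplefs"): SEEK_HOLE/SEEK_DATA support
 * in ->llseek is normally routed through the two helpers above, with all
 * other whence values falling back to generic_file_llseek().
 */
static loff_t examplefs_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file_inode(file);

        switch (whence) {
        case SEEK_HOLE:
                offset = iomap_seek_hole(inode, offset, &examplefs_iomap_ops);
                break;
        case SEEK_DATA:
                offset = iomap_seek_data(inode, offset, &examplefs_iomap_ops);
                break;
        default:
                return generic_file_llseek(file, offset, whence);
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}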
1443 * Private flags for iomap_dio, must not overlap with the public ones in
1444 * iomap.h:
1446 #define IOMAP_DIO_WRITE_FUA (1 << 28)
1447 #define IOMAP_DIO_NEED_SYNC (1 << 29)
1448 #define IOMAP_DIO_WRITE (1 << 30)
1449 #define IOMAP_DIO_DIRTY (1 << 31)
1453 iomap_dio_end_io_t *end_io;
1459 bool wait_for_completion;
1462 /* used during submission and for synchronous completion: */
1464 struct iov_iter *iter;
1465 struct task_struct *waiter;
1466 struct request_queue *last_queue;
1470 /* used for aio completion: */
1472 struct work_struct work;
1477 int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
1479 struct request_queue *q = READ_ONCE(kiocb->private);
1483 return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
1485 EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
1487 static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
1490 atomic_inc(&dio->ref);
1492 if (dio->iocb->ki_flags & IOCB_HIPRI)
1493 bio_set_polled(bio, dio->iocb);
1495 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
1496 dio->submit.cookie = submit_bio(bio);
1499 static ssize_t iomap_dio_complete(struct iomap_dio *dio)
1501 struct kiocb *iocb = dio->iocb;
1502 struct inode *inode = file_inode(iocb->ki_filp);
1503 loff_t offset = iocb->ki_pos;
1507 ret = dio->end_io(iocb,
1508 dio->error ? dio->error : dio->size,
1516 /* check for short read */
1517 if (offset + ret > dio->i_size &&
1518 !(dio->flags & IOMAP_DIO_WRITE))
1519 ret = dio->i_size - offset;
1520 iocb->ki_pos += ret;
1524 * Try again to invalidate clean pages which might have been cached by
1525 * non-direct readahead, or faulted in by get_user_pages() if the source
1526 * of the write was an mmap'ed region of the file we're writing. Either
1527 * one is a pretty crazy thing to do, so we don't support it 100%. If
1528 * this invalidation fails, tough, the write still worked...
1530 * And this page cache invalidation has to be after dio->end_io(), as
1531 * some filesystems convert unwritten extents to real allocations in
1532 * end_io() when necessary, otherwise a racing buffer read would cache
1533 * zeros from unwritten extents.
1536 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
1538 err = invalidate_inode_pages2_range(inode->i_mapping,
1539 offset >> PAGE_SHIFT,
1540 (offset + dio->size - 1) >> PAGE_SHIFT);
1542 dio_warn_stale_pagecache(iocb->ki_filp);
1546 * If this is a DSYNC write, make sure we push it to stable storage now
1547 * that we've written data.
1549 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
1550 ret = generic_write_sync(iocb, ret);
1552 inode_dio_end(file_inode(iocb->ki_filp));
1558 static void iomap_dio_complete_work(struct work_struct *work)
1560 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
1561 struct kiocb *iocb = dio->iocb;
1563 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
1567 * Set an error in the dio if none is set yet. We have to use cmpxchg
1568 * as the submission context and the completion context(s) can race to
1569 * set it.
1571 static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
1573 cmpxchg(&dio->error, 0, ret);
1576 static void iomap_dio_bio_end_io(struct bio *bio)
1578 struct iomap_dio *dio = bio->bi_private;
1579 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
1582 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
1584 if (atomic_dec_and_test(&dio->ref)) {
1585 if (dio->wait_for_completion) {
1586 struct task_struct *waiter = dio->submit.waiter;
1587 WRITE_ONCE(dio->submit.waiter, NULL);
1588 blk_wake_io_task(waiter);
1589 } else if (dio->flags & IOMAP_DIO_WRITE) {
1590 struct inode *inode = file_inode(dio->iocb->ki_filp);
1592 INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
1593 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
1595 iomap_dio_complete_work(&dio->aio.work);
1600 bio_check_pages_dirty(bio);
1602 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
1603 struct bvec_iter_all iter_all;
1604 struct bio_vec *bvec;
1606 bio_for_each_segment_all(bvec, bio, iter_all)
1607 put_page(bvec->bv_page);
1614 iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
1617 struct page *page = ZERO_PAGE(0);
1618 int flags = REQ_SYNC | REQ_IDLE;
1621 bio = bio_alloc(GFP_KERNEL, 1);
1622 bio_set_dev(bio, iomap->bdev);
1623 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
1624 bio->bi_private = dio;
1625 bio->bi_end_io = iomap_dio_bio_end_io;
1628 __bio_add_page(bio, page, len, 0);
1629 bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
1630 iomap_dio_submit_bio(dio, iomap, bio);
1634 iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1635 struct iomap_dio *dio, struct iomap *iomap)
1637 unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
1638 unsigned int fs_block_size = i_blocksize(inode), pad;
1639 unsigned int align = iov_iter_alignment(dio->submit.iter);
1640 struct iov_iter iter;
1642 bool need_zeroout = false;
1643 bool use_fua = false;
1644 int nr_pages, ret = 0;
1647 if ((pos | length | align) & ((1 << blkbits) - 1))
1650 if (iomap->type == IOMAP_UNWRITTEN) {
1651 dio->flags |= IOMAP_DIO_UNWRITTEN;
1652 need_zeroout = true;
1655 if (iomap->flags & IOMAP_F_SHARED)
1656 dio->flags |= IOMAP_DIO_COW;
1658 if (iomap->flags & IOMAP_F_NEW) {
1659 need_zeroout = true;
1660 } else if (iomap->type == IOMAP_MAPPED) {
1662 * Use a FUA write if we need datasync semantics, this is a pure
1663 * data IO that doesn't require any metadata updates (including
1664 * after IO completion such as unwritten extent conversion) and
1665 * the underlying device supports FUA. This allows us to avoid
1666 * cache flushes on IO completion.
1668 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1669 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
1670 blk_queue_fua(bdev_get_queue(iomap->bdev)))
1675 * Operate on a partial iter trimmed to the extent we were called for.
1676 * We'll update the iter in the dio once we're done with this extent.
1678 iter = *dio->submit.iter;
1679 iov_iter_truncate(&iter, length);
1681 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1686 /* zero out from the start of the block to the write offset */
1687 pad = pos & (fs_block_size - 1);
1689 iomap_dio_zero(dio, iomap, pos - pad, pad);
1695 iov_iter_revert(dio->submit.iter, copied);
1699 bio = bio_alloc(GFP_KERNEL, nr_pages);
1700 bio_set_dev(bio, iomap->bdev);
1701 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
1702 bio->bi_write_hint = dio->iocb->ki_hint;
1703 bio->bi_ioprio = dio->iocb->ki_ioprio;
1704 bio->bi_private = dio;
1705 bio->bi_end_io = iomap_dio_bio_end_io;
1707 ret = bio_iov_iter_get_pages(bio, &iter);
1708 if (unlikely(ret)) {
1710 * We have to stop part way through an IO. We must fall
1711 * through to the sub-block tail zeroing here, otherwise
1712 * this short IO may expose stale data in the tail of
1713 * the block we haven't written data to.
1719 n = bio->bi_iter.bi_size;
1720 if (dio->flags & IOMAP_DIO_WRITE) {
1721 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1723 bio->bi_opf |= REQ_FUA;
1725 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
1726 task_io_account_write(n);
1728 bio->bi_opf = REQ_OP_READ;
1729 if (dio->flags & IOMAP_DIO_DIRTY)
1730 bio_set_pages_dirty(bio);
1733 iov_iter_advance(dio->submit.iter, n);
1739 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1740 iomap_dio_submit_bio(dio, iomap, bio);
1744 * We need to zero out the tail of a sub-block write if the extent type
1745 * requires zeroing or the write extends beyond EOF. If we don't zero
1746 * the block tail in the latter case, we can expose stale data via mmap
1747 * reads of the EOF block.
1751 ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
1752 /* zero out from the end of the write to the end of the block */
1753 pad = pos & (fs_block_size - 1);
1755 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1757 return copied ? copied : ret;
1761 iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
1763 length = iov_iter_zero(length, dio->submit.iter);
1764 dio->size += length;
1769 iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
1770 struct iomap_dio *dio, struct iomap *iomap)
1772 struct iov_iter *iter = dio->submit.iter;
1775 BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
1777 if (dio->flags & IOMAP_DIO_WRITE) {
1778 loff_t size = inode->i_size;
1781 memset(iomap->inline_data + size, 0, pos - size);
1782 copied = copy_from_iter(iomap->inline_data + pos, length, iter);
1784 if (pos + copied > size)
1785 i_size_write(inode, pos + copied);
1786 mark_inode_dirty(inode);
1789 copied = copy_to_iter(iomap->inline_data + pos, length, iter);
1791 dio->size += copied;
1796 iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
1797 void *data, struct iomap *iomap)
1799 struct iomap_dio *dio = data;
1801 switch (iomap->type) {
1803 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
1805 return iomap_dio_hole_actor(length, dio);
1806 case IOMAP_UNWRITTEN:
1807 if (!(dio->flags & IOMAP_DIO_WRITE))
1808 return iomap_dio_hole_actor(length, dio);
1809 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
1811 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
1813 return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
1821 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
1822 * is being issued as AIO or not. This allows us to optimise pure data writes
1823 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1824 * REQ_FLUSH post write. This is slightly tricky because a single request here
1825 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1826 * may be pure data writes. In that case, we still need to do a full data sync
1827 * completion.
1830 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1831 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
1833 struct address_space *mapping = iocb->ki_filp->f_mapping;
1834 struct inode *inode = file_inode(iocb->ki_filp);
1835 size_t count = iov_iter_count(iter);
1836 loff_t pos = iocb->ki_pos, start = pos;
1837 loff_t end = iocb->ki_pos + count - 1, ret = 0;
1838 unsigned int flags = IOMAP_DIRECT;
1839 bool wait_for_completion = is_sync_kiocb(iocb);
1840 struct blk_plug plug;
1841 struct iomap_dio *dio;
1843 lockdep_assert_held(&inode->i_rwsem);
1848 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1853 atomic_set(&dio->ref, 1);
1855 dio->i_size = i_size_read(inode);
1856 dio->end_io = end_io;
1860 dio->submit.iter = iter;
1861 dio->submit.waiter = current;
1862 dio->submit.cookie = BLK_QC_T_NONE;
1863 dio->submit.last_queue = NULL;
1865 if (iov_iter_rw(iter) == READ) {
1866 if (pos >= dio->i_size)
1869 if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
1870 dio->flags |= IOMAP_DIO_DIRTY;
1872 flags |= IOMAP_WRITE;
1873 dio->flags |= IOMAP_DIO_WRITE;
1875 /* for data sync or sync, we need sync completion processing */
1876 if (iocb->ki_flags & IOCB_DSYNC)
1877 dio->flags |= IOMAP_DIO_NEED_SYNC;
1880 * For datasync only writes, we optimistically try using FUA for
1881 * this IO. Any non-FUA write that occurs will clear this flag,
1882 * hence we know before completion whether a cache flush is
1883 * necessary.
1885 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1886 dio->flags |= IOMAP_DIO_WRITE_FUA;
1889 if (iocb->ki_flags & IOCB_NOWAIT) {
1890 if (filemap_range_has_page(mapping, start, end)) {
1894 flags |= IOMAP_NOWAIT;
1897 ret = filemap_write_and_wait_range(mapping, start, end);
1902 * Try to invalidate cache pages for the range we're direct
1903 * writing. If this invalidation fails, tough, the write will
1904 * still work, but racing two incompatible write paths is a
1905 * pretty crazy thing to do, so we don't support it 100%.
1907 ret = invalidate_inode_pages2_range(mapping,
1908 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
1910 dio_warn_stale_pagecache(iocb->ki_filp);
1913 if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
1914 !inode->i_sb->s_dio_done_wq) {
1915 ret = sb_init_dio_done_wq(inode->i_sb);
1920 inode_dio_begin(inode);
1922 blk_start_plug(&plug);
1924 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1927 /* magic error code to fall back to buffered I/O */
1928 if (ret == -ENOTBLK) {
1929 wait_for_completion = true;
1936 if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
1938 } while ((count = iov_iter_count(iter)) > 0);
1939 blk_finish_plug(&plug);
1942 iomap_dio_set_error(dio, ret);
1945 * If all the writes we issued were FUA, we don't need to flush the
1946 * cache on IO completion. Clear the sync flag for this case.
1948 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1949 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1951 WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
1952 WRITE_ONCE(iocb->private, dio->submit.last_queue);
1955 * We are about to drop our additional submission reference, which
1956 * might be the last reference to the dio. There are three
1957 * different ways we can progress here:
1959 * (a) If this is the last reference we will always complete and free
1960 * the dio ourselves.
1961 * (b) If this is not the last reference, and we serve an asynchronous
1962 * iocb, we must never touch the dio after the decrement, the
1963 * I/O completion handler will complete and free it.
1964 * (c) If this is not the last reference, but we serve a synchronous
1965 * iocb, the I/O completion handler will wake us up on the drop
1966 * of the final reference, and we will complete and free it here
1967 * after we got woken by the I/O completion handler.
1969 dio->wait_for_completion = wait_for_completion;
1970 if (!atomic_dec_and_test(&dio->ref)) {
1971 if (!wait_for_completion)
1972 return -EIOCBQUEUED;
1975 set_current_state(TASK_UNINTERRUPTIBLE);
1976 if (!READ_ONCE(dio->submit.waiter))
1979 if (!(iocb->ki_flags & IOCB_HIPRI) ||
1980 !dio->submit.last_queue ||
1981 !blk_poll(dio->submit.last_queue,
1982 dio->submit.cookie, true))
1985 __set_current_state(TASK_RUNNING);
1988 return iomap_dio_complete(dio);
1994 EXPORT_SYMBOL_GPL(iomap_dio_rw);
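/*
 * Illustrative sketch (hypothetical "examplefs"): a direct ->read_iter path
 * takes the inode lock shared and hands the iterator to iomap_dio_rw();
 * passing a NULL end_io is fine for reads that need no completion processing.
 */
static ssize_t examplefs_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (!iov_iter_count(to))
                return 0;

        inode_lock_shared(inode);
        ret = iomap_dio_rw(iocb, to, &examplefs_iomap_ops, NULL);
        inode_unlock_shared(inode);

        return ret;
}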
1996 /* Swapfile activation */
1999 struct iomap_swapfile_info {
2000 struct iomap iomap; /* accumulated iomap */
2001 struct swap_info_struct *sis;
2002 uint64_t lowest_ppage; /* lowest physical addr seen (pages) */
2003 uint64_t highest_ppage; /* highest physical addr seen (pages) */
2004 unsigned long nr_pages; /* number of pages collected */
2005 int nr_extents; /* extent count */
2009 * Collect physical extents for this swap file. Physical extents reported to
2010 * the swap code must be trimmed to align to a page boundary. The logical
2011 * offset within the file is irrelevant since the swapfile code maps logical
2012 * page numbers of the swap device to the physical page-aligned extents.
2014 static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
2016 struct iomap *iomap = &isi->iomap;
2017 unsigned long nr_pages;
2018 uint64_t first_ppage;
2019 uint64_t first_ppage_reported;
2020 uint64_t next_ppage;
2024 * Round the start up and the end down so that the physical
2025 * extent aligns to a page boundary.
2027 first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
2028 next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
2031 /* Skip too-short physical extents. */
2032 if (first_ppage >= next_ppage)
2034 nr_pages = next_ppage - first_ppage;
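/*
 * Worked example: a physical extent at addr 6144 with a length of 20480
 * bytes rounds (with 4k pages) to first_ppage = 2 and next_ppage = 6, so
 * only 4 whole pages are usable for swap.
 */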
2037 * Calculate how much swap space we're adding; the first page contains
2038 * the swap header and doesn't count. The mm still wants that first
2039 * page fed to add_swap_extent, however.
2041 first_ppage_reported = first_ppage;
2042 if (iomap->offset == 0)
2043 first_ppage_reported++;
2044 if (isi->lowest_ppage > first_ppage_reported)
2045 isi->lowest_ppage = first_ppage_reported;
2046 if (isi->highest_ppage < (next_ppage - 1))
2047 isi->highest_ppage = next_ppage - 1;
2049 /* Add extent, set up for the next call. */
2050 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
2053 isi->nr_extents += error;
2054 isi->nr_pages += nr_pages;
2059 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
2060 * swap only cares about contiguous page-aligned physical extents and makes no
2061 * distinction between written and unwritten extents.
2063 static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
2064 loff_t count, void *data, struct iomap *iomap)
2066 struct iomap_swapfile_info *isi = data;
2069 switch (iomap->type) {
2071 case IOMAP_UNWRITTEN:
2072 /* Only real or unwritten extents. */
2075 /* No inline data. */
2076 pr_err("swapon: file is inline\n");
2079 pr_err("swapon: file has unallocated extents\n");
2083 /* No uncommitted metadata or shared blocks. */
2084 if (iomap->flags & IOMAP_F_DIRTY) {
2085 pr_err("swapon: file is not committed\n");
2088 if (iomap->flags & IOMAP_F_SHARED) {
2089 pr_err("swapon: file has shared extents\n");
2093 /* Only one bdev per swap file. */
2094 if (iomap->bdev != isi->sis->bdev) {
2095 pr_err("swapon: file is on multiple devices\n");
2099 if (isi->iomap.length == 0) {
2100 /* No accumulated extent, so just store it. */
2101 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2102 } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
2103 /* Append this to the accumulated extent. */
2104 isi->iomap.length += iomap->length;
2106 /* Otherwise, add the retained iomap and store this one. */
2107 error = iomap_swapfile_add_extent(isi);
2110 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2116 * Iterate a swap file's iomaps to construct physical extents that can be
2117 * passed to the swapfile subsystem.
2119 int iomap_swapfile_activate(struct swap_info_struct *sis,
2120 struct file *swap_file, sector_t *pagespan,
2121 const struct iomap_ops *ops)
2123 struct iomap_swapfile_info isi = {
2125 .lowest_ppage = (sector_t)-1ULL,
2127 struct address_space *mapping = swap_file->f_mapping;
2128 struct inode *inode = mapping->host;
2130 loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
2134 * Persist all file mapping metadata so that we won't have any
2135 * IOMAP_F_DIRTY iomaps.
2137 ret = vfs_fsync(swap_file, 1);
2142 ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
2143 ops, &isi, iomap_swapfile_activate_actor);
2151 if (isi.iomap.length) {
2152 ret = iomap_swapfile_add_extent(&isi);
2157 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
2158 sis->max = isi.nr_pages;
2159 sis->pages = isi.nr_pages - 1;
2160 sis->highest_bit = isi.nr_pages - 1;
2161 return isi.nr_extents;
2163 EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
2164 #endif /* CONFIG_SWAP */
2167 iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
2168 void *data, struct iomap *iomap)
2170 sector_t *bno = data, addr;
2172 if (iomap->type == IOMAP_MAPPED) {
2173 addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
2175 WARN(1, "would truncate bmap result\n");
2182 /* legacy ->bmap interface. 0 is the error return (!) */
2184 iomap_bmap(struct address_space *mapping, sector_t bno,
2185 const struct iomap_ops *ops)
2187 struct inode *inode = mapping->host;
2188 loff_t pos = bno << inode->i_blkbits;
2189 unsigned blocksize = i_blocksize(inode);
2191 if (filemap_write_and_wait(mapping))
2195 iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
2198 EXPORT_SYMBOL_GPL(iomap_bmap);