1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2010 Red Hat, Inc.
4 * Copyright (C) 2016-2019 Christoph Hellwig.
6 #include <linux/module.h>
7 #include <linux/compiler.h>
9 #include <linux/iomap.h>
10 #include <linux/pagemap.h>
11 #include <linux/uio.h>
12 #include <linux/buffer_head.h>
13 #include <linux/dax.h>
14 #include <linux/writeback.h>
15 #include <linux/list_sort.h>
16 #include <linux/swap.h>
17 #include <linux/bio.h>
18 #include <linux/sched/signal.h>
19 #include <linux/migrate.h>
22 #include "../internal.h"
24 #define IOEND_BATCH_SIZE 4096
27 * Structure allocated for each folio when block size < folio size
28 * to track sub-folio uptodate status and I/O completions.
31 atomic_t read_bytes_pending;
32 atomic_t write_bytes_pending;
33 spinlock_t uptodate_lock;
34 unsigned long uptodate[];
37 static inline struct iomap_page *to_iomap_page(struct folio *folio)
39 if (folio_test_private(folio))
40 return folio_get_private(folio);
44 static struct bio_set iomap_ioend_bioset;
46 static struct iomap_page *
47 iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
49 struct iomap_page *iop = to_iomap_page(folio);
50 unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
53 if (iop || nr_blocks <= 1)
56 if (flags & IOMAP_NOWAIT)
59 gfp = GFP_NOFS | __GFP_NOFAIL;
61 iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
64 spin_lock_init(&iop->uptodate_lock);
65 if (folio_test_uptodate(folio))
66 bitmap_fill(iop->uptodate, nr_blocks);
67 folio_attach_private(folio, iop);
72 static void iomap_page_release(struct folio *folio)
74 struct iomap_page *iop = folio_detach_private(folio);
75 struct inode *inode = folio->mapping->host;
76 unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
80 WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
81 WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
82 WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
83 folio_test_uptodate(folio));
88 * Calculate the range inside the folio that we actually need to read.
90 static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
91 loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
93 struct iomap_page *iop = to_iomap_page(folio);
94 loff_t orig_pos = *pos;
95 loff_t isize = i_size_read(inode);
96 unsigned block_bits = inode->i_blkbits;
97 unsigned block_size = (1 << block_bits);
98 size_t poff = offset_in_folio(folio, *pos);
99 size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
100 unsigned first = poff >> block_bits;
101 unsigned last = (poff + plen - 1) >> block_bits;
104 * If the block size is smaller than the page size, we need to check the
105 * per-block uptodate status and adjust the offset and length if needed
106 * to avoid reading in already uptodate ranges.
111 /* move forward for each leading block marked uptodate */
112 for (i = first; i <= last; i++) {
113 if (!test_bit(i, iop->uptodate))
121 /* truncate len if we find any trailing uptodate block(s) */
122 for ( ; i <= last; i++) {
123 if (test_bit(i, iop->uptodate)) {
124 plen -= (last - i + 1) * block_size;
132 * If the extent spans the block that contains the i_size, we need to
133 * handle both halves separately so that we properly zero data in the
134 * page cache for blocks that are entirely outside of i_size.
136 if (orig_pos <= isize && orig_pos + length > isize) {
137 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
139 if (first <= end && last > end)
140 plen -= (last - end) * block_size;
147 static void iomap_iop_set_range_uptodate(struct folio *folio,
148 struct iomap_page *iop, size_t off, size_t len)
150 struct inode *inode = folio->mapping->host;
151 unsigned first = off >> inode->i_blkbits;
152 unsigned last = (off + len - 1) >> inode->i_blkbits;
155 spin_lock_irqsave(&iop->uptodate_lock, flags);
156 bitmap_set(iop->uptodate, first, last - first + 1);
157 if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
158 folio_mark_uptodate(folio);
159 spin_unlock_irqrestore(&iop->uptodate_lock, flags);
162 static void iomap_set_range_uptodate(struct folio *folio,
163 struct iomap_page *iop, size_t off, size_t len)
166 iomap_iop_set_range_uptodate(folio, iop, off, len);
168 folio_mark_uptodate(folio);
171 static void iomap_finish_folio_read(struct folio *folio, size_t offset,
172 size_t len, int error)
174 struct iomap_page *iop = to_iomap_page(folio);
176 if (unlikely(error)) {
177 folio_clear_uptodate(folio);
178 folio_set_error(folio);
180 iomap_set_range_uptodate(folio, iop, offset, len);
183 if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
187 static void iomap_read_end_io(struct bio *bio)
189 int error = blk_status_to_errno(bio->bi_status);
190 struct folio_iter fi;
192 bio_for_each_folio_all(fi, bio)
193 iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
197 struct iomap_readpage_ctx {
198 struct folio *cur_folio;
199 bool cur_folio_in_bio;
201 struct readahead_control *rac;
205 * iomap_read_inline_data - copy inline data into the page cache
206 * @iter: iteration structure
207 * @folio: folio to copy to
209 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
210 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
211 * Returns zero for success to complete the read, or the usual negative errno.
213 static int iomap_read_inline_data(const struct iomap_iter *iter,
216 struct iomap_page *iop;
217 const struct iomap *iomap = iomap_iter_srcmap(iter);
218 size_t size = i_size_read(iter->inode) - iomap->offset;
219 size_t poff = offset_in_page(iomap->offset);
220 size_t offset = offset_in_folio(folio, iomap->offset);
223 if (folio_test_uptodate(folio))
226 if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
228 if (WARN_ON_ONCE(size > PAGE_SIZE -
229 offset_in_page(iomap->inline_data)))
231 if (WARN_ON_ONCE(size > iomap->length))
234 iop = iomap_page_create(iter->inode, folio, iter->flags);
236 iop = to_iomap_page(folio);
238 addr = kmap_local_folio(folio, offset);
239 memcpy(addr, iomap->inline_data, size);
240 memset(addr + size, 0, PAGE_SIZE - poff - size);
242 iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
246 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
249 const struct iomap *srcmap = iomap_iter_srcmap(iter);
251 return srcmap->type != IOMAP_MAPPED ||
252 (srcmap->flags & IOMAP_F_NEW) ||
253 pos >= i_size_read(iter->inode);
256 static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
257 struct iomap_readpage_ctx *ctx, loff_t offset)
259 const struct iomap *iomap = &iter->iomap;
260 loff_t pos = iter->pos + offset;
261 loff_t length = iomap_length(iter) - offset;
262 struct folio *folio = ctx->cur_folio;
263 struct iomap_page *iop;
264 loff_t orig_pos = pos;
268 if (iomap->type == IOMAP_INLINE)
269 return iomap_read_inline_data(iter, folio);
271 /* zero post-eof blocks as the page may be mapped */
272 iop = iomap_page_create(iter->inode, folio, iter->flags);
273 iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
277 if (iomap_block_needs_zeroing(iter, pos)) {
278 folio_zero_range(folio, poff, plen);
279 iomap_set_range_uptodate(folio, iop, poff, plen);
283 ctx->cur_folio_in_bio = true;
285 atomic_add(plen, &iop->read_bytes_pending);
287 sector = iomap_sector(iomap, pos);
289 bio_end_sector(ctx->bio) != sector ||
290 !bio_add_folio(ctx->bio, folio, plen, poff)) {
291 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
292 gfp_t orig_gfp = gfp;
293 unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
296 submit_bio(ctx->bio);
298 if (ctx->rac) /* same as readahead_gfp_mask */
299 gfp |= __GFP_NORETRY | __GFP_NOWARN;
300 ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
303 * If the bio_alloc fails, try it again for a single page to
304 * avoid having to deal with partial page reads. This emulates
305 * what do_mpage_read_folio does.
308 ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
312 ctx->bio->bi_opf |= REQ_RAHEAD;
313 ctx->bio->bi_iter.bi_sector = sector;
314 ctx->bio->bi_end_io = iomap_read_end_io;
315 bio_add_folio_nofail(ctx->bio, folio, plen, poff);
320 * Move the caller beyond our range so that it keeps making progress.
321 * For that, we have to include any leading non-uptodate ranges, but
322 * we can skip trailing ones as they will be handled in the next
325 return pos - orig_pos + plen;
328 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
330 struct iomap_iter iter = {
331 .inode = folio->mapping->host,
332 .pos = folio_pos(folio),
333 .len = folio_size(folio),
335 struct iomap_readpage_ctx ctx = {
340 trace_iomap_readpage(iter.inode, 1);
342 while ((ret = iomap_iter(&iter, ops)) > 0)
343 iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
346 folio_set_error(folio);
350 WARN_ON_ONCE(!ctx.cur_folio_in_bio);
352 WARN_ON_ONCE(ctx.cur_folio_in_bio);
357 * Just like mpage_readahead and block_read_full_folio, we always
358 * return 0 and just set the folio error flag on errors. This
359 * should be cleaned up throughout the stack eventually.
363 EXPORT_SYMBOL_GPL(iomap_read_folio);
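/*
 * Illustrative sketch (hypothetical myfs_* names, not taken from any real
 * filesystem) of how a caller wires iomap_read_folio() into its
 * ->read_folio address_space operation.  A real ->iomap_begin would look
 * up the extent covering the requested range; here we simply pretend the
 * file is mapped 1:1 onto its block device.
 */
static int myfs_read_iomap_begin(struct inode *inode, loff_t pos,
		loff_t length, unsigned flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	/* Pretend [pos, pos + length) is one contiguous mapped extent. */
	iomap->type = IOMAP_MAPPED;
	iomap->offset = pos;
	iomap->length = length;
	iomap->addr = pos;	/* byte address on iomap->bdev */
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_ops myfs_read_iomap_ops = {
	.iomap_begin	= myfs_read_iomap_begin,
};

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &myfs_read_iomap_ops);
}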
365 static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
366 struct iomap_readpage_ctx *ctx)
368 loff_t length = iomap_length(iter);
371 for (done = 0; done < length; done += ret) {
372 if (ctx->cur_folio &&
373 offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
374 if (!ctx->cur_folio_in_bio)
375 folio_unlock(ctx->cur_folio);
376 ctx->cur_folio = NULL;
378 if (!ctx->cur_folio) {
379 ctx->cur_folio = readahead_folio(ctx->rac);
380 ctx->cur_folio_in_bio = false;
382 ret = iomap_readpage_iter(iter, ctx, done);
391 * iomap_readahead - Attempt to read pages from a file.
392 * @rac: Describes the pages to be read.
393 * @ops: The operations vector for the filesystem.
395 * This function is for filesystems to call to implement their readahead
396 * address_space operation.
398 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
399 * blocks from disc), and may wait for it. The caller may be trying to
400 * access a different page, and so sleeping excessively should be avoided.
401 * It may allocate memory, but should avoid costly allocations. This
402 * function is called with memalloc_nofs set, so allocations will not cause
403 * the filesystem to be reentered.
405 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
407 struct iomap_iter iter = {
408 .inode = rac->mapping->host,
409 .pos = readahead_pos(rac),
410 .len = readahead_length(rac),
412 struct iomap_readpage_ctx ctx = {
416 trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
418 while (iomap_iter(&iter, ops) > 0)
419 iter.processed = iomap_readahead_iter(&iter, &ctx);
424 if (!ctx.cur_folio_in_bio)
425 folio_unlock(ctx.cur_folio);
428 EXPORT_SYMBOL_GPL(iomap_readahead);
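/*
 * Companion sketch to the read_folio example above: ->readahead simply
 * forwards to iomap_readahead() with the same (hypothetical) read-side
 * iomap_ops, and iomap takes care of batching the folios into bios.
 */
static void myfs_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &myfs_read_iomap_ops);
}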
431 * iomap_is_partially_uptodate checks whether blocks within a folio are uptodate.
434 * Returns true if all blocks which correspond to the specified part
435 * of the folio are uptodate.
437 bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
439 struct iomap_page *iop = to_iomap_page(folio);
440 struct inode *inode = folio->mapping->host;
441 unsigned first, last, i;
446 /* Caller's range may extend past the end of this folio */
447 count = min(folio_size(folio) - from, count);
449 /* First and last blocks in range within folio */
450 first = from >> inode->i_blkbits;
451 last = (from + count - 1) >> inode->i_blkbits;
453 for (i = first; i <= last; i++)
454 if (!test_bit(i, iop->uptodate))
458 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
461 * iomap_get_folio - get a folio reference for writing
462 * @iter: iteration structure
463 * @pos: start offset of write
465 * Returns a locked reference to the folio at @pos, or an error pointer if the
466 * folio could not be obtained.
468 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
470 unsigned fgp = FGP_WRITEBEGIN | FGP_NOFS;
472 if (iter->flags & IOMAP_NOWAIT)
475 return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
476 fgp, mapping_gfp_mask(iter->inode->i_mapping));
478 EXPORT_SYMBOL_GPL(iomap_get_folio);
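/*
 * Sketch of a hypothetical iomap_folio_ops->get_folio hook.  A filesystem
 * that needs to attach private state to the folio (or take extra locks)
 * before the generic buffered write path touches it can do that work here
 * and fall back to iomap_get_folio() for the actual page cache lookup.
 */
static struct folio *myfs_get_folio(struct iomap_iter *iter, loff_t pos,
		unsigned len)
{
	struct folio *folio;

	folio = iomap_get_folio(iter, pos);
	if (IS_ERR(folio))
		return folio;
	/* e.g. attach filesystem-private write state to the folio here */
	return folio;
}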
480 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
482 trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
486 * mm accommodates an old ext3 case where clean folios might
487 * not have had the dirty bit cleared. Thus, it can send actual
488 * dirty folios to ->release_folio() via shrink_active_list();
491 if (folio_test_dirty(folio) || folio_test_writeback(folio))
493 iomap_page_release(folio);
496 EXPORT_SYMBOL_GPL(iomap_release_folio);
498 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
500 trace_iomap_invalidate_folio(folio->mapping->host,
501 folio_pos(folio) + offset, len);
504 * If we're invalidating the entire folio, clear the dirty state
505 * from it and release it to avoid unnecessary buildup of the LRU.
507 if (offset == 0 && len == folio_size(folio)) {
508 WARN_ON_ONCE(folio_test_writeback(folio));
509 folio_cancel_dirty(folio);
510 iomap_page_release(folio);
511 } else if (folio_test_large(folio)) {
512 /* Must release the iop so the page can be split */
513 WARN_ON_ONCE(!folio_test_uptodate(folio) &&
514 folio_test_dirty(folio));
515 iomap_page_release(folio);
518 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
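/*
 * For reference, a sketch of how the folio management helpers above are
 * typically plugged into a filesystem's address_space_operations.  The
 * myfs_read_folio/myfs_readahead/myfs_writepages entry points are the
 * hypothetical ones from the other sketches in this file; the remaining
 * hooks map directly onto the exported iomap helpers.
 */
static const struct address_space_operations myfs_aops = {
	.read_folio		= myfs_read_folio,
	.readahead		= myfs_readahead,
	.writepages		= myfs_writepages,
	.dirty_folio		= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
};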
521 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
523 loff_t i_size = i_size_read(inode);
526	 * Only truncate newly allocated pages beyond EOF, even if the
527 * write started inside the existing inode size.
529 if (pos + len > i_size)
530 truncate_pagecache_range(inode, max(pos, i_size),
534 static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
535 size_t poff, size_t plen, const struct iomap *iomap)
540 bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
541 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
542 bio_add_folio_nofail(&bio, folio, plen, poff);
543 return submit_bio_wait(&bio);
546 static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
547 size_t len, struct folio *folio)
549 const struct iomap *srcmap = iomap_iter_srcmap(iter);
550 struct iomap_page *iop;
551 loff_t block_size = i_blocksize(iter->inode);
552 loff_t block_start = round_down(pos, block_size);
553 loff_t block_end = round_up(pos + len, block_size);
554 unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
555 size_t from = offset_in_folio(folio, pos), to = from + len;
558 if (folio_test_uptodate(folio))
560 folio_clear_error(folio);
562 iop = iomap_page_create(iter->inode, folio, iter->flags);
563 if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1)
567 iomap_adjust_read_range(iter->inode, folio, &block_start,
568 block_end - block_start, &poff, &plen);
572 if (!(iter->flags & IOMAP_UNSHARE) &&
573 (from <= poff || from >= poff + plen) &&
574 (to <= poff || to >= poff + plen))
577 if (iomap_block_needs_zeroing(iter, block_start)) {
578 if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
580 folio_zero_segments(folio, poff, from, to, poff + plen);
584 if (iter->flags & IOMAP_NOWAIT)
587 status = iomap_read_folio_sync(block_start, folio,
592 iomap_set_range_uptodate(folio, iop, poff, plen);
593 } while ((block_start += plen) < block_end);
598 static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
601 const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
603 if (folio_ops && folio_ops->get_folio)
604 return folio_ops->get_folio(iter, pos, len);
606 return iomap_get_folio(iter, pos);
609 static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
612 const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
614 if (folio_ops && folio_ops->put_folio) {
615 folio_ops->put_folio(iter->inode, pos, ret, folio);
622 static int iomap_write_begin_inline(const struct iomap_iter *iter,
625 /* needs more work for the tailpacking case; disable for now */
626 if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
628 return iomap_read_inline_data(iter, folio);
631 static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
632 size_t len, struct folio **foliop)
634 const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
635 const struct iomap *srcmap = iomap_iter_srcmap(iter);
639 BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
640 if (srcmap != &iter->iomap)
641 BUG_ON(pos + len > srcmap->offset + srcmap->length);
643 if (fatal_signal_pending(current))
646 if (!mapping_large_folio_support(iter->inode->i_mapping))
647 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
649 folio = __iomap_get_folio(iter, pos, len);
651 return PTR_ERR(folio);
654 * Now we have a locked folio, before we do anything with it we need to
655 * check that the iomap we have cached is not stale. The inode extent
656 * mapping can change due to concurrent IO in flight (e.g.
657 * IOMAP_UNWRITTEN state can change and memory reclaim could have
658 * reclaimed a previously partially written page at this index after IO
659 * completion before this write reaches this file offset) and hence we
660 * could do the wrong thing here (zero a page range incorrectly or fail
661 * to zero) and corrupt data.
663 if (folio_ops && folio_ops->iomap_valid) {
664 bool iomap_valid = folio_ops->iomap_valid(iter->inode,
667 iter->iomap.flags |= IOMAP_F_STALE;
673 if (pos + len > folio_pos(folio) + folio_size(folio))
674 len = folio_pos(folio) + folio_size(folio) - pos;
676 if (srcmap->type == IOMAP_INLINE)
677 status = iomap_write_begin_inline(iter, folio);
678 else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
679 status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
681 status = __iomap_write_begin(iter, pos, len, folio);
683 if (unlikely(status))
690 __iomap_put_folio(iter, pos, 0, folio);
691 iomap_write_failed(iter->inode, pos, len);
696 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
697 size_t copied, struct folio *folio)
699 struct iomap_page *iop = to_iomap_page(folio);
700 flush_dcache_folio(folio);
703 * The blocks that were entirely written will now be uptodate, so we
704 * don't have to worry about a read_folio reading them and overwriting a
705 * partial write. However, if we've encountered a short write and only
706 * partially written into a block, it will not be marked uptodate, so a
707 * read_folio might come in and destroy our partial write.
709 * Do the simplest thing and just treat any short write to a
710 * non-uptodate page as a zero-length write, and force the caller to
711 * redo the whole thing.
713 if (unlikely(copied < len && !folio_test_uptodate(folio)))
715 iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
716 filemap_dirty_folio(inode->i_mapping, folio);
720 static size_t iomap_write_end_inline(const struct iomap_iter *iter,
721 struct folio *folio, loff_t pos, size_t copied)
723 const struct iomap *iomap = &iter->iomap;
726 WARN_ON_ONCE(!folio_test_uptodate(folio));
727 BUG_ON(!iomap_inline_data_valid(iomap));
729 flush_dcache_folio(folio);
730 addr = kmap_local_folio(folio, pos);
731 memcpy(iomap_inline_data(iomap, pos), addr, copied);
734 mark_inode_dirty(iter->inode);
738 /* Returns the number of bytes copied. May be 0. Cannot be an errno. */
739 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
740 size_t copied, struct folio *folio)
742 const struct iomap *srcmap = iomap_iter_srcmap(iter);
743 loff_t old_size = iter->inode->i_size;
746 if (srcmap->type == IOMAP_INLINE) {
747 ret = iomap_write_end_inline(iter, folio, pos, copied);
748 } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
749 ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
750 copied, &folio->page, NULL);
752 ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
756 * Update the in-memory inode size after copying the data into the page
757 * cache. It's up to the file system to write the updated size to disk,
758 * preferably after I/O completion so that no stale data is exposed.
760 if (pos + ret > old_size) {
761 i_size_write(iter->inode, pos + ret);
762 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
764 __iomap_put_folio(iter, pos, ret, folio);
767 pagecache_isize_extended(iter->inode, old_size, pos);
769 iomap_write_failed(iter->inode, pos + ret, len - ret);
773 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
775 loff_t length = iomap_length(iter);
776 loff_t pos = iter->pos;
779 struct address_space *mapping = iter->inode->i_mapping;
780 unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
785 unsigned long offset; /* Offset into pagecache page */
786 unsigned long bytes; /* Bytes to write to page */
787 size_t copied; /* Bytes copied from user */
789 offset = offset_in_page(pos);
790 bytes = min_t(unsigned long, PAGE_SIZE - offset,
793 status = balance_dirty_pages_ratelimited_flags(mapping,
795 if (unlikely(status))
802 * Bring in the user page that we'll copy from _first_.
803 * Otherwise there's a nasty deadlock on copying from the
804 * same page as we're writing to, without it being marked
807 * For async buffered writes the assumption is that the user
808 * page has already been faulted in. This can be optimized by
809 * faulting the user page.
811 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
816 status = iomap_write_begin(iter, pos, bytes, &folio);
817 if (unlikely(status))
819 if (iter->iomap.flags & IOMAP_F_STALE)
822 page = folio_file_page(folio, pos >> PAGE_SHIFT);
823 if (mapping_writably_mapped(mapping))
824 flush_dcache_page(page);
826 copied = copy_page_from_iter_atomic(page, offset, bytes, i);
828 status = iomap_write_end(iter, pos, bytes, copied, folio);
830 if (unlikely(copied != status))
831 iov_iter_revert(i, copied - status);
834 if (unlikely(status == 0)) {
836 * A short copy made iomap_write_end() reject the
837 * thing entirely. Might be memory poisoning
838 * halfway through, might be a race with munmap,
839 * might be severe memory pressure.
848 } while (iov_iter_count(i) && length);
850 if (status == -EAGAIN) {
851 iov_iter_revert(i, written);
854 return written ? written : status;
858 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
859 const struct iomap_ops *ops)
861 struct iomap_iter iter = {
862 .inode = iocb->ki_filp->f_mapping->host,
864 .len = iov_iter_count(i),
865 .flags = IOMAP_WRITE,
869 if (iocb->ki_flags & IOCB_NOWAIT)
870 iter.flags |= IOMAP_NOWAIT;
872 while ((ret = iomap_iter(&iter, ops)) > 0)
873 iter.processed = iomap_write_iter(&iter, i);
875 if (unlikely(ret < 0))
877 ret = iter.pos - iocb->ki_pos;
881 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
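/*
 * Sketch of a hypothetical ->write_iter built on top of
 * iomap_file_buffered_write().  The myfs_* names are illustrative and
 * myfs_buffered_write_iomap_ops is assumed to be defined elsewhere: its
 * ->iomap_begin would reserve or allocate blocks for the written range,
 * and its ->iomap_end would clean up after short writes (see the
 * punch_delalloc sketch further down).  Direct I/O handling and
 * O_SYNC/size-update policies are filesystem specific and omitted.
 */
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from,
				&myfs_buffered_write_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}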
884 * Scan the data range passed to us for dirty page cache folios. If we find a
885 * dirty folio, punch out the preceding range and update the offset from which
886 * the next punch will start.
888 * We can punch out storage reservations under clean pages because they either
889 * contain data that has been written back - in which case the delalloc punch
890 * over that range is a no-op - or they have been faulted in by reads, in which
891 * case they contain zeroes and we can remove the delalloc backing range and any
892 * new writes to those pages will do the normal hole filling operation...
894 * This makes the logic simple: we only need to keep the delalloc extents
895 * over the dirty ranges of the page cache.
897 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
898 * simplify range iterations.
900 static int iomap_write_delalloc_scan(struct inode *inode,
901 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
902 int (*punch)(struct inode *inode, loff_t offset, loff_t length))
904 while (start_byte < end_byte) {
907 /* grab locked page */
908 folio = filemap_lock_folio(inode->i_mapping,
909 start_byte >> PAGE_SHIFT);
911 start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
916 /* if dirty, punch up to offset */
917 if (folio_test_dirty(folio)) {
918 if (start_byte > *punch_start_byte) {
921 error = punch(inode, *punch_start_byte,
922 start_byte - *punch_start_byte);
931 * Make sure the next punch start is correctly bound to
932 * the end of this data range, not the end of the folio.
934 *punch_start_byte = min_t(loff_t, end_byte,
935 folio_next_index(folio) << PAGE_SHIFT);
938 /* move offset to start of next folio in range */
939 start_byte = folio_next_index(folio) << PAGE_SHIFT;
947 * Punch out all the delalloc blocks in the range given except for those that
948 * have dirty data still pending in the page cache - those are going to be
949 * written and so must still retain the delalloc backing for writeback.
951 * As we are scanning the page cache for data, we don't need to reimplement the
952 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
953 * start and end of data ranges correctly even for sub-folio block sizes. This
954 * byte range based iteration is especially convenient because it means we
955 * don't have to care about variable size folios, nor where the start or end of
956 * the data range lies within a folio, whether they lie within the same folio,
957 * or even whether there are multiple discontiguous data ranges within the folio.
959 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
960 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
961 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
962 * date. A write page fault can then mark it dirty. If we then fail a write()
963 * beyond EOF into that up to date cached range, we allocate a delalloc block
964 * beyond EOF and then have to punch it out. Because the range is up to date,
965 * mapping_seek_hole_data() will return it, and we will skip the punch because
966 * the folio is dirty. This is incorrect - we always need to punch out delalloc
967 * beyond EOF in this case as writeback will never write back and convert that
968 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
969 * resulting in always punching out the range from the EOF to the end of the
970 * range the iomap spans.
972 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because
973 * they match the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
974 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
975 * returns the end of the data range (data_end). Using closed intervals would
976 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
977 * the code to subtle off-by-one bugs....
979 static int iomap_write_delalloc_release(struct inode *inode,
980 loff_t start_byte, loff_t end_byte,
981 int (*punch)(struct inode *inode, loff_t pos, loff_t length))
983 loff_t punch_start_byte = start_byte;
984 loff_t scan_end_byte = min(i_size_read(inode), end_byte);
988 * Lock the mapping to avoid races with page faults re-instantiating
989 * folios and dirtying them via ->page_mkwrite whilst we walk the
990 * cache and perform delalloc extent removal. Failing to do this can
991 * leave dirty pages with no space reservation in the cache.
993 filemap_invalidate_lock(inode->i_mapping);
994 while (start_byte < scan_end_byte) {
997 start_byte = mapping_seek_hole_data(inode->i_mapping,
998 start_byte, scan_end_byte, SEEK_DATA);
1000 * If there is no more data to scan, all that is left is to
1001 * punch out the remaining range.
1003 if (start_byte == -ENXIO || start_byte == scan_end_byte)
1005 if (start_byte < 0) {
1009 WARN_ON_ONCE(start_byte < punch_start_byte);
1010 WARN_ON_ONCE(start_byte > scan_end_byte);
1013 * We find the end of this contiguous cached data range by
1014 * seeking from start_byte to the beginning of the next hole.
1016 data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
1017 scan_end_byte, SEEK_HOLE);
1022 WARN_ON_ONCE(data_end <= start_byte);
1023 WARN_ON_ONCE(data_end > scan_end_byte);
1025 error = iomap_write_delalloc_scan(inode, &punch_start_byte,
1026 start_byte, data_end, punch);
1030 /* The next data search starts at the end of this one. */
1031 start_byte = data_end;
1034 if (punch_start_byte < end_byte)
1035 error = punch(inode, punch_start_byte,
1036 end_byte - punch_start_byte);
1038 filemap_invalidate_unlock(inode->i_mapping);
1043 * When a short write occurs, the filesystem may need to remove reserved space
1044 * that was allocated in ->iomap_begin from its ->iomap_end method. For
1045 * filesystems that use delayed allocation, we need to punch out delalloc
1046 * extents from the range that are not dirty in the page cache. As the write can
1047 * race with page faults, there can be dirty pages over the delalloc extent
1048 * outside the range of a short write but still within the delalloc extent
1049 * allocated for this iomap.
1051 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1052 * simplify range iterations.
1054 * The punch() callback *must* only punch delalloc extents in the range passed
1055 * to it. It must skip over all other types of extents in the range and leave
1056 * them completely unchanged. It must do this punch atomically with respect to
1057 * other extent modifications.
1059 * The punch() callback may be called with a folio locked to prevent writeback
1060 * extent allocation racing at the edge of the range we are currently punching.
1061 * The locked folio may or may not cover the range being punched, so it is not
1062 * safe for the punch() callback to lock folios itself.
1066 * inode->i_rwsem (shared or exclusive)
1067 * inode->i_mapping->invalidate_lock (exclusive)
1070 * internal filesystem allocation lock
1072 int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
1073 struct iomap *iomap, loff_t pos, loff_t length,
1075 int (*punch)(struct inode *inode, loff_t pos, loff_t length))
1079 unsigned int blocksize = i_blocksize(inode);
1081 if (iomap->type != IOMAP_DELALLOC)
1084 /* If we didn't reserve the blocks, we're not allowed to punch them. */
1085 if (!(iomap->flags & IOMAP_F_NEW))
1089 * start_byte refers to the first unused block after a short write. If
1090 * nothing was written, round offset down to point at the first block in
1093 if (unlikely(!written))
1094 start_byte = round_down(pos, blocksize);
1096 start_byte = round_up(pos + written, blocksize);
1097 end_byte = round_up(pos + length, blocksize);
1099 /* Nothing to do if we've written the entire delalloc extent */
1100 if (start_byte >= end_byte)
1103 return iomap_write_delalloc_release(inode, start_byte, end_byte,
1106 EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
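/*
 * Sketch of how a delayed-allocation filesystem might use the helper above
 * from its ->iomap_end method after a short or failed buffered write.
 * myfs_punch_delalloc() stands in for the filesystem's own "remove
 * delalloc blocks over this byte range" primitive; both functions are
 * purely illustrative.
 */
static int myfs_punch_delalloc(struct inode *inode, loff_t offset,
		loff_t length)
{
	/* Remove only delalloc extents inside [offset, offset + length). */
	return 0;
}

static int myfs_buffered_write_iomap_end(struct inode *inode, loff_t pos,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	/* The helper is a no-op unless this is a new delalloc mapping. */
	return iomap_file_buffered_write_punch_delalloc(inode, iomap, pos,
			length, written, myfs_punch_delalloc);
}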
1108 static loff_t iomap_unshare_iter(struct iomap_iter *iter)
1110 struct iomap *iomap = &iter->iomap;
1111 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1112 loff_t pos = iter->pos;
1113 loff_t length = iomap_length(iter);
1117 /* don't bother with blocks that are not shared to start with */
1118 if (!(iomap->flags & IOMAP_F_SHARED))
1120 /* don't bother with holes or unwritten extents */
1121 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1125 unsigned long offset = offset_in_page(pos);
1126 unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
1127 struct folio *folio;
1129 status = iomap_write_begin(iter, pos, bytes, &folio);
1130 if (unlikely(status))
1132 if (iter->iomap.flags & IOMAP_F_STALE)
1135 status = iomap_write_end(iter, pos, bytes, bytes, folio);
1136 if (WARN_ON_ONCE(status == 0))
1145 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
1152 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1153 const struct iomap_ops *ops)
1155 struct iomap_iter iter = {
1159 .flags = IOMAP_WRITE | IOMAP_UNSHARE,
1163 while ((ret = iomap_iter(&iter, ops)) > 0)
1164 iter.processed = iomap_unshare_iter(&iter);
1167 EXPORT_SYMBOL_GPL(iomap_file_unshare);
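/*
 * Sketch: a reflink-capable filesystem would typically call
 * iomap_file_unshare() from its fallocate(FALLOC_FL_UNSHARE_RANGE) path,
 * after flushing and locking the range, so that shared blocks backing the
 * page cache are copied up before they can be dirtied.  The ops vector is
 * the hypothetical buffered-write one referenced in earlier sketches.
 */
static int myfs_unshare_range(struct inode *inode, loff_t pos, loff_t len)
{
	return iomap_file_unshare(inode, pos, len,
			&myfs_buffered_write_iomap_ops);
}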
1169 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
1171 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1172 loff_t pos = iter->pos;
1173 loff_t length = iomap_length(iter);
1176 /* already zeroed? we're done. */
1177 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1181 struct folio *folio;
1184 size_t bytes = min_t(u64, SIZE_MAX, length);
1186 status = iomap_write_begin(iter, pos, bytes, &folio);
1189 if (iter->iomap.flags & IOMAP_F_STALE)
1192 offset = offset_in_folio(folio, pos);
1193 if (bytes > folio_size(folio) - offset)
1194 bytes = folio_size(folio) - offset;
1196 folio_zero_range(folio, offset, bytes);
1197 folio_mark_accessed(folio);
1199 bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
1200 if (WARN_ON_ONCE(bytes == 0))
1206 } while (length > 0);
1214 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1215 const struct iomap_ops *ops)
1217 struct iomap_iter iter = {
1221 .flags = IOMAP_ZERO,
1225 while ((ret = iomap_iter(&iter, ops)) > 0)
1226 iter.processed = iomap_zero_iter(&iter, did_zero);
1229 EXPORT_SYMBOL_GPL(iomap_zero_range);
1232 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1233 const struct iomap_ops *ops)
1235 unsigned int blocksize = i_blocksize(inode);
1236 unsigned int off = pos & (blocksize - 1);
1238 /* Block boundary? Nothing to do */
1241 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1243 EXPORT_SYMBOL_GPL(iomap_truncate_page);
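/*
 * Sketch of a typical caller: before changing i_size on truncate, a
 * filesystem zeroes the partial block at the new EOF with
 * iomap_truncate_page() (or an arbitrary sub-range with
 * iomap_zero_range()).  The ops vector is again the hypothetical
 * buffered-write one.
 */
static int myfs_truncate_page(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;

	return iomap_truncate_page(inode, newsize, &did_zero,
			&myfs_buffered_write_iomap_ops);
}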
1245 static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1246 struct folio *folio)
1248 loff_t length = iomap_length(iter);
1251 if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1252 ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1256 block_commit_write(&folio->page, 0, length);
1258 WARN_ON_ONCE(!folio_test_uptodate(folio));
1259 folio_mark_dirty(folio);
1265 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1267 struct iomap_iter iter = {
1268 .inode = file_inode(vmf->vma->vm_file),
1269 .flags = IOMAP_WRITE | IOMAP_FAULT,
1271 struct folio *folio = page_folio(vmf->page);
1275 ret = folio_mkwrite_check_truncate(folio, iter.inode);
1278 iter.pos = folio_pos(folio);
1280 while ((ret = iomap_iter(&iter, ops)) > 0)
1281 iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
1285 folio_wait_stable(folio);
1286 return VM_FAULT_LOCKED;
1288 folio_unlock(folio);
1289 return block_page_mkwrite_return(ret);
1291 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
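/*
 * Sketch of a ->page_mkwrite handler built on iomap_page_mkwrite().  A
 * real filesystem brackets this with sb_start_pagefault()/sb_end_pagefault()
 * and may take extent locks inside its iomap_ops; the myfs_* names and the
 * choice of ops vector are illustrative.
 */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	ret = iomap_page_mkwrite(vmf, &myfs_buffered_write_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}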
1293 static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1294 size_t len, int error)
1296 struct iomap_page *iop = to_iomap_page(folio);
1299 folio_set_error(folio);
1300 mapping_set_error(inode->i_mapping, error);
1303 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
1304 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
1306 if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
1307 folio_end_writeback(folio);
1311 * We're now finished for good with this ioend structure. Update the page
1312 * state, release holds on bios, and finally free up memory. Do not use the
1316 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1318 struct inode *inode = ioend->io_inode;
1319 struct bio *bio = &ioend->io_inline_bio;
1320 struct bio *last = ioend->io_bio, *next;
1321 u64 start = bio->bi_iter.bi_sector;
1322 loff_t offset = ioend->io_offset;
1323 bool quiet = bio_flagged(bio, BIO_QUIET);
1324 u32 folio_count = 0;
1326 for (bio = &ioend->io_inline_bio; bio; bio = next) {
1327 struct folio_iter fi;
1330 * For the last bio, bi_private points to the ioend, so we
1331 * need to explicitly end the iteration here.
1336 next = bio->bi_private;
1338 /* walk all folios in bio, ending page IO on them */
1339 bio_for_each_folio_all(fi, bio) {
1340 iomap_finish_folio_write(inode, fi.folio, fi.length,
1346 /* The ioend has been freed by bio_put() */
1348 if (unlikely(error && !quiet)) {
1349 printk_ratelimited(KERN_ERR
1350 "%s: writeback error on inode %lu, offset %lld, sector %llu",
1351 inode->i_sb->s_id, inode->i_ino, offset, start);
1357 * Ioend completion routine for merged bios. This can only be called from task
1358 * contexts as merged ioends can be of unbounded length. Hence we have to break up
1359 * the writeback completions into manageable chunks to avoid long scheduler
1360 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
1361 * good batch processing throughput without creating adverse scheduler latency
1365 iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1367 struct list_head tmp;
1372 list_replace_init(&ioend->io_list, &tmp);
1373 completions = iomap_finish_ioend(ioend, error);
1375 while (!list_empty(&tmp)) {
1376 if (completions > IOEND_BATCH_SIZE * 8) {
1380 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1381 list_del_init(&ioend->io_list);
1382 completions += iomap_finish_ioend(ioend, error);
1385 EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1388 * We can merge two adjacent ioends if they have the same set of work to do.
1391 iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1393 if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1395 if ((ioend->io_flags & IOMAP_F_SHARED) ^
1396 (next->io_flags & IOMAP_F_SHARED))
1398 if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1399 (next->io_type == IOMAP_UNWRITTEN))
1401 if (ioend->io_offset + ioend->io_size != next->io_offset)
1404 * Do not merge physically discontiguous ioends. The filesystem
1405 * completion functions will have to iterate the physical
1406 * discontiguities even if we merge the ioends at a logical level, so
1407 * we don't gain anything by merging physical discontiguities here.
1409 * We cannot use bio->bi_iter.bi_sector here as it is modified during
1410 * submission so does not point to the start sector of the bio at
1413 if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
1419 iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1421 struct iomap_ioend *next;
1423 INIT_LIST_HEAD(&ioend->io_list);
1425 while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1427 if (!iomap_ioend_can_merge(ioend, next))
1429 list_move_tail(&next->io_list, &ioend->io_list);
1430 ioend->io_size += next->io_size;
1433 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1436 iomap_ioend_compare(void *priv, const struct list_head *a,
1437 const struct list_head *b)
1439 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1440 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1442 if (ia->io_offset < ib->io_offset)
1444 if (ia->io_offset > ib->io_offset)
1450 iomap_sort_ioends(struct list_head *ioend_list)
1452 list_sort(NULL, ioend_list, iomap_ioend_compare);
1454 EXPORT_SYMBOL_GPL(iomap_sort_ioends);
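/*
 * Sketch of how a filesystem typically consumes the three ioend helpers
 * above from its I/O completion workqueue: collect completed ioends on a
 * local list, sort them, merge what can be merged, then finish them in
 * batches.  Per-ioend completion work (unwritten extent conversion,
 * on-disk size updates) is filesystem specific and only hinted at; the
 * myfs_* naming is hypothetical.
 */
static void myfs_finish_ioend_list(struct list_head *ioend_list)
{
	struct iomap_ioend *ioend;

	iomap_sort_ioends(ioend_list);
	while ((ioend = list_first_entry_or_null(ioend_list,
			struct iomap_ioend, io_list))) {
		int error = blk_status_to_errno(ioend->io_bio->bi_status);

		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, ioend_list);
		/* e.g. convert unwritten extents here when !error */
		iomap_finish_ioends(ioend, error);
	}
}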
1456 static void iomap_writepage_end_bio(struct bio *bio)
1458 struct iomap_ioend *ioend = bio->bi_private;
1460 iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1464 * Submit the final bio for an ioend.
1466 * If @error is non-zero, it means that we have a situation where some part of
1467 * the submission process has failed after we've marked pages for writeback
1468 * and unlocked them. In this situation, we need to fail the bio instead of
1469 * submitting it. This typically only happens on a filesystem shutdown.
1472 iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1475 ioend->io_bio->bi_private = ioend;
1476 ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1478 if (wpc->ops->prepare_ioend)
1479 error = wpc->ops->prepare_ioend(ioend, error);
1482 * If we're failing the IO now, just mark the ioend with an
1483 * error and finish it. This will run IO completion immediately
1484 * as there is only one reference to the ioend at this point in
1487 ioend->io_bio->bi_status = errno_to_blk_status(error);
1488 bio_endio(ioend->io_bio);
1492 submit_bio(ioend->io_bio);
1496 static struct iomap_ioend *
1497 iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1498 loff_t offset, sector_t sector, struct writeback_control *wbc)
1500 struct iomap_ioend *ioend;
1503 bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1504 REQ_OP_WRITE | wbc_to_write_flags(wbc),
1505 GFP_NOFS, &iomap_ioend_bioset);
1506 bio->bi_iter.bi_sector = sector;
1507 wbc_init_bio(wbc, bio);
1509 ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1510 INIT_LIST_HEAD(&ioend->io_list);
1511 ioend->io_type = wpc->iomap.type;
1512 ioend->io_flags = wpc->iomap.flags;
1513 ioend->io_inode = inode;
1515 ioend->io_folios = 0;
1516 ioend->io_offset = offset;
1517 ioend->io_bio = bio;
1518 ioend->io_sector = sector;
1523 * Allocate a new bio, and chain the old bio to the new one.
1525 * Note that we have to perform the chaining in this unintuitive order
1526 * so that the bi_private linkage is set up in the right direction for the
1527 * traversal in iomap_finish_ioend().
1530 iomap_chain_bio(struct bio *prev)
1534 new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
1535 bio_clone_blkg_association(new, prev);
1536 new->bi_iter.bi_sector = bio_end_sector(prev);
1538 bio_chain(prev, new);
1539 bio_get(prev); /* for iomap_finish_ioend */
1545 iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1548 if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1549 (wpc->ioend->io_flags & IOMAP_F_SHARED))
1551 if (wpc->iomap.type != wpc->ioend->io_type)
1553 if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1555 if (sector != bio_end_sector(wpc->ioend->io_bio))
1558 * Limit ioend bio chain lengths to minimise IO completion latency. This
1559 * also prevents long tight loops ending page writeback on all the
1560 * folios in the ioend.
1562 if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
1568 * Test to see if we have an existing ioend structure that we could append to
1569 * first; otherwise finish off the current ioend and start another.
1572 iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
1573 struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
1574 struct writeback_control *wbc, struct list_head *iolist)
1576 sector_t sector = iomap_sector(&wpc->iomap, pos);
1577 unsigned len = i_blocksize(inode);
1578 size_t poff = offset_in_folio(folio, pos);
1580 if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
1582 list_add(&wpc->ioend->io_list, iolist);
1583 wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
1586 if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
1587 wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
1588 bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
1592 atomic_add(len, &iop->write_bytes_pending);
1593 wpc->ioend->io_size += len;
1594 wbc_account_cgroup_owner(wbc, &folio->page, len);
1598 * We implement an immediate ioend submission policy here to avoid needing to
1599 * chain multiple ioends and hence nest mempool allocations which can violate
1600 * the forward progress guarantees we need to provide. The current ioend we're
1601 * adding blocks to is cached in the writepage context, and if the new block
1602 * doesn't append to the cached ioend, it will create a new ioend and cache that
1605 * If a new ioend is created and cached, the old ioend is returned and queued
1606 * locally for submission once the entire page is processed or an error has been
1607 * detected. While ioends are submitted immediately after they are completed,
1608 * batching optimisations are provided by higher level block plugging.
1610 * At the end of a writeback pass, there will be a cached ioend remaining on the
1611 * writepage context that the caller will need to submit.
1614 iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1615 struct writeback_control *wbc, struct inode *inode,
1616 struct folio *folio, u64 end_pos)
1618 struct iomap_page *iop = iomap_page_create(inode, folio, 0);
1619 struct iomap_ioend *ioend, *next;
1620 unsigned len = i_blocksize(inode);
1621 unsigned nblocks = i_blocks_per_folio(inode, folio);
1622 u64 pos = folio_pos(folio);
1623 int error = 0, count = 0, i;
1624 LIST_HEAD(submit_list);
1626 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
1629 * Walk through the folio to find areas to write back. If we
1630 * run off the end of the current map or find the current map
1631 * invalid, grab a new one.
1633 for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
1634 if (iop && !test_bit(i, iop->uptodate))
1637 error = wpc->ops->map_blocks(wpc, inode, pos);
1640 trace_iomap_writepage_map(inode, &wpc->iomap);
1641 if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
1643 if (wpc->iomap.type == IOMAP_HOLE)
1645 iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
1650 wpc->ioend->io_folios++;
1652 WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1653 WARN_ON_ONCE(!folio_test_locked(folio));
1654 WARN_ON_ONCE(folio_test_writeback(folio));
1655 WARN_ON_ONCE(folio_test_dirty(folio));
1658 * We cannot cancel the ioend directly here on error. We may have
1659 * already set other pages under writeback and hence we have to run I/O
1660 * completion to mark the error state of the pages under writeback
1663 if (unlikely(error)) {
1665 * Let the filesystem know what portion of the current page
1666 * failed to map. If the page hasn't been added to ioend, it
1667 * won't be affected by I/O completion and we must unlock it
1670 if (wpc->ops->discard_folio)
1671 wpc->ops->discard_folio(folio, pos);
1673 folio_unlock(folio);
1678 folio_start_writeback(folio);
1679 folio_unlock(folio);
1682 * Preserve the original error if there was one; catch
1683 * submission errors here and propagate into subsequent ioend
1686 list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
1689 list_del_init(&ioend->io_list);
1690 error2 = iomap_submit_ioend(wpc, ioend, error);
1691 if (error2 && !error)
1696 * We can end up here with no error and nothing to write only if we race
1697 * with a partial page truncate on a sub-page block sized filesystem.
1700 folio_end_writeback(folio);
1702 mapping_set_error(inode->i_mapping, error);
1707 * Write out a dirty page.
1709 * For delalloc space on the page, we need to allocate space and flush it.
1710 * For unwritten space on the page, we need to start the conversion to
1711 * regular allocated space.
1713 static int iomap_do_writepage(struct folio *folio,
1714 struct writeback_control *wbc, void *data)
1716 struct iomap_writepage_ctx *wpc = data;
1717 struct inode *inode = folio->mapping->host;
1720 trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
1723 * Refuse to write the folio out if we're called from reclaim context.
1725 * This avoids stack overflows when called from deeply used stacks in
1726 * random callers for direct reclaim or memcg reclaim. We explicitly
1727 * allow reclaim from kswapd as the stack usage there is relatively low.
1729 * This should never happen except in the case of a VM regression so
1732 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1737 * Is this folio beyond the end of the file?
1739 * The folio index is less than the end_index, adjust the end_pos
1740 * to the highest offset that this folio should represent.
1741 * -----------------------------------------------------
1742 * | file mapping | <EOF> |
1743 * -----------------------------------------------------
1744 * | Page ... | Page N-2 | Page N-1 | Page N | |
1745 * ^--------------------------------^----------|--------
1746 * | desired writeback range | see else |
1747 * ---------------------------------^------------------|
1749 isize = i_size_read(inode);
1750 end_pos = folio_pos(folio) + folio_size(folio);
1751 if (end_pos > isize) {
1753 * Check whether the page to write out is beyond or straddles
1755 * -------------------------------------------------------
1756 * | file mapping | <EOF> |
1757 * -------------------------------------------------------
1758 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
1759 * ^--------------------------------^-----------|---------
1761 * ---------------------------------^-----------|--------|
1763 size_t poff = offset_in_folio(folio, isize);
1764 pgoff_t end_index = isize >> PAGE_SHIFT;
1767 * Skip the page if it's fully outside i_size, e.g.
1768 * due to a truncate operation that's in progress. We've
1769 * cleaned this page and truncate will finish things off for
1772 * Note that the end_index is unsigned long. If the given
1773 * offset is greater than 16TB on a 32-bit system then if we
1774 * checked if the page is fully outside i_size with
1775 * "if (page->index >= end_index + 1)", "end_index + 1" would
1776 * overflow and evaluate to 0. Hence this page would be
1777 * redirtied and written out repeatedly, which would result in
1778 * an infinite loop; the user program performing this operation
1779 * would hang. Instead, we can detect this situation by
1780 * checking if the page is totally beyond i_size or if its
1781 * offset is just equal to the EOF.
1783 if (folio->index > end_index ||
1784 (folio->index == end_index && poff == 0))
1788 * The page straddles i_size. It must be zeroed out on each
1789 * and every writepage invocation because it may be mmapped.
1790 * "A file is mapped in multiples of the page size. For a file
1791 * that is not a multiple of the page size, the remaining
1792 * memory is zeroed when mapped, and writes to that region are
1793 * not written out to the file."
1795 folio_zero_segment(folio, poff, folio_size(folio));
1799 return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
1802 folio_redirty_for_writepage(wbc, folio);
1804 folio_unlock(folio);
1809 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1810 struct iomap_writepage_ctx *wpc,
1811 const struct iomap_writeback_ops *ops)
1816 ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
1819 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1821 EXPORT_SYMBOL_GPL(iomap_writepages);
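/*
 * Sketch of the writeback side: the filesystem supplies an
 * iomap_writeback_ops with at least ->map_blocks and calls
 * iomap_writepages() from ->writepages with an iomap_writepage_ctx
 * (usually embedded in a larger, filesystem-private context).  The
 * myfs_map_blocks() body is a placeholder; a real implementation looks up
 * (and, for delalloc, allocates) the extent covering @offset and fills in
 * wpc->iomap before returning.
 */
static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	/* Fill wpc->iomap with the extent backing @offset. */
	return 0;
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
};

static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
}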
1823 static int __init iomap_init(void)
1825 return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
1826 offsetof(struct iomap_ioend, io_inline_bio),
1829 fs_initcall(iomap_init);