/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}

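/*
 * Walk the buffers attached to a page and note whether any of them are
 * delalloc, unmapped or unwritten -- the three states the writepage and
 * releasepage paths below use to decide whether a transaction is needed.
 */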
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = XFS_I(inode);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

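/*
 * Data for a realtime inode lives on the separate realtime device, so
 * pick the block device that actually backs this inode's data.
 */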
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof, i_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty_sync(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			int error;

			error = xfs_iomap_write_unwritten(ip, offset, size);
			if (error)
				ioend->io_error = error;
		}
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq = xfsdatad_workqueue;

		if (ioend->io_work.func == xfs_end_bio_unwritten)
			wq = xfsconvertd_workqueue;

		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
	}
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

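/*
 * Thin wrapper around xfs_iomap().  Note the negation: core XFS
 * functions return positive errnos, while the callers here expect the
 * kernel's negative-errno convention.
 */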
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	int			nmaps = 1;

	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}

/* Does the given offset fall inside the extent described by iomapp? */
STATIC int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

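/*
 * Attach an ioend to a bio and submit it.  The extra io_remaining
 * reference taken here is dropped by xfs_end_bio() when the bio
 * completes, so the ioend cannot go away while the bio is in flight.
 */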
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
		   WRITE_SYNC_PLUG : WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

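/*
 * Allocate a bio sized for as many vectors as the device supports.
 * The retry loop below assumes bio_alloc() may fail for a large
 * request and halves the vector count until the allocation succeeds.
 */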
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

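/*
 * bio_add_page() returns the number of bytes actually added, so a
 * return value shorter than bh->b_size tells the caller the bio is
 * full and must be submitted before this buffer is retried.
 */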
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, we can end up with a page that has only some of its
 * buffers marked async write, and I/O completion on those buffers can occur
 * before we have marked the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

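/*
 * Translate the mapping's 512-byte basic block number (iomap_bn) plus
 * the offset into the mapping into the filesystem-block-sized
 * b_blocknr that the buffer layer expects (BBSHIFT is the basic
 * block shift, 9).
 */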
STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

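/*
 * Measure how many contiguous, identically-mapped bytes follow bh:
 * first sum within the current page, then probe forward through the
 * following pages (bounded to 64 pages to avoid pathological scans).
 */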
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			wbc->nr_to_write--;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap writes but only partially set up by block_prepare_write(), the
 * bh states will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction, or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * VM calculation for nr_to_write seems off.  Bump it way
	 * up, this gets simple streaming writes zippy again.
	 * To be reviewed again after Jens' writeback changes.
	 */
	wbc->nr_to_write *= 4;

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If there are no delalloc buffer heads in this case
 *    then we can just let try_to_free_buffers() release them.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

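/*
 * Common implementation behind the buffered (xfs_get_blocks) and
 * direct (xfs_get_blocks_direct) callbacks; "direct" selects direct
 * I/O semantics such as reads past EOF being treated as a hole.
 */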
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			niomap = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	error = xfs_iomap(XFS_I(inode), offset, size,
			     create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent, so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

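/*
 * Direct I/O writes start out with an IOMAP_UNWRITTEN ioend since they
 * may land in unwritten extents; xfs_end_io_direct() above downgrades
 * the completion to a plain written one if no unwritten extent was
 * actually involved.
 */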
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	struct block_device *bdev;
	ssize_t		ret;

	bdev = xfs_find_bdev_for_inode(XFS_I(inode));

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
								xfs_get_blocks);
}

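/*
 * bmap is used by things like swap files and FIBMAP that want raw
 * block numbers, so flush any delalloc ranges first to make sure the
 * query sees real, allocated blocks.
 */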
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_itrace_entry(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};