/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
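
/* slab cache used for all struct btrfs_ordered_extent allocations */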
static struct kmem_cache *btrfs_ordered_extent_cache;
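
/*
 * returns the end of the byte range covered by an ordered extent,
 * saturating to (u64)-1 if file_offset + len would overflow
 */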
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
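
/*
 * called when tree_insert finds an existing entry covering the same range;
 * a duplicate ordered extent is a fatal inconsistency in the ordered tree
 */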
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", (unsigned long long)offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	/* walk forward to the first entry that ends past file_offset */
	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	/* then back up to the last entry that ends at or before it */
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
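
/*
 * helper to check if a given byte range overlaps the range covered
 * by an entry
 */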
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	return 0;
}
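
/* add an ordered extent for a plain buffered write: no dio, no compression */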
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}
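
/* same as btrfs_add_ordered_extent, but the extent is from O_DIRECT IO */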
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}
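
/* same as btrfs_add_ordered_extent, but records the compression type used */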
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	/* clamp the IO range to the part covered by this ordered extent */
	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	wake_up(&entry->wait);
}
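
/*
 * worker function run for each ordered extent queued by
 * btrfs_wait_ordered_extents(): wait for the extent to complete, then
 * signal the waiter through the completion
 */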
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
	struct list_head splice, works;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered, *next;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			ordered->flush_work.func = btrfs_run_ordered_extent_work;
			list_add_tail(&ordered->work_list, &works);
			btrfs_queue_worker(&root->fs_info->flush_workers,
					   &ordered->flush_work);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);

		inode = ordered->inode;
		btrfs_put_ordered_extent(ordered);
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);

		cond_resched();
	}
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}

		if (!inode)
			continue;
		spin_unlock(&root->fs_info->ordered_extent_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			spin_lock(&root->fs_info->ordered_extent_lock);
			list_splice_tail(&splice,
					 &root->fs_info->ordered_operations);
			spin_unlock(&root->fs_info->ordered_extent_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock_irq(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * outstanding i_size, or we will not know the real
			 * i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the disk_i_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
}
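
/* create the slab cache for ordered extents, called from btrfs module init */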
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}
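
/* tear the slab cache down again on module exit */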
void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}