// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "super.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}
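
/*
 * Illustrative note (hypothetical numbers, not from a real caller): the
 * overflow check above means an entry with file_offset = U64_MAX - 4096 and
 * num_bytes = 8192 reports an end of (u64)-1 instead of wrapping around to a
 * small offset, which would corrupt the rb-tree ordering below.
 */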

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
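
/*
 * Expected calling pattern, as used by btrfs_alloc_ordered_extent() and
 * btrfs_split_ordered_extent() below; a non-NULL return means the new range
 * overlaps an existing entry and is treated as corruption:
 *
 *	node = tree_insert(&tree->tree, file_offset, &entry->rb_node);
 *	if (node)
 *		btrfs_panic(fs_info, -EEXIST, ...);
 */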

/*
 * look for a given offset in the tree, and if it can't be found return the
 * closest offset we can find.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	/* Walk forward while the candidate still ends at or before the offset. */
	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	/* Walk backward to the first entry whose end is past the offset. */
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
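
/*
 * Descriptive note: tree->last caches the most recently returned node, so
 * repeated lookups inside one ordered extent (e.g. per-page endio for a large
 * write) hit the cache instead of rewalking the rb-tree.  The cache is
 * invalidated when the cached node is erased, see
 * btrfs_remove_ordered_extent() and btrfs_split_ordered_extent() below.
 */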

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ERR_PTR(ret);
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
	entry->flags = flags;

	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
				 fs_info->delalloc_batch);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	/* One ref for the returned entry to match semantics of lookup. */
	refcount_inc(&entry->refs);

	return entry;
}
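
/*
 * Hypothetical caller sketch (names and values illustrative, not from this
 * file): a COW write path would allocate one ordered extent per delalloc
 * range after reserving the extent on disk, then drop the lookup-style
 * second reference when done with the pointer:
 *
 *	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
 *					     ins.objectid, ins.offset, 0,
 *					     1U << BTRFS_ORDERED_REGULAR,
 *					     BTRFS_COMPRESS_NONE);
 *	if (IS_ERR(ordered))
 *		return PTR_ERR(ordered);
 *	...
 *	btrfs_put_ordered_extent(ordered);
 */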

/*
 * Add a new btrfs_ordered_extent for the range, but drop the reference instead
 * of returning it to the caller.
 */
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			     u64 disk_num_bytes, u64 offset, unsigned long flags,
			     int compress_type)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_alloc_ordered_extent(inode, file_offset, num_bytes,
					     ram_bytes, disk_bytenr,
					     disk_num_bytes, offset, flags,
					     compress_type);

	if (IS_ERR(ordered))
		return PTR_ERR(ordered);
	btrfs_put_ordered_extent(ordered);

	return 0;
}
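
/*
 * Illustrative distinction (flags hypothetical): use btrfs_add_ordered_extent()
 * when the caller only needs the extent tracked in the tree,
 *
 *	ret = btrfs_add_ordered_extent(inode, start, len, len, disk_bytenr,
 *				       disk_len, 0,
 *				       1U << BTRFS_ORDERED_PREALLOC,
 *				       BTRFS_COMPRESS_NONE);
 *
 * and btrfs_alloc_ordered_extent() when the caller will keep using the
 * returned pointer and drop the extra reference itself.
 */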

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

/*
 * Mark all ordered extent IO inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	if (page)
		ASSERT(page->mapping && page_offset(page) <= file_offset &&
		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/* @cur is beyond the end of this ordered extent. */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * @cur is before this ordered extent.
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (page) {
			/*
			 * Ordered (Private2) bit indicates whether we still
			 * have pending io unfinished for the ordered extent.
			 *
			 * If there's no such bit, we need to skip to next range.
			 */
			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
				cur += len;
				continue;
			}
			btrfs_page_clear_ordered(fs_info, page, cur, len);
		}

		/* Now we're fine to update the accounting */
		if (unlikely(len > entry->bytes_left)) {
			WARN_ON(1);
			btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
				   inode->root->root_key.objectid,
				   btrfs_ino(inode),
				   entry->file_offset,
				   entry->num_bytes,
				   len, entry->bytes_left);
			entry->bytes_left = 0;
		} else {
			entry->bytes_left -= len;
		}

		if (!uptodate)
			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

		/*
		 * All the IO of the ordered extent is finished, we need to queue
		 * the finish_func to be executed.
		 */
		if (entry->bytes_left == 0) {
			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
			cond_wake_up(&entry->wait);
			refcount_inc(&entry->refs);
			trace_btrfs_ordered_extent_mark_finished(inode, entry);
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_init_work(&entry->work, finish_ordered_fn, NULL, NULL);
			btrfs_queue_work(wq, &entry->work);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}
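
/*
 * Worked example (hypothetical offsets): with two ordered extents [0, 16K)
 * and [16K, 32K) and an endio for [8K, 24K), the loop above runs twice:
 * once with cur = 8K, len = 8K against the first extent, then with
 * cur = 16K, len = 8K against the second, decrementing each extent's
 * bytes_left and queueing finish_ordered_fn() for whichever reaches zero.
 */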

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}
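
/*
 * Illustrative caller pattern (hypothetical, not from this file): a caller
 * issuing several IOs against one ordered extent can cache it across calls
 * to avoid repeated tree searches:
 *
 *	struct btrfs_ordered_extent *cached = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &cached, offset, len)) {
 *		btrfs_finish_ordered_io(cached);
 *		btrfs_put_ordered_extent(cached);
 *	}
 *
 * The final put balances the reference taken when @cached is set.
 */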

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
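
/*
 * Reference lifecycle summary (descriptive, derived from this file): the
 * tree holds one reference from btrfs_alloc_ordered_extent(); every lookup
 * helper (btrfs_lookup_ordered_extent() and friends) takes one more that the
 * caller must drop with btrfs_put_ordered_extent().  Freeing happens here
 * only after the extent has left both the per-inode tree and the root list,
 * which the ASSERT()s above verify.
 */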

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release, false);
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}
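
/*
 * Illustrative caller pattern (hypothetical offsets): to make a range stable
 * before reading it back, look the extent up and wait on it:
 *
 *	ordered = btrfs_lookup_ordered_range(inode, pos, len);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(ordered);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 *
 * This is essentially what btrfs_lock_and_flush_ordered_range() below does
 * in a loop under the extent lock.
 */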

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
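
/*
 * Note on the overflow guard above (hypothetical numbers): for start = 4K
 * and len = U64_MAX, start + len wraps past zero, so orig_end is clamped to
 * OFFSET_MAX and the whole file tail is waited on rather than a bogus short
 * range.
 */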

/*
 * Find an ordered extent corresponding to file_offset.  Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * Lookup and return any extent before 'file_offset'.  NULL is returned if
 * none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if the range was locked and no ordered extents were pending,
 * otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}
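
/*
 * Illustrative nowait usage (hypothetical caller): an RWF_NOWAIT write path
 * would try the fast path and fall back to -EAGAIN instead of blocking:
 *
 *	if (!btrfs_try_lock_ordered_range(inode, start, end, &cached))
 *		return -EAGAIN;
 */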

/* Split out a new ordered extent for this first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;
	struct rb_node *node;

	trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split once ordered extent is past end_bio. */
	if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* Checksum list should be empty. */
	if (WARN_ON_ONCE(!list_empty(&ordered->list)))
		return ERR_PTR(-EINVAL);

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;
	ordered->bytes_left -= len;

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	/*
	 * The splitting extent is already counted and will be added again in
	 * btrfs_alloc_ordered_extent(). Subtract len to avoid double counting.
	 */
	percpu_counter_add_batch(&fs_info->ordered_bytes, -len, fs_info->delalloc_batch);

	return btrfs_alloc_ordered_extent(BTRFS_I(inode), file_offset, len, len,
					  disk_bytenr, len, 0, flags,
					  ordered->compress_type);
}
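
/*
 * Worked example (hypothetical values): splitting the first 4K off an
 * ordered extent [0, 16K) at disk_bytenr D leaves the original entry as
 * [4K, 16K) at disk_bytenr D + 4K, re-inserted under its new file_offset,
 * and returns a freshly allocated extent for [0, 4K) at D carrying the same
 * type flags.
 */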

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}