// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}
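
/*
 * Example of the clamp above: an entry with file_offset = U64_MAX - 4095
 * and num_bytes = 8192 would wrap around u64, so entry_end() saturates to
 * (u64)-1 instead of reporting a small bogus end offset.
 */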

/*
 * Returns NULL if the insertion worked, or the existing node that overlaps
 * the given file offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree; if it can't be found, return NULL
 * and set @prev_ret to the closest preceding entry.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Find the first ordered extent that contains @file_offset, otherwise the
 * closest one at a lower offset.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/**
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted.
 *
 * Return: 0 or -ENOMEM.
 */
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			     u64 disk_num_bytes, u64 offset, unsigned flags,
			     int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->physical = (u64)-1;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
	entry->flags = flags;

	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
				 fs_info->delalloc_batch);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

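/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * how a NOCOW writeback path might insert an ordered extent that reuses an
 * existing on-disk location, letting the NOCOW branch above release the
 * qgroup reservation immediately.
 */
static inline int example_add_nocow_ordered(struct btrfs_inode *inode,
					    u64 file_offset, u64 num_bytes,
					    u64 disk_bytenr)
{
	/* NOCOW: the unencoded length equals both file and disk lengths. */
	return btrfs_add_ordered_extent(inode, file_offset, num_bytes,
					num_bytes, disk_bytenr, num_bytes, 0,
					1 << BTRFS_ORDERED_NOCOW,
					BTRFS_COMPRESS_NONE);
}
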
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

/*
 * Mark the IO of all ordered extents inside the specified range as finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered IO is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	if (page)
		ASSERT(page->mapping && page_offset(page) <= file_offset &&
		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<--- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (page) {
			/*
			 * Ordered (Private2) bit indicates whether we still
			 * have pending io unfinished for the ordered extent.
			 *
			 * If there's no such bit, we need to skip to next range.
			 */
			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
				cur += len;
				continue;
			}
			btrfs_page_clear_ordered(fs_info, page, cur, len);
		}

		/* Now we're fine to update the accounting */
		if (unlikely(len > entry->bytes_left)) {
			WARN_ON(1);
			btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
				   inode->root->root_key.objectid,
				   btrfs_ino(inode),
				   entry->file_offset,
				   entry->num_bytes,
				   len, entry->bytes_left);
			entry->bytes_left = 0;
		} else {
			entry->bytes_left -= len;
		}

		if (!uptodate)
			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

		/*
		 * All the IO of the ordered extent is finished, we need to queue
		 * the finish_func to be executed.
		 */
		if (entry->bytes_left == 0) {
			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
			cond_wake_up(&entry->wait);
			refcount_inc(&entry->refs);
			trace_btrfs_ordered_extent_mark_finished(inode, entry);
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_init_work(&entry->work, finish_ordered_fn, NULL, NULL);
			btrfs_queue_work(wq, &entry->work);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}

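/*
 * Illustrative sketch (hypothetical helper): a buffered-write endio handler
 * marks exactly the byte range covered by one page as finished. The loop
 * above then visits every ordered extent inside that range, which matters
 * on subpage filesystems where one page can span several ordered extents.
 */
static inline void example_page_write_endio(struct btrfs_inode *inode,
					    struct page *page, bool uptodate)
{
	btrfs_mark_ordered_io_finished(inode, page, page_offset(page),
				       PAGE_SIZE, uptodate);
}
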
/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and report the
		 * extent as finished.
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}

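/*
 * Illustrative caller pattern (hypothetical helper, mirroring in-tree
 * users): decrement the pending bytes for one completed IO and, if that was
 * the last IO of the extent, finish it. btrfs_finish_ordered_io() consumes
 * the reference returned through @cached.
 */
static inline void example_dec_and_finish(struct btrfs_inode *inode,
					  u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_extent *cached = NULL;

	if (btrfs_dec_test_ordered_pending(inode, &cached, file_offset,
					   io_size))
		btrfs_finish_ordered_io(cached);
}
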
/*
 * Drop a reference on an ordered extent. This frees the extent if the last
 * reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree. No references are dropped, and
 * waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release, false);
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If @wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the IO completion code to insert metadata
 * into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback. We start IO on
	 * any dirty ones so the wait doesn't stall waiting for the flusher
	 * thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		if (!freespace_inode)
			btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to @file_offset. Return NULL if
 * nothing is found; otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

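/*
 * Illustrative sketch (hypothetical helper): wait for whatever ordered
 * extent covers @file_offset to complete, mirroring what the wait paths
 * above do with the returned reference.
 */
static inline void example_wait_at_offset(struct btrfs_inode *inode,
					  u64 file_offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
	if (!ordered)
		return;
	btrfs_start_ordered_extent(ordered, 1);
	btrfs_put_ordered_extent(ordered);
}
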
/*
 * Since the DIO code tries to lock a wide area we need to look for any
 * ordered extents that exist in the range, rather than just the start of
 * the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * Look up and return any ordered extent before @file_offset. NULL is
 * returned if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * Unlike btrfs_lookup_first_ordered_extent(), this one won't return an
 * ordered extent that does not overlap the range.
 * And unlike btrfs_lookup_ordered_extent(), this function ensures the
 * first overlapping ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if the range was locked with no ordered extents pending,
 * otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, NULL);

	return false;
}

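/*
 * Illustrative caller pattern (hypothetical helper): nowait writers try the
 * non-blocking variant first and bail out with -EAGAIN, while blocking
 * writers use btrfs_lock_and_flush_ordered_range() above.
 */
static inline int example_lock_for_write(struct btrfs_inode *inode,
					 u64 start, u64 end, bool nowait)
{
	if (nowait) {
		if (!btrfs_try_lock_ordered_range(inode, start, end))
			return -EAGAIN;
		return 0;
	}
	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
	return 0;
}
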
static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
				u64 len)
{
	struct inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	u64 file_offset = ordered->file_offset + pos;
	u64 disk_bytenr = ordered->disk_bytenr + pos;
	unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;

	/*
	 * The splitting extent is already counted and will be added again in
	 * btrfs_add_ordered_extent(). Subtract len to avoid double counting.
	 */
	percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
				 fs_info->delalloc_batch);
	WARN_ON_ONCE(flags & (1 << BTRFS_ORDERED_COMPRESSED));
	return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
					disk_bytenr, len, 0, flags,
					ordered->compress_type);
}

int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
			       u64 post)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct rb_node *node;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret = 0;

	trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += pre;
	ordered->disk_bytenr += pre;
	ordered->num_bytes -= (pre + post);
	ordered->disk_num_bytes -= (pre + post);
	ordered->bytes_left -= (pre + post);

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	if (pre)
		ret = clone_ordered_extent(ordered, 0, pre);
	if (ret == 0 && post)
		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
					   post);

	return ret;
}

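/*
 * Illustrative sketch (hypothetical values): a zoned-mode caller whose bio
 * only covers the middle of an ordered extent computes the leading and
 * trailing byte counts and splits both off with the function above.
 */
static inline int example_split_to_bio(struct btrfs_ordered_extent *ordered,
				       u64 bio_disk_bytenr, u64 bio_len)
{
	u64 pre = bio_disk_bytenr - ordered->disk_bytenr;
	u64 post = ordered->disk_num_bytes - pre - bio_len;

	return btrfs_split_ordered_extent(ordered, pre, post);
}
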
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}