1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2009 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/error-injection.h>
15 #include "transaction.h"
18 #include "btrfs_inode.h"
19 #include "async-thread.h"
20 #include "free-space-cache.h"
22 #include "print-tree.h"
23 #include "delalloc-space.h"
24 #include "block-group.h"
29 #include "inode-item.h"
34 * [What does relocation do]
36 * The objective of relocation is to relocate all extents of the target block
37 * group to other block groups.
38 * This is utilized by resize (shrink only), profile conversion, space
39 * compaction, and the balance routine to spread chunks over devices.
42 * ------------------------------------------------------------------
43 * BG A: 10 data extents | BG A: deleted
44 * BG B: 2 data extents | BG B: 10 data extents (2 old + 8 relocated)
45 * BG C: 1 data extent   | BG C: 3 data extents (1 old + 2 relocated)
47 * [How does relocation work]
49 * 1. Mark the target block group read-only
50 * New extents won't be allocated from the target block group.
52 * 2.1 Record each extent in the target block group
53 * To build a proper map of extents to be relocated.
55 * 2.2 Build data reloc tree and reloc trees
56 * Data reloc tree will contain an inode, recording all newly relocated data extents.
58 * There will be only one data reloc tree for one data block group.
60 * Reloc tree will be a special snapshot of its source tree, containing
61 * relocated tree blocks.
62 * Each tree referring to a tree block in the target block group will get its reloc tree built.
65 * 2.3 Swap source tree with its corresponding reloc tree
66 * Each involved tree only refers to new extents after swap.
68 * 3. Cleanup reloc trees and data reloc tree.
69 * As old extents in the target block group are still referenced by reloc
70 * trees, we need to clean them up before really freeing the target block group.
73 * The main complexity is in steps 2.2 and 2.3.
75 * The entry point of relocation is relocate_block_group() function.
78 #define RELOCATION_RESERVED_NODES 256
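/*
 * Illustrative sketch only, not compiled: the high-level flow described in
 * the comment above, with hypothetical helper names standing in for the
 * real entry points (the actual driver is relocate_block_group()).
 */
#if 0
static int relocation_flow_sketch(struct reloc_control *rc)
{
	int ret;

	/* Step 1: mark the target block group read-only. */
	ret = mark_target_bg_ro(rc->block_group);	/* hypothetical */
	if (ret)
		return ret;

	/* Steps 2.1-2.3: record extents, build reloc trees, swap them in. */
	ret = relocate_all_extents(rc);			/* hypothetical */
	if (ret)
		return ret;

	/* Step 3: clean up reloc trees and the data reloc tree. */
	return cleanup_reloc_trees(rc);			/* hypothetical */
}
#endif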
80 * map address of tree root to tree
84 struct rb_node rb_node;
86 }; /* Use rb_simple_node for search/insert */
91 struct rb_root rb_root;
96 * Represents a tree block to process.
100 struct rb_node rb_node;
102 }; /* Use rb_simple_node for search/insert */
104 struct btrfs_key key;
105 unsigned int level:8;
106 unsigned int key_ready:1;
109 #define MAX_EXTENTS 128
111 struct file_extent_cluster {
114 u64 boundary[MAX_EXTENTS];
118 struct reloc_control {
119 /* block group to relocate */
120 struct btrfs_block_group *block_group;
122 struct btrfs_root *extent_root;
123 /* inode for moving data */
124 struct inode *data_inode;
126 struct btrfs_block_rsv *block_rsv;
128 struct btrfs_backref_cache backref_cache;
130 struct file_extent_cluster cluster;
131 /* tree blocks have been processed */
132 struct extent_io_tree processed_blocks;
133 /* map start of tree root to corresponding reloc tree */
134 struct mapping_tree reloc_root_tree;
135 /* list of reloc trees */
136 struct list_head reloc_roots;
137 /* list of subvolume trees that get relocated */
138 struct list_head dirty_subvol_roots;
139 /* size of metadata reservation for merging reloc trees */
140 u64 merging_rsv_size;
141 /* size of relocated tree nodes */
143 /* reserved size for block group relocation */
149 unsigned int stage:8;
150 unsigned int create_reloc_tree:1;
151 unsigned int merge_reloc_tree:1;
152 unsigned int found_file_extent:1;
155 /* stages of data relocation */
156 #define MOVE_DATA_EXTENTS 0
157 #define UPDATE_DATA_PTRS 1
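/*
 * Illustrative sketch only, not compiled: data relocation runs the two
 * stages above in order.  MOVE_DATA_EXTENTS copies the data into the data
 * reloc inode, then UPDATE_DATA_PTRS rewrites the file extent items to
 * point at the new locations.  The helper name below is hypothetical.
 */
#if 0
	rc->stage = MOVE_DATA_EXTENTS;
	relocate_one_pass(rc);			/* hypothetical */
	rc->stage = UPDATE_DATA_PTRS;
	relocate_one_pass(rc);			/* hypothetical */
#endif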
159 static void mark_block_processed(struct reloc_control *rc,
160 struct btrfs_backref_node *node)
164 if (node->level == 0 ||
165 in_range(node->bytenr, rc->block_group->start,
166 rc->block_group->length)) {
167 blocksize = rc->extent_root->fs_info->nodesize;
168 set_extent_bits(&rc->processed_blocks, node->bytenr,
169 node->bytenr + blocksize - 1, EXTENT_DIRTY);
175 static void mapping_tree_init(struct mapping_tree *tree)
177 tree->rb_root = RB_ROOT;
178 spin_lock_init(&tree->lock);
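/*
 * Illustrative sketch only, not compiled: insertions into (and lookups in)
 * the mapping tree are keyed by the bytenr of the tree root via the
 * rb_simple_*() helpers, as __add_reloc_root() and find_reloc_root() below
 * do.  @tree and @node here are stand-ins, not real locals.
 */
#if 0
	spin_lock(&tree->lock);
	/* rb_simple_insert() returns the colliding node, or NULL on success. */
	rb_node = rb_simple_insert(&tree->rb_root, node->bytenr,
				   &node->rb_node);
	spin_unlock(&tree->lock);
#endif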
182 * Walk up backref nodes until we reach a node that represents a tree root.
184 static struct btrfs_backref_node *walk_up_backref(
185 struct btrfs_backref_node *node,
186 struct btrfs_backref_edge *edges[], int *index)
188 struct btrfs_backref_edge *edge;
191 while (!list_empty(&node->upper)) {
192 edge = list_entry(node->upper.next,
193 struct btrfs_backref_edge, list[LOWER]);
195 node = edge->node[UPPER];
197 BUG_ON(node->detached);
203 * walk down backref nodes to find start of next reference path
205 static struct btrfs_backref_node *walk_down_backref(
206 struct btrfs_backref_edge *edges[], int *index)
208 struct btrfs_backref_edge *edge;
209 struct btrfs_backref_node *lower;
213 edge = edges[idx - 1];
214 lower = edge->node[LOWER];
215 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
219 edge = list_entry(edge->list[LOWER].next,
220 struct btrfs_backref_edge, list[LOWER]);
221 edges[idx - 1] = edge;
223 return edge->node[UPPER];
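/*
 * Illustrative sketch only, not compiled: callers pair walk_up_backref()
 * and walk_down_backref() to visit every reference path from a block up to
 * the tree roots, the way select_reloc_root() and update_processed_blocks()
 * below do.  @node here is a stand-in for the starting backref node.
 */
#if 0
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_backref_node *next = node;
	int index = 0;

	while (next) {
		/* Climb one reference path up to a tree root. */
		next = walk_up_backref(next, edges, &index);
		/* ... act on the root node here ... */
		/* Step over to the start of the next reference path. */
		next = walk_down_backref(edges, &index);
	}
#endif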
229 static void update_backref_node(struct btrfs_backref_cache *cache,
230 struct btrfs_backref_node *node, u64 bytenr)
232 struct rb_node *rb_node;
233 rb_erase(&node->rb_node, &cache->rb_root);
234 node->bytenr = bytenr;
235 rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
237 btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
241 * update backref cache after a transaction commit
243 static int update_backref_cache(struct btrfs_trans_handle *trans,
244 struct btrfs_backref_cache *cache)
246 struct btrfs_backref_node *node;
249 if (cache->last_trans == 0) {
250 cache->last_trans = trans->transid;
254 if (cache->last_trans == trans->transid)
258 * Detached nodes are used to avoid unnecessary backref
259 * lookup. Transaction commit changes the extent tree,
260 * so the detached nodes are no longer useful.
262 while (!list_empty(&cache->detached)) {
263 node = list_entry(cache->detached.next,
264 struct btrfs_backref_node, list);
265 btrfs_backref_cleanup_node(cache, node);
268 while (!list_empty(&cache->changed)) {
269 node = list_entry(cache->changed.next,
270 struct btrfs_backref_node, list);
271 list_del_init(&node->list);
272 BUG_ON(node->pending);
273 update_backref_node(cache, node, node->new_bytenr);
277 * Some nodes can be left in the pending list if there were
278 * errors while processing the pending nodes.
280 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
281 list_for_each_entry(node, &cache->pending[level], list) {
282 BUG_ON(!node->pending);
283 if (node->bytenr == node->new_bytenr)
285 update_backref_node(cache, node, node->new_bytenr);
289 cache->last_trans = 0;
293 static bool reloc_root_is_dead(struct btrfs_root *root)
296 * Pairs with set_bit/clear_bit in clean_dirty_subvols and
297 * btrfs_update_reloc_root. We need to see the updated bit before
298 * trying to access reloc_root.
301 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
307 * Check if this subvolume tree has a valid reloc tree.
309 * A reloc tree after swap is considered dead, thus not considered valid.
310 * This is enough for most callers, as they don't distinguish a dead reloc
311 * root from no reloc root. But btrfs_should_ignore_reloc_root() below is a special case.
314 static bool have_reloc_root(struct btrfs_root *root)
316 if (reloc_root_is_dead(root))
318 if (!root->reloc_root)
323 int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
325 struct btrfs_root *reloc_root;
327 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
330 /* This root has been merged with its reloc tree, we can ignore it */
331 if (reloc_root_is_dead(root))
334 reloc_root = root->reloc_root;
338 if (btrfs_header_generation(reloc_root->commit_root) ==
339 root->fs_info->running_transaction->transid)
342 * If there is a reloc tree and it was created in the previous
343 * transaction, backref lookup can find the reloc tree,
344 * so the backref node for the fs tree root is useless for relocation.
351 * find reloc tree by address of tree root
353 struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
355 struct reloc_control *rc = fs_info->reloc_ctl;
356 struct rb_node *rb_node;
357 struct mapping_node *node;
358 struct btrfs_root *root = NULL;
361 spin_lock(&rc->reloc_root_tree.lock);
362 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
364 node = rb_entry(rb_node, struct mapping_node, rb_node);
367 spin_unlock(&rc->reloc_root_tree.lock);
368 return btrfs_grab_root(root);
372 * For useless nodes, do two major cleanups:
374 * - Cleanup the children edges and nodes
375 * If child node is also orphan (no parent) during cleanup, then the child
376 * node will also be cleaned up.
378 * - Free up leaves (level 0), keeping non-leaf nodes detached
379 *   Non-leaf nodes stay cached as "detached"
381 * Return false if @node is not in the @useless_nodes list.
382 * Return true if @node is in the @useless_nodes list.
384 static bool handle_useless_nodes(struct reloc_control *rc,
385 struct btrfs_backref_node *node)
387 struct btrfs_backref_cache *cache = &rc->backref_cache;
388 struct list_head *useless_node = &cache->useless_node;
391 while (!list_empty(useless_node)) {
392 struct btrfs_backref_node *cur;
394 cur = list_first_entry(useless_node, struct btrfs_backref_node,
396 list_del_init(&cur->list);
398 /* Only tree root nodes can be added to @useless_nodes */
399 ASSERT(list_empty(&cur->upper));
404 /* The node is the lowest node */
406 list_del_init(&cur->lower);
410 /* Cleanup the lower edges */
411 while (!list_empty(&cur->lower)) {
412 struct btrfs_backref_edge *edge;
413 struct btrfs_backref_node *lower;
415 edge = list_entry(cur->lower.next,
416 struct btrfs_backref_edge, list[UPPER]);
417 list_del(&edge->list[UPPER]);
418 list_del(&edge->list[LOWER]);
419 lower = edge->node[LOWER];
420 btrfs_backref_free_edge(cache, edge);
422 /* Child node is also orphan, queue for cleanup */
423 if (list_empty(&lower->upper))
424 list_add(&lower->list, useless_node);
426 /* Mark this block processed for relocation */
427 mark_block_processed(rc, cur);
430 * Backref nodes for tree leaves are deleted from the cache.
431 * Backref nodes for upper level tree blocks are left in the
432 * cache to avoid unnecessary backref lookup.
434 if (cur->level > 0) {
435 list_add(&cur->list, &cache->detached);
438 rb_erase(&cur->rb_node, &cache->rb_root);
439 btrfs_backref_free_node(cache, cur);
446 * Build backref tree for a given tree block. Root of the backref tree
447 * corresponds to the tree block, leaves of the backref tree correspond to
448 * roots of b-trees that reference the tree block.
450 * The basic idea of this function is to check backrefs of a given block to
451 * find upper level blocks that reference the block, and then check backrefs
452 * of these upper level blocks recursively. The recursion stops when a tree
453 * root is reached or the backrefs for the block are cached.
455 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
456 * all upper level blocks that directly/indirectly reference the block are also cached.
459 static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
460 struct reloc_control *rc, struct btrfs_key *node_key,
461 int level, u64 bytenr)
463 struct btrfs_backref_iter *iter;
464 struct btrfs_backref_cache *cache = &rc->backref_cache;
465 /* For searching parent of TREE_BLOCK_REF */
466 struct btrfs_path *path;
467 struct btrfs_backref_node *cur;
468 struct btrfs_backref_node *node = NULL;
469 struct btrfs_backref_edge *edge;
473 iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
475 return ERR_PTR(-ENOMEM);
476 path = btrfs_alloc_path();
482 node = btrfs_backref_alloc_node(cache, bytenr, level);
491 /* Breadth-first search to build backref cache */
493 ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
499 edge = list_first_entry_or_null(&cache->pending_edge,
500 struct btrfs_backref_edge, list[UPPER]);
502 * The pending list isn't empty, take the first block to process.
506 list_del_init(&edge->list[UPPER]);
507 cur = edge->node[UPPER];
511 /* Finish the upper linkage of newly added edges/nodes */
512 ret = btrfs_backref_finish_upper_links(cache, node);
518 if (handle_useless_nodes(rc, node))
521 btrfs_backref_iter_free(iter);
522 btrfs_free_path(path);
524 btrfs_backref_error_cleanup(cache, node);
527 ASSERT(!node || !node->detached);
528 ASSERT(list_empty(&cache->useless_node) &&
529 list_empty(&cache->pending_edge));
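/*
 * Illustrative sketch only, not compiled: a typical caller builds the
 * backref tree for one tree block like this, assuming @block is a
 * struct tree_block queued for relocation.
 */
#if 0
	struct btrfs_backref_node *node;

	node = build_backref_tree(rc, &block->key, block->level,
				  block->bytenr);
	if (IS_ERR(node))
		return PTR_ERR(node);
#endif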
534 * Helper to add a backref node for the newly created snapshot.
535 * The backref node is created by cloning the backref node that
536 * corresponds to the root of the source tree.
538 static int clone_backref_node(struct btrfs_trans_handle *trans,
539 struct reloc_control *rc,
540 struct btrfs_root *src,
541 struct btrfs_root *dest)
543 struct btrfs_root *reloc_root = src->reloc_root;
544 struct btrfs_backref_cache *cache = &rc->backref_cache;
545 struct btrfs_backref_node *node = NULL;
546 struct btrfs_backref_node *new_node;
547 struct btrfs_backref_edge *edge;
548 struct btrfs_backref_edge *new_edge;
549 struct rb_node *rb_node;
551 if (cache->last_trans > 0)
552 update_backref_cache(trans, cache);
554 rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
556 node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
560 BUG_ON(node->new_bytenr != reloc_root->node->start);
564 rb_node = rb_simple_search(&cache->rb_root,
565 reloc_root->commit_root->start);
567 node = rb_entry(rb_node, struct btrfs_backref_node,
569 BUG_ON(node->detached);
576 new_node = btrfs_backref_alloc_node(cache, dest->node->start,
581 new_node->lowest = node->lowest;
582 new_node->checked = 1;
583 new_node->root = btrfs_grab_root(dest);
584 ASSERT(new_node->root);
587 list_for_each_entry(edge, &node->lower, list[UPPER]) {
588 new_edge = btrfs_backref_alloc_edge(cache);
592 btrfs_backref_link_edge(new_edge, edge->node[LOWER],
593 new_node, LINK_UPPER);
596 list_add_tail(&new_node->lower, &cache->leaves);
599 rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
602 btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
604 if (!new_node->lowest) {
605 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
606 list_add_tail(&new_edge->list[LOWER],
607 &new_edge->node[LOWER]->upper);
612 while (!list_empty(&new_node->lower)) {
613 new_edge = list_entry(new_node->lower.next,
614 struct btrfs_backref_edge, list[UPPER]);
615 list_del(&new_edge->list[UPPER]);
616 btrfs_backref_free_edge(cache, new_edge);
618 btrfs_backref_free_node(cache, new_node);
623 * helper to add 'address of tree root -> reloc tree' mapping
625 static int __must_check __add_reloc_root(struct btrfs_root *root)
627 struct btrfs_fs_info *fs_info = root->fs_info;
628 struct rb_node *rb_node;
629 struct mapping_node *node;
630 struct reloc_control *rc = fs_info->reloc_ctl;
632 node = kmalloc(sizeof(*node), GFP_NOFS);
636 node->bytenr = root->commit_root->start;
639 spin_lock(&rc->reloc_root_tree.lock);
640 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
641 node->bytenr, &node->rb_node);
642 spin_unlock(&rc->reloc_root_tree.lock);
645 "Duplicate root found for start=%llu while inserting into relocation tree",
650 list_add_tail(&root->root_list, &rc->reloc_roots);
655 * helper to delete the 'address of tree root -> reloc tree' mapping
658 static void __del_reloc_root(struct btrfs_root *root)
660 struct btrfs_fs_info *fs_info = root->fs_info;
661 struct rb_node *rb_node;
662 struct mapping_node *node = NULL;
663 struct reloc_control *rc = fs_info->reloc_ctl;
664 bool put_ref = false;
666 if (rc && root->node) {
667 spin_lock(&rc->reloc_root_tree.lock);
668 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
669 root->commit_root->start);
671 node = rb_entry(rb_node, struct mapping_node, rb_node);
672 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
673 RB_CLEAR_NODE(&node->rb_node);
675 spin_unlock(&rc->reloc_root_tree.lock);
676 ASSERT(!node || (struct btrfs_root *)node->data == root);
680 * We only put the reloc root here if it's on the list. There's a lot
681 * of places where the pattern is to splice the rc->reloc_roots, process
682 * the reloc roots, and then add the reloc root back onto
683 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
684 * list we don't want the reference being dropped, because the guy
685 * messing with the list is in charge of the reference.
687 spin_lock(&fs_info->trans_lock);
688 if (!list_empty(&root->root_list)) {
690 list_del_init(&root->root_list);
692 spin_unlock(&fs_info->trans_lock);
694 btrfs_put_root(root);
699 * helper to update the 'address of tree root -> reloc tree' mapping
702 static int __update_reloc_root(struct btrfs_root *root)
704 struct btrfs_fs_info *fs_info = root->fs_info;
705 struct rb_node *rb_node;
706 struct mapping_node *node = NULL;
707 struct reloc_control *rc = fs_info->reloc_ctl;
709 spin_lock(&rc->reloc_root_tree.lock);
710 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
711 root->commit_root->start);
713 node = rb_entry(rb_node, struct mapping_node, rb_node);
714 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
716 spin_unlock(&rc->reloc_root_tree.lock);
720 BUG_ON((struct btrfs_root *)node->data != root);
722 spin_lock(&rc->reloc_root_tree.lock);
723 node->bytenr = root->node->start;
724 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
725 node->bytenr, &node->rb_node);
726 spin_unlock(&rc->reloc_root_tree.lock);
728 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
732 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
733 struct btrfs_root *root, u64 objectid)
735 struct btrfs_fs_info *fs_info = root->fs_info;
736 struct btrfs_root *reloc_root;
737 struct extent_buffer *eb;
738 struct btrfs_root_item *root_item;
739 struct btrfs_key root_key;
741 bool must_abort = false;
743 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
745 return ERR_PTR(-ENOMEM);
747 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
748 root_key.type = BTRFS_ROOT_ITEM_KEY;
749 root_key.offset = objectid;
751 if (root->root_key.objectid == objectid) {
754 /* called by btrfs_init_reloc_root */
755 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
756 BTRFS_TREE_RELOC_OBJECTID);
761 * Set the last_snapshot field to the generation of the commit
762 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
763 * correctly (returns true) when the relocation root is created
764 * either inside the critical section of a transaction commit
765 * (through transaction.c:qgroup_account_snapshot()) or when
766 * it's created before the transaction commit is started.
768 commit_root_gen = btrfs_header_generation(root->commit_root);
769 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
772 * Called by btrfs_reloc_post_snapshot_hook.
773 * The source tree is a reloc tree; all tree blocks
774 * modified after it was created have the RELOC flag
775 * set in their headers, so it's OK not to update
776 * 'last_snapshot'.
778 ret = btrfs_copy_root(trans, root, root->node, &eb,
779 BTRFS_TREE_RELOC_OBJECTID);
785 * We have changed references at this point; we must abort the
786 * transaction if anything fails.
790 memcpy(root_item, &root->root_item, sizeof(*root_item));
791 btrfs_set_root_bytenr(root_item, eb->start);
792 btrfs_set_root_level(root_item, btrfs_header_level(eb));
793 btrfs_set_root_generation(root_item, trans->transid);
795 if (root->root_key.objectid == objectid) {
796 btrfs_set_root_refs(root_item, 0);
797 memset(&root_item->drop_progress, 0,
798 sizeof(struct btrfs_disk_key));
799 btrfs_set_root_drop_level(root_item, 0);
802 btrfs_tree_unlock(eb);
803 free_extent_buffer(eb);
805 ret = btrfs_insert_root(trans, fs_info->tree_root,
806 &root_key, root_item);
812 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
813 if (IS_ERR(reloc_root)) {
814 ret = PTR_ERR(reloc_root);
817 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
818 reloc_root->last_trans = trans->transid;
824 btrfs_abort_transaction(trans, ret);
829 * Create a reloc tree for a given fs tree. The reloc tree is just a
830 * snapshot of the fs tree with a special root objectid.
832 * The reloc_root comes out of here with two references, one for
833 * root->reloc_root, and another for being on the rc->reloc_roots list.
835 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
836 struct btrfs_root *root)
838 struct btrfs_fs_info *fs_info = root->fs_info;
839 struct btrfs_root *reloc_root;
840 struct reloc_control *rc = fs_info->reloc_ctl;
841 struct btrfs_block_rsv *rsv;
849 * The subvolume has a reloc tree but the swap is finished; no need to
850 * create/update the dead reloc tree.
852 if (reloc_root_is_dead(root))
856 * This is subtle but important. We do not do
857 * record_root_in_transaction for reloc roots; instead we record their
858 * corresponding fs root, and then here we update the last trans for the
859 * reloc root. This means that we have to do this for the entire life
860 * of the reloc root, regardless of which stage of the relocation we are in.
863 if (root->reloc_root) {
864 reloc_root = root->reloc_root;
865 reloc_root->last_trans = trans->transid;
870 * We are merging reloc roots, so we do not need new reloc trees. Also,
871 * reloc trees never need their own reloc tree.
873 if (!rc->create_reloc_tree ||
874 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
877 if (!trans->reloc_reserved) {
878 rsv = trans->block_rsv;
879 trans->block_rsv = rc->block_rsv;
882 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
884 trans->block_rsv = rsv;
885 if (IS_ERR(reloc_root))
886 return PTR_ERR(reloc_root);
888 ret = __add_reloc_root(reloc_root);
889 ASSERT(ret != -EEXIST);
891 /* Pairs with create_reloc_root */
892 btrfs_put_root(reloc_root);
895 root->reloc_root = btrfs_grab_root(reloc_root);
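	/*
	 * Reference bookkeeping, as noted in the comment above this
	 * function: at this point the reloc root is pinned twice, once by
	 * root->reloc_root (the grab just above) and once by its presence
	 * on the rc->reloc_roots list.  Each reference is eventually
	 * dropped with its own btrfs_put_root().
	 */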
900 * update root item of reloc tree
902 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
903 struct btrfs_root *root)
905 struct btrfs_fs_info *fs_info = root->fs_info;
906 struct btrfs_root *reloc_root;
907 struct btrfs_root_item *root_item;
910 if (!have_reloc_root(root))
913 reloc_root = root->reloc_root;
914 root_item = &reloc_root->root_item;
917 * We are probably ok here, but __del_reloc_root() will drop its ref of
918 * the root. We have the ref for root->reloc_root, but just in case
919 * hold it while we update the reloc root.
921 btrfs_grab_root(reloc_root);
923 /* root->reloc_root will stay until the current relocation is finished */
924 if (fs_info->reloc_ctl->merge_reloc_tree &&
925 btrfs_root_refs(root_item) == 0) {
926 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
928 * Mark the tree as dead before we change reloc_root so
929 * have_reloc_root will not touch it from now on.
932 __del_reloc_root(reloc_root);
935 if (reloc_root->commit_root != reloc_root->node) {
936 __update_reloc_root(reloc_root);
937 btrfs_set_root_node(root_item, reloc_root->node);
938 free_extent_buffer(reloc_root->commit_root);
939 reloc_root->commit_root = btrfs_root_node(reloc_root);
942 ret = btrfs_update_root(trans, fs_info->tree_root,
943 &reloc_root->root_key, root_item);
944 btrfs_put_root(reloc_root);
949 * helper to find the first cached inode with inode number >= objectid
952 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
954 struct rb_node *node;
955 struct rb_node *prev;
956 struct btrfs_inode *entry;
959 spin_lock(&root->inode_lock);
961 node = root->inode_tree.rb_node;
965 entry = rb_entry(node, struct btrfs_inode, rb_node);
967 if (objectid < btrfs_ino(entry))
968 node = node->rb_left;
969 else if (objectid > btrfs_ino(entry))
970 node = node->rb_right;
976 entry = rb_entry(prev, struct btrfs_inode, rb_node);
977 if (objectid <= btrfs_ino(entry)) {
981 prev = rb_next(prev);
985 entry = rb_entry(node, struct btrfs_inode, rb_node);
986 inode = igrab(&entry->vfs_inode);
988 spin_unlock(&root->inode_lock);
992 objectid = btrfs_ino(entry) + 1;
993 if (cond_resched_lock(&root->inode_lock))
996 node = rb_next(node);
998 spin_unlock(&root->inode_lock);
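/*
 * Illustrative sketch only, not compiled: callers walk a root's cached
 * inodes in ascending inode-number order, the way invalidate_extent_cache()
 * below does.  @first_objectid is a stand-in for the starting inode number.
 */
#if 0
	struct inode *inode;
	u64 objectid = first_objectid;

	while ((inode = find_next_inode(root, objectid))) {
		objectid = btrfs_ino(BTRFS_I(inode)) + 1;
		/* ... use @inode ... */
		iput(inode);
	}
#endif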
1003 * get new location of data
1005 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1006 u64 bytenr, u64 num_bytes)
1008 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1009 struct btrfs_path *path;
1010 struct btrfs_file_extent_item *fi;
1011 struct extent_buffer *leaf;
1014 path = btrfs_alloc_path();
1018 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1019 ret = btrfs_lookup_file_extent(NULL, root, path,
1020 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1028 leaf = path->nodes[0];
1029 fi = btrfs_item_ptr(leaf, path->slots[0],
1030 struct btrfs_file_extent_item);
1032 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1033 btrfs_file_extent_compression(leaf, fi) ||
1034 btrfs_file_extent_encryption(leaf, fi) ||
1035 btrfs_file_extent_other_encoding(leaf, fi));
1037 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1042 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1045 btrfs_free_path(path);
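/*
 * Worked example for the lookup above, with illustrative numbers: the data
 * reloc inode stores relocated data at file offsets relative to the block
 * group start, and index_cnt caches that start.  For a block group starting
 * at 1G, an extent at bytenr 1G + 8M is looked up at file offset 8M, and
 * the file extent item found there supplies the new disk bytenr of the
 * relocated copy.
 */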
1050 * update file extent items in the tree leaf to point to
1051 * the new locations.
1053 static noinline_for_stack
1054 int replace_file_extents(struct btrfs_trans_handle *trans,
1055 struct reloc_control *rc,
1056 struct btrfs_root *root,
1057 struct extent_buffer *leaf)
1059 struct btrfs_fs_info *fs_info = root->fs_info;
1060 struct btrfs_key key;
1061 struct btrfs_file_extent_item *fi;
1062 struct inode *inode = NULL;
1074 if (rc->stage != UPDATE_DATA_PTRS)
1077 /* reloc trees always use full backref */
1078 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1079 parent = leaf->start;
1083 nritems = btrfs_header_nritems(leaf);
1084 for (i = 0; i < nritems; i++) {
1085 struct btrfs_ref ref = { 0 };
1088 btrfs_item_key_to_cpu(leaf, &key, i);
1089 if (key.type != BTRFS_EXTENT_DATA_KEY)
1091 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1092 if (btrfs_file_extent_type(leaf, fi) ==
1093 BTRFS_FILE_EXTENT_INLINE)
1095 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1096 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1099 if (!in_range(bytenr, rc->block_group->start,
1100 rc->block_group->length))
1104 * if we are modifying a block in the fs tree, wait for read_folio
1105 * to complete and drop the extent cache
1107 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1109 inode = find_next_inode(root, key.objectid);
1111 } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1112 btrfs_add_delayed_iput(inode);
1113 inode = find_next_inode(root, key.objectid);
1115 if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
1117 btrfs_file_extent_num_bytes(leaf, fi);
1118 WARN_ON(!IS_ALIGNED(key.offset,
1119 fs_info->sectorsize));
1120 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1122 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1127 btrfs_drop_extent_map_range(BTRFS_I(inode),
1128 key.offset, end, true);
1129 unlock_extent(&BTRFS_I(inode)->io_tree,
1130 key.offset, end, NULL);
1134 ret = get_new_location(rc->data_inode, &new_bytenr,
1138 * Don't have to abort since we've not changed anything
1139 * in the file extent yet.
1144 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1147 key.offset -= btrfs_file_extent_offset(leaf, fi);
1148 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1150 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1151 key.objectid, key.offset,
1152 root->root_key.objectid, false);
1153 ret = btrfs_inc_extent_ref(trans, &ref);
1155 btrfs_abort_transaction(trans, ret);
1159 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1161 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1162 key.objectid, key.offset,
1163 root->root_key.objectid, false);
1164 ret = btrfs_free_extent(trans, &ref);
1166 btrfs_abort_transaction(trans, ret);
1171 btrfs_mark_buffer_dirty(leaf);
1173 btrfs_add_delayed_iput(inode);
1177 static noinline_for_stack
1178 int memcmp_node_keys(struct extent_buffer *eb, int slot,
1179 struct btrfs_path *path, int level)
1181 struct btrfs_disk_key key1;
1182 struct btrfs_disk_key key2;
1183 btrfs_node_key(eb, &key1, slot);
1184 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1185 return memcmp(&key1, &key2, sizeof(key1));
1189 * Try to replace tree blocks in an fs tree with the new blocks
1190 * in the reloc tree. Tree blocks that haven't been modified since the
1191 * reloc tree was created can be replaced.
1193 * If a block was replaced, the level of the block + 1 is returned.
1194 * If no block got replaced, 0 is returned. If there are other
1195 * errors, a negative error number is returned.
1197 static noinline_for_stack
1198 int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1199 struct btrfs_root *dest, struct btrfs_root *src,
1200 struct btrfs_path *path, struct btrfs_key *next_key,
1201 int lowest_level, int max_level)
1203 struct btrfs_fs_info *fs_info = dest->fs_info;
1204 struct extent_buffer *eb;
1205 struct extent_buffer *parent;
1206 struct btrfs_ref ref = { 0 };
1207 struct btrfs_key key;
1219 ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1220 ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1222 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1224 slot = path->slots[lowest_level];
1225 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1227 eb = btrfs_lock_root_node(dest);
1228 level = btrfs_header_level(eb);
1230 if (level < lowest_level) {
1231 btrfs_tree_unlock(eb);
1232 free_extent_buffer(eb);
1237 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
1240 btrfs_tree_unlock(eb);
1241 free_extent_buffer(eb);
1247 next_key->objectid = (u64)-1;
1248 next_key->type = (u8)-1;
1249 next_key->offset = (u64)-1;
1254 level = btrfs_header_level(parent);
1255 ASSERT(level >= lowest_level);
1257 ret = btrfs_bin_search(parent, &key, &slot);
1260 if (ret && slot > 0)
1263 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1264 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1266 old_bytenr = btrfs_node_blockptr(parent, slot);
1267 blocksize = fs_info->nodesize;
1268 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1270 if (level <= max_level) {
1271 eb = path->nodes[level];
1272 new_bytenr = btrfs_node_blockptr(eb,
1273 path->slots[level]);
1274 new_ptr_gen = btrfs_node_ptr_generation(eb,
1275 path->slots[level]);
1281 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1286 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1287 memcmp_node_keys(parent, slot, path, level)) {
1288 if (level <= lowest_level) {
1293 eb = btrfs_read_node_slot(parent, slot);
1298 btrfs_tree_lock(eb);
1300 ret = btrfs_cow_block(trans, dest, eb, parent,
1304 btrfs_tree_unlock(eb);
1305 free_extent_buffer(eb);
1310 btrfs_tree_unlock(parent);
1311 free_extent_buffer(parent);
1318 btrfs_tree_unlock(parent);
1319 free_extent_buffer(parent);
1324 btrfs_node_key_to_cpu(path->nodes[level], &key,
1325 path->slots[level]);
1326 btrfs_release_path(path);
1328 path->lowest_level = level;
1329 set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1330 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1331 clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1332 path->lowest_level = 0;
1340 * Inform qgroup to trace both subtrees.
1342 * We must trace both trees.
1343 * 1) Tree reloc subtree
1344 * If not traced, we will leak data numbers
1346 * 2) Fs subtree: if not traced, we will double count old data
1348 * We don't scan the subtree right now, but only record
1349 * the swapped tree blocks.
1350 * The real subtree rescan is delayed until we have new
1351 * CoW on the subtree root node before transaction commit.
1353 ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
1354 rc->block_group, parent, slot,
1355 path->nodes[level], path->slots[level],
1360 * swap blocks in fs tree and reloc tree.
1362 btrfs_set_node_blockptr(parent, slot, new_bytenr);
1363 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1364 btrfs_mark_buffer_dirty(parent);
1366 btrfs_set_node_blockptr(path->nodes[level],
1367 path->slots[level], old_bytenr);
1368 btrfs_set_node_ptr_generation(path->nodes[level],
1369 path->slots[level], old_ptr_gen);
1370 btrfs_mark_buffer_dirty(path->nodes[level]);
1372 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
1373 blocksize, path->nodes[level]->start);
1374 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
1376 ret = btrfs_inc_extent_ref(trans, &ref);
1378 btrfs_abort_transaction(trans, ret);
1381 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1383 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
1385 ret = btrfs_inc_extent_ref(trans, &ref);
1387 btrfs_abort_transaction(trans, ret);
1391 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
1392 blocksize, path->nodes[level]->start);
1393 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
1395 ret = btrfs_free_extent(trans, &ref);
1397 btrfs_abort_transaction(trans, ret);
1401 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
1403 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
1405 ret = btrfs_free_extent(trans, &ref);
1407 btrfs_abort_transaction(trans, ret);
1411 btrfs_unlock_up_safe(path, 0);
1416 btrfs_tree_unlock(parent);
1417 free_extent_buffer(parent);
1422 * helper to find next relocated block in reloc tree
1424 static noinline_for_stack
1425 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1428 struct extent_buffer *eb;
1433 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1435 for (i = 0; i < *level; i++) {
1436 free_extent_buffer(path->nodes[i]);
1437 path->nodes[i] = NULL;
1440 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1441 eb = path->nodes[i];
1442 nritems = btrfs_header_nritems(eb);
1443 while (path->slots[i] + 1 < nritems) {
1445 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1452 free_extent_buffer(path->nodes[i]);
1453 path->nodes[i] = NULL;
1459 * walk down reloc tree to find relocated block of lowest level
1461 static noinline_for_stack
1462 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1465 struct extent_buffer *eb = NULL;
1471 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1473 for (i = *level; i > 0; i--) {
1474 eb = path->nodes[i];
1475 nritems = btrfs_header_nritems(eb);
1476 while (path->slots[i] < nritems) {
1477 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1478 if (ptr_gen > last_snapshot)
1482 if (path->slots[i] >= nritems) {
1493 eb = btrfs_read_node_slot(eb, path->slots[i]);
1496 BUG_ON(btrfs_header_level(eb) != i - 1);
1497 path->nodes[i - 1] = eb;
1498 path->slots[i - 1] = 0;
1504 * invalidate extent cache for file extents whose keys are in the range of
1505 * [min_key, max_key)
1507 static int invalidate_extent_cache(struct btrfs_root *root,
1508 struct btrfs_key *min_key,
1509 struct btrfs_key *max_key)
1511 struct btrfs_fs_info *fs_info = root->fs_info;
1512 struct inode *inode = NULL;
1517 objectid = min_key->objectid;
1522 if (objectid > max_key->objectid)
1525 inode = find_next_inode(root, objectid);
1528 ino = btrfs_ino(BTRFS_I(inode));
1530 if (ino > max_key->objectid) {
1536 if (!S_ISREG(inode->i_mode))
1539 if (unlikely(min_key->objectid == ino)) {
1540 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1542 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1545 start = min_key->offset;
1546 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
1552 if (unlikely(max_key->objectid == ino)) {
1553 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1555 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1558 if (max_key->offset == 0)
1560 end = max_key->offset;
1561 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1568 /* the lock_extent waits for read_folio to complete */
1569 lock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
1570 btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
1571 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
1576 static int find_next_key(struct btrfs_path *path, int level,
1577 struct btrfs_key *key)
1580 while (level < BTRFS_MAX_LEVEL) {
1581 if (!path->nodes[level])
1583 if (path->slots[level] + 1 <
1584 btrfs_header_nritems(path->nodes[level])) {
1585 btrfs_node_key_to_cpu(path->nodes[level], key,
1586 path->slots[level] + 1);
1595 * Insert current subvolume into reloc_control::dirty_subvol_roots
1597 static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
1598 struct reloc_control *rc,
1599 struct btrfs_root *root)
1601 struct btrfs_root *reloc_root = root->reloc_root;
1602 struct btrfs_root_item *reloc_root_item;
1605 /* @root must be a subvolume tree root with a valid reloc tree */
1606 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1609 reloc_root_item = &reloc_root->root_item;
1610 memset(&reloc_root_item->drop_progress, 0,
1611 sizeof(reloc_root_item->drop_progress));
1612 btrfs_set_root_drop_level(reloc_root_item, 0);
1613 btrfs_set_root_refs(reloc_root_item, 0);
1614 ret = btrfs_update_reloc_root(trans, root);
1618 if (list_empty(&root->reloc_dirty_list)) {
1619 btrfs_grab_root(root);
1620 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
1626 static int clean_dirty_subvols(struct reloc_control *rc)
1628 struct btrfs_root *root;
1629 struct btrfs_root *next;
1633 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
1635 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1636 /* Merged subvolume, cleanup its reloc root */
1637 struct btrfs_root *reloc_root = root->reloc_root;
1639 list_del_init(&root->reloc_dirty_list);
1640 root->reloc_root = NULL;
1642 * Need barrier to ensure clear_bit() only happens after
1643 * root->reloc_root = NULL. Pairs with have_reloc_root.
1646 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1649 * btrfs_drop_snapshot drops the ref we hold for
1650 * ->reloc_root. If it fails, however, we must
1651 * drop the ref ourselves.
1653 ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
1655 btrfs_put_root(reloc_root);
1660 btrfs_put_root(root);
1662 /* Orphan reloc tree, just clean it up */
1663 ret2 = btrfs_drop_snapshot(root, 0, 1);
1665 btrfs_put_root(root);
1675 * merge the relocated tree blocks in reloc tree with corresponding fs tree blocks
1678 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1679 struct btrfs_root *root)
1681 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1682 struct btrfs_key key;
1683 struct btrfs_key next_key;
1684 struct btrfs_trans_handle *trans = NULL;
1685 struct btrfs_root *reloc_root;
1686 struct btrfs_root_item *root_item;
1687 struct btrfs_path *path;
1688 struct extent_buffer *leaf;
1696 path = btrfs_alloc_path();
1699 path->reada = READA_FORWARD;
1701 reloc_root = root->reloc_root;
1702 root_item = &reloc_root->root_item;
1704 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1705 level = btrfs_root_level(root_item);
1706 atomic_inc(&reloc_root->node->refs);
1707 path->nodes[level] = reloc_root->node;
1708 path->slots[level] = 0;
1710 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1712 level = btrfs_root_drop_level(root_item);
1714 path->lowest_level = level;
1715 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1716 path->lowest_level = 0;
1718 btrfs_free_path(path);
1722 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
1723 path->slots[level]);
1724 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
1726 btrfs_unlock_up_safe(path, 0);
1730 * In merge_reloc_root(), we modify the upper level pointer to swap the
1731 * tree blocks between reloc tree and subvolume tree. Thus for tree
1732 * block COW, we COW at most from level 1 to root level for each tree.
1734 * Thus the needed metadata size is at most root_level * nodesize,
1735 * doubled since we have two trees to COW.
1737 reserve_level = max_t(int, 1, btrfs_root_level(root_item));
1738 min_reserved = fs_info->nodesize * reserve_level * 2;
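	/*
	 * Worked example with illustrative numbers: with a 16K nodesize and
	 * a level-5 root, reserve_level = 5 and min_reserved =
	 * 16K * 5 * 2 = 160K.  For a root at level 0, reserve_level is
	 * clamped to 1, giving 16K * 1 * 2 = 32K.
	 */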
1739 memset(&next_key, 0, sizeof(next_key));
1742 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
1744 BTRFS_RESERVE_FLUSH_LIMIT);
1747 trans = btrfs_start_transaction(root, 0);
1748 if (IS_ERR(trans)) {
1749 ret = PTR_ERR(trans);
1755 * At this point we no longer have a reloc_control, so we can't
1756 * depend on btrfs_init_reloc_root to update our last_trans.
1758 * But that's ok, we started the trans handle on our
1759 * corresponding fs_root, which means it's been added to the
1760 * dirty list. At commit time we'll still call
1761 * btrfs_update_reloc_root() and update our root item
1764 reloc_root->last_trans = trans->transid;
1765 trans->block_rsv = rc->block_rsv;
1770 ret = walk_down_reloc_tree(reloc_root, path, &level);
1776 if (!find_next_key(path, level, &key) &&
1777 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1780 ret = replace_path(trans, rc, root, reloc_root, path,
1781 &next_key, level, max_level);
1787 btrfs_node_key_to_cpu(path->nodes[level], &key,
1788 path->slots[level]);
1792 ret = walk_up_reloc_tree(reloc_root, path, &level);
1798 * Save the merging progress in the drop_progress.
1799 * This is OK since root refs == 1 in this case.
1801 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
1802 path->slots[level]);
1803 btrfs_set_root_drop_level(root_item, level);
1805 btrfs_end_transaction_throttle(trans);
1808 btrfs_btree_balance_dirty(fs_info);
1810 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1811 invalidate_extent_cache(root, &key, &next_key);
1815 * handle the case where only one block in the fs tree needs to be
1816 * relocated and the block is the tree root.
1818 leaf = btrfs_lock_root_node(root);
1819 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
1821 btrfs_tree_unlock(leaf);
1822 free_extent_buffer(leaf);
1824 btrfs_free_path(path);
1827 ret = insert_dirty_subvol(trans, rc, root);
1829 btrfs_abort_transaction(trans, ret);
1833 btrfs_end_transaction_throttle(trans);
1835 btrfs_btree_balance_dirty(fs_info);
1837 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1838 invalidate_extent_cache(root, &key, &next_key);
1843 static noinline_for_stack
1844 int prepare_to_merge(struct reloc_control *rc, int err)
1846 struct btrfs_root *root = rc->extent_root;
1847 struct btrfs_fs_info *fs_info = root->fs_info;
1848 struct btrfs_root *reloc_root;
1849 struct btrfs_trans_handle *trans;
1850 LIST_HEAD(reloc_roots);
1854 mutex_lock(&fs_info->reloc_mutex);
1855 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1856 rc->merging_rsv_size += rc->nodes_relocated * 2;
1857 mutex_unlock(&fs_info->reloc_mutex);
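	/*
	 * Worked example with illustrative numbers: BTRFS_MAX_LEVEL is 8,
	 * so with a 16K nodesize the first term above adds
	 * 16K * 7 * 2 = 224K, plus twice the bytes of tree nodes relocated
	 * so far from the second term.
	 */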
1861 num_bytes = rc->merging_rsv_size;
1862 ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
1863 BTRFS_RESERVE_FLUSH_ALL);
1868 trans = btrfs_join_transaction(rc->extent_root);
1869 if (IS_ERR(trans)) {
1871 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1873 return PTR_ERR(trans);
1877 if (num_bytes != rc->merging_rsv_size) {
1878 btrfs_end_transaction(trans);
1879 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1885 rc->merge_reloc_tree = 1;
1887 while (!list_empty(&rc->reloc_roots)) {
1888 reloc_root = list_entry(rc->reloc_roots.next,
1889 struct btrfs_root, root_list);
1890 list_del_init(&reloc_root->root_list);
1892 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1896 * Even if we have an error we need this reloc root
1897 * back on our list so we can clean up properly.
1899 list_add(&reloc_root->root_list, &reloc_roots);
1900 btrfs_abort_transaction(trans, (int)PTR_ERR(root));
1902 err = PTR_ERR(root);
1905 ASSERT(root->reloc_root == reloc_root);
1908 * set reference count to 1, so btrfs_recover_relocation
1909 * knows it should resume merging
1912 btrfs_set_root_refs(&reloc_root->root_item, 1);
1913 ret = btrfs_update_reloc_root(trans, root);
1916 * Even if we have an error we need this reloc root back on our
1917 * list so we can clean up properly.
1919 list_add(&reloc_root->root_list, &reloc_roots);
1920 btrfs_put_root(root);
1923 btrfs_abort_transaction(trans, ret);
1930 list_splice(&reloc_roots, &rc->reloc_roots);
1933 err = btrfs_commit_transaction(trans);
1935 btrfs_end_transaction(trans);
1939 static noinline_for_stack
1940 void free_reloc_roots(struct list_head *list)
1942 struct btrfs_root *reloc_root, *tmp;
1944 list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1945 __del_reloc_root(reloc_root);
1948 static noinline_for_stack
1949 void merge_reloc_roots(struct reloc_control *rc)
1951 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1952 struct btrfs_root *root;
1953 struct btrfs_root *reloc_root;
1954 LIST_HEAD(reloc_roots);
1958 root = rc->extent_root;
1961 * This serializes us with btrfs_record_root_in_transaction;
1962 * we have to make sure nobody is in the middle of
1963 * adding their roots to the list while we are doing this splice.
1966 mutex_lock(&fs_info->reloc_mutex);
1967 list_splice_init(&rc->reloc_roots, &reloc_roots);
1968 mutex_unlock(&fs_info->reloc_mutex);
1970 while (!list_empty(&reloc_roots)) {
1972 reloc_root = list_entry(reloc_roots.next,
1973 struct btrfs_root, root_list);
1975 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1977 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1980 * For recovery we read the fs roots on mount,
1981 * and if we didn't find the root then we marked
1982 * the reloc root as a garbage root. For normal
1983 * relocation obviously the root should exist in
1984 * memory. However there's no reason we can't
1985 * handle the error properly here just in case.
1988 ret = PTR_ERR(root);
1991 if (root->reloc_root != reloc_root) {
1993 * This is actually impossible without something
1994 * going really wrong (like a weird race condition or cosmic rays).
2001 ret = merge_reloc_root(rc, root);
2002 btrfs_put_root(root);
2004 if (list_empty(&reloc_root->root_list))
2005 list_add_tail(&reloc_root->root_list,
2010 if (!IS_ERR(root)) {
2011 if (root->reloc_root == reloc_root) {
2012 root->reloc_root = NULL;
2013 btrfs_put_root(reloc_root);
2015 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
2017 btrfs_put_root(root);
2020 list_del_init(&reloc_root->root_list);
2021 /* Don't forget to queue this reloc root for cleanup */
2022 list_add_tail(&reloc_root->reloc_dirty_list,
2023 &rc->dirty_subvol_roots);
2033 btrfs_handle_fs_error(fs_info, ret, NULL);
2034 free_reloc_roots(&reloc_roots);
2036 /* new reloc root may be added */
2037 mutex_lock(&fs_info->reloc_mutex);
2038 list_splice_init(&rc->reloc_roots, &reloc_roots);
2039 mutex_unlock(&fs_info->reloc_mutex);
2040 free_reloc_roots(&reloc_roots);
2046 * We used to have a BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root))
2048 * here, but it's wrong. If we fail to start the transaction in
2049 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
2050 * have actually been removed from the reloc_root_tree rb tree. This is
2051 * fine because we're bailing here, and we hold a reference on the root
2052 * for the list that holds it, so these roots will be cleaned up when we
2053 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
2054 * will be cleaned up on unmount.
2056 * The remaining nodes will be cleaned up by free_reloc_control.
2060 static void free_block_list(struct rb_root *blocks)
2062 struct tree_block *block;
2063 struct rb_node *rb_node;
2064 while ((rb_node = rb_first(blocks))) {
2065 block = rb_entry(rb_node, struct tree_block, rb_node);
2066 rb_erase(rb_node, blocks);
2071 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2072 struct btrfs_root *reloc_root)
2074 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2075 struct btrfs_root *root;
2078 if (reloc_root->last_trans == trans->transid)
2081 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
2084 * This should succeed, since we can't have a reloc root without having
2085 * already looked up the actual root and created the reloc root for this root.
2088 * However, if there's some sort of corruption where we have a ref to a
2089 * reloc root without a corresponding root, this could return -ENOENT.
2093 return PTR_ERR(root);
2095 if (root->reloc_root != reloc_root) {
2098 "root %llu has two reloc roots associated with it",
2099 reloc_root->root_key.offset);
2100 btrfs_put_root(root);
2103 ret = btrfs_record_root_in_trans(trans, root);
2104 btrfs_put_root(root);
2109 static noinline_for_stack
2110 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2111 struct reloc_control *rc,
2112 struct btrfs_backref_node *node,
2113 struct btrfs_backref_edge *edges[])
2115 struct btrfs_backref_node *next;
2116 struct btrfs_root *root;
2123 next = walk_up_backref(next, edges, &index);
2127 * If there is no root, then our references for this block are
2128 * incomplete, as we should be able to walk all the way up to a
2129 * block that is owned by a root.
2131 * This path is only for SHAREABLE roots, so if we come upon a
2132 * non-SHAREABLE root then we have backrefs that resolve improperly.
2135 * Both of these cases indicate file system corruption, or a bug
2136 * in the backref walking code.
2140 btrfs_err(trans->fs_info,
2141 "bytenr %llu doesn't have a backref path ending in a root",
2143 return ERR_PTR(-EUCLEAN);
2145 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2147 btrfs_err(trans->fs_info,
2148 "bytenr %llu has multiple refs with one ending in a non-shareable root",
2150 return ERR_PTR(-EUCLEAN);
2153 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2154 ret = record_reloc_root_in_trans(trans, root);
2156 return ERR_PTR(ret);
2160 ret = btrfs_record_root_in_trans(trans, root);
2162 return ERR_PTR(ret);
2163 root = root->reloc_root;
2166 * We could have raced with another thread which failed, so
2167 * root->reloc_root may not be set; return -ENOENT in this case.
2170 return ERR_PTR(-ENOENT);
2172 if (next->new_bytenr != root->node->start) {
2174 * We just created the reloc root, so we shouldn't have
2175 * ->new_bytenr set and this shouldn't be in the changed
2176 * list. If it is then we have multiple roots pointing
2177 * at the same bytenr which indicates corruption, or
2178 * we've made a mistake in the backref walking code.
2180 ASSERT(next->new_bytenr == 0);
2181 ASSERT(list_empty(&next->list));
2182 if (next->new_bytenr || !list_empty(&next->list)) {
2183 btrfs_err(trans->fs_info,
2184 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
2185 node->bytenr, next->bytenr);
2186 return ERR_PTR(-EUCLEAN);
2189 next->new_bytenr = root->node->start;
2190 btrfs_put_root(next->root);
2191 next->root = btrfs_grab_root(root);
2193 list_add_tail(&next->list,
2194 &rc->backref_cache.changed);
2195 mark_block_processed(rc, next);
2201 next = walk_down_backref(edges, &index);
2202 if (!next || next->level <= node->level)
2207 * This can happen if there's fs corruption or if there's a bug
2208 * in the backref lookup code.
2211 return ERR_PTR(-ENOENT);
2215 /* setup backref node path for btrfs_reloc_cow_block */
2217 rc->backref_cache.path[next->level] = next;
2220 next = edges[index]->node[UPPER];
2226 * Select a tree root for relocation.
2228 * Return NULL if the block is not shareable; we should use do_relocation() in this case.
2231 * Return a tree root pointer if the block is shareable.
2232 * Return -ENOENT if the block is the root of a reloc tree.
2234 static noinline_for_stack
2235 struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2237 struct btrfs_backref_node *next;
2238 struct btrfs_root *root;
2239 struct btrfs_root *fs_root = NULL;
2240 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2246 next = walk_up_backref(next, edges, &index);
2250 * This can occur if we have incomplete extent refs leading all
2251 * the way up a particular path; in this case return -EUCLEAN.
2254 return ERR_PTR(-EUCLEAN);
2256 /* No other choice for non-shareable tree */
2257 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2260 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2266 next = walk_down_backref(edges, &index);
2267 if (!next || next->level <= node->level)
2272 return ERR_PTR(-ENOENT);
2276 static noinline_for_stack
2277 u64 calcu_metadata_size(struct reloc_control *rc,
2278 struct btrfs_backref_node *node, int reserve)
2280 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2281 struct btrfs_backref_node *next = node;
2282 struct btrfs_backref_edge *edge;
2283 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2287 BUG_ON(reserve && node->processed);
2292 if (next->processed && (reserve || next != node))
2295 num_bytes += fs_info->nodesize;
2297 if (list_empty(&next->upper))
2300 edge = list_entry(next->upper.next,
2301 struct btrfs_backref_edge, list[LOWER]);
2302 edges[index++] = edge;
2303 next = edge->node[UPPER];
2305 next = walk_down_backref(edges, &index);
2310 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2311 struct reloc_control *rc,
2312 struct btrfs_backref_node *node)
2314 struct btrfs_root *root = rc->extent_root;
2315 struct btrfs_fs_info *fs_info = root->fs_info;
2320 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2322 trans->block_rsv = rc->block_rsv;
2323 rc->reserved_bytes += num_bytes;
2326 * We are under a transaction here so we can only do limited flushing.
2327 * If we get an enospc just kick back -EAGAIN so we know to drop the
2328 * transaction and try to refill when we can flush all the things.
2330 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2331 BTRFS_RESERVE_FLUSH_LIMIT);
2333 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2334 while (tmp <= rc->reserved_bytes)
2337 * Only one thread can access block_rsv at this point,
2338 * so we don't need to hold a lock to protect block_rsv.
2339 * We expand the reservation size here to allow enough
2340 * space for relocation, and we will return earlier in the enospc case.
2343 rc->block_rsv->size = tmp + fs_info->nodesize *
2344 RELOCATION_RESERVED_NODES;
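		/*
		 * Worked example with illustrative numbers: the base
		 * reservation is nodesize * RELOCATION_RESERVED_NODES;
		 * with a 16K nodesize that is 16K * 256 = 4M, so
		 * block_rsv->size starts at 8M here and keeps growing as
		 * reserved_bytes grows.
		 */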
2352 * Relocate a block tree, and then update pointers in upper level
2353 * blocks that reference the block to point to the new location.
2355 * If called by link_to_upper, the block has already been relocated;
2356 * in that case this function just updates pointers.
2358 static int do_relocation(struct btrfs_trans_handle *trans,
2359 struct reloc_control *rc,
2360 struct btrfs_backref_node *node,
2361 struct btrfs_key *key,
2362 struct btrfs_path *path, int lowest)
2364 struct btrfs_backref_node *upper;
2365 struct btrfs_backref_edge *edge;
2366 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2367 struct btrfs_root *root;
2368 struct extent_buffer *eb;
2375 * If we are lowest then this is the first time we're processing this
2376 * block, and thus shouldn't have an eb associated with it yet.
2378 ASSERT(!lowest || !node->eb);
2380 path->lowest_level = node->level + 1;
2381 rc->backref_cache.path[node->level] = node;
2382 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2383 struct btrfs_ref ref = { 0 };
2387 upper = edge->node[UPPER];
2388 root = select_reloc_root(trans, rc, upper, edges);
2390 ret = PTR_ERR(root);
2394 if (upper->eb && !upper->locked) {
2396 ret = btrfs_bin_search(upper->eb, key, &slot);
2400 bytenr = btrfs_node_blockptr(upper->eb, slot);
2401 if (node->eb->start == bytenr)
2404 btrfs_backref_drop_node_buffer(upper);
2408 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2413 btrfs_release_path(path);
2418 upper->eb = path->nodes[upper->level];
2419 path->nodes[upper->level] = NULL;
2421 BUG_ON(upper->eb != path->nodes[upper->level]);
2425 path->locks[upper->level] = 0;
2427 slot = path->slots[upper->level];
2428 btrfs_release_path(path);
2430 ret = btrfs_bin_search(upper->eb, key, &slot);
2436 bytenr = btrfs_node_blockptr(upper->eb, slot);
2438 if (bytenr != node->bytenr) {
2439 btrfs_err(root->fs_info,
2440 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2441 bytenr, node->bytenr, slot,
2447 if (node->eb->start == bytenr)
2451 blocksize = root->fs_info->nodesize;
2452 eb = btrfs_read_node_slot(upper->eb, slot);
2457 btrfs_tree_lock(eb);
2460 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2461 slot, &eb, BTRFS_NESTING_COW);
2462 btrfs_tree_unlock(eb);
2463 free_extent_buffer(eb);
2467 * We've just COWed this block; it should have updated
2468 * the correct backref node entry.
2470 ASSERT(node->eb == eb);
2472 btrfs_set_node_blockptr(upper->eb, slot,
2474 btrfs_set_node_ptr_generation(upper->eb, slot,
2476 btrfs_mark_buffer_dirty(upper->eb);
2478 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2479 node->eb->start, blocksize,
2481 btrfs_init_tree_ref(&ref, node->level,
2482 btrfs_header_owner(upper->eb),
2483 root->root_key.objectid, false);
2484 ret = btrfs_inc_extent_ref(trans, &ref);
2486 ret = btrfs_drop_subtree(trans, root, eb,
2489 btrfs_abort_transaction(trans, ret);
2492 if (!upper->pending)
2493 btrfs_backref_drop_node_buffer(upper);
2495 btrfs_backref_unlock_node_buffer(upper);
2500 if (!ret && node->pending) {
2501 btrfs_backref_drop_node_buffer(node);
2502 list_move_tail(&node->list, &rc->backref_cache.changed);
2506 path->lowest_level = 0;
2509 * We should have allocated all of our space in the block rsv and thus
2510 * shouldn't ENOSPC.
2512 ASSERT(ret != -ENOSPC);
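/*
 * Simplified sketch of the two modes of do_relocation() (descriptive
 * only, not the exact control flow):
 *
 *	lowest == 1: node->eb is not set yet. The parent block is found in
 *	the reloc root via btrfs_search_slot(), the child is read with
 *	btrfs_read_node_slot(), and btrfs_cow_block() relocates it, which
 *	also fills in the backref node (ASSERT(node->eb == eb) above).
 *
 *	lowest == 0 (called through link_to_upper()): the block was already
 *	relocated, so only the parent's blockptr/generation are rewritten,
 *	a delayed ref is added for the new location, and the old subtree
 *	reference is dropped via btrfs_drop_subtree().
 */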
2516 static int link_to_upper(struct btrfs_trans_handle *trans,
2517 struct reloc_control *rc,
2518 struct btrfs_backref_node *node,
2519 struct btrfs_path *path)
2521 struct btrfs_key key;
2523 btrfs_node_key_to_cpu(node->eb, &key, 0);
2524 return do_relocation(trans, rc, node, &key, path, 0);
2527 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2528 struct reloc_control *rc,
2529 struct btrfs_path *path, int err)
2532 struct btrfs_backref_cache *cache = &rc->backref_cache;
2533 struct btrfs_backref_node *node;
2537 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2538 while (!list_empty(&cache->pending[level])) {
2539 node = list_entry(cache->pending[level].next,
2540 struct btrfs_backref_node, list);
2541 list_move_tail(&node->list, &list);
2542 BUG_ON(!node->pending);
2545 ret = link_to_upper(trans, rc, node, path);
2550 list_splice_init(&list, &cache->pending[level]);
2556 * mark a block and all blocks that directly/indirectly reference the block
2557 * as processed.
2559 static void update_processed_blocks(struct reloc_control *rc,
2560 struct btrfs_backref_node *node)
2562 struct btrfs_backref_node *next = node;
2563 struct btrfs_backref_edge *edge;
2564 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2570 if (next->processed)
2573 mark_block_processed(rc, next);
2575 if (list_empty(&next->upper))
2578 edge = list_entry(next->upper.next,
2579 struct btrfs_backref_edge, list[LOWER]);
2580 edges[index++] = edge;
2581 next = edge->node[UPPER];
2583 next = walk_down_backref(edges, &index);
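/*
 * The loop above is the backref-graph walk pattern used throughout this
 * file: climb from a node to each referencing (upper) node via the
 * list[LOWER] edges, remember the edge taken at each level in edges[],
 * and let walk_down_backref() backtrack to the next unvisited branch.
 * Minimal sketch of the traversal skeleton (visit() is a hypothetical
 * placeholder; the real loop also skips already-processed nodes):
 *
 *	int index = 0;
 *	while (next) {
 *		visit(next);
 *		if (list_empty(&next->upper)) {
 *			next = walk_down_backref(edges, &index);
 *			continue;
 *		}
 *		edge = list_entry(next->upper.next,
 *				  struct btrfs_backref_edge, list[LOWER]);
 *		edges[index++] = edge;
 *		next = edge->node[UPPER];
 *	}
 */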
2587 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2589 u32 blocksize = rc->extent_root->fs_info->nodesize;
2591 if (test_range_bit(&rc->processed_blocks, bytenr,
2592 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
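/*
 * Processed tree blocks are tracked as EXTENT_DIRTY ranges in
 * rc->processed_blocks instead of a separate lookup structure. A
 * simplified sketch of the marking counterpart (the real helper is
 * mark_block_processed()):
 *
 *	set_extent_bits(&rc->processed_blocks, bytenr,
 *			bytenr + blocksize - 1, EXTENT_DIRTY);
 *
 * and all of it is dropped at once when the pass finishes:
 *
 *	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
 */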
2597 static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2598 struct tree_block *block)
2600 struct extent_buffer *eb;
2602 eb = read_tree_block(fs_info, block->bytenr, block->owner,
2603 block->key.offset, block->level, NULL);
2606 if (!extent_buffer_uptodate(eb)) {
2607 free_extent_buffer(eb);
2610 if (block->level == 0)
2611 btrfs_item_key_to_cpu(eb, &block->key, 0);
2613 btrfs_node_key_to_cpu(eb, &block->key, 0);
2614 free_extent_buffer(eb);
2615 block->key_ready = 1;
2620 * helper function to relocate a tree block
2622 static int relocate_tree_block(struct btrfs_trans_handle *trans,
2623 struct reloc_control *rc,
2624 struct btrfs_backref_node *node,
2625 struct btrfs_key *key,
2626 struct btrfs_path *path)
2628 struct btrfs_root *root;
2635 * If we fail here we want to drop our backref_node because we are going
2636 * to start over and regenerate the tree for it.
2638 ret = reserve_metadata_space(trans, rc, node);
2642 BUG_ON(node->processed);
2643 root = select_one_root(node);
2645 ret = PTR_ERR(root);
2647 /* See explanation in select_one_root for the -EUCLEAN case. */
2648 ASSERT(ret == -ENOENT);
2649 if (ret == -ENOENT) {
2651 update_processed_blocks(rc, node);
2657 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2659 * This block was the root block of a root, and this is
2660 * the first time we're processing the block and thus it
2661 * should not have had the ->new_bytenr modified and
2662 * should have not been included on the changed list.
2664 * However in the case of corruption we could have
2665 * multiple refs pointing to the same block improperly,
2666 * and thus we would trip over these checks. ASSERT()
2667 * for the developer case, because it could indicate a
2668 * bug in the backref code, however error out for a
2669 * normal user in the case of corruption.
2671 ASSERT(node->new_bytenr == 0);
2672 ASSERT(list_empty(&node->list));
2673 if (node->new_bytenr || !list_empty(&node->list)) {
2674 btrfs_err(root->fs_info,
2675 "bytenr %llu has improper references to it",
2680 ret = btrfs_record_root_in_trans(trans, root);
2684 * Another thread could have failed, need to check if we
2685 * have reloc_root actually set.
2687 if (!root->reloc_root) {
2691 root = root->reloc_root;
2692 node->new_bytenr = root->node->start;
2693 btrfs_put_root(node->root);
2694 node->root = btrfs_grab_root(root);
2696 list_add_tail(&node->list, &rc->backref_cache.changed);
2698 path->lowest_level = node->level;
2699 if (root == root->fs_info->chunk_root)
2700 btrfs_reserve_chunk_metadata(trans, false);
2701 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2702 btrfs_release_path(path);
2703 if (root == root->fs_info->chunk_root)
2704 btrfs_trans_release_chunk_metadata(trans);
2709 update_processed_blocks(rc, node);
2711 ret = do_relocation(trans, rc, node, key, path, 1);
2714 if (ret || node->level == 0 || node->cowonly)
2715 btrfs_backref_cleanup_node(&rc->backref_cache, node);
2720 * relocate a list of blocks
2722 static noinline_for_stack
2723 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2724 struct reloc_control *rc, struct rb_root *blocks)
2726 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2727 struct btrfs_backref_node *node;
2728 struct btrfs_path *path;
2729 struct tree_block *block;
2730 struct tree_block *next;
2734 path = btrfs_alloc_path();
2737 goto out_free_blocks;
2740 /* Kick in readahead for tree blocks with missing keys */
2741 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2742 if (!block->key_ready)
2743 btrfs_readahead_tree_block(fs_info, block->bytenr,
2748 /* Get first keys */
2749 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2750 if (!block->key_ready) {
2751 err = get_tree_block_key(fs_info, block);
2757 /* Do tree relocation */
2758 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2759 node = build_backref_tree(rc, &block->key,
2760 block->level, block->bytenr);
2762 err = PTR_ERR(node);
2766 ret = relocate_tree_block(trans, rc, node, &block->key,
2774 err = finish_pending_nodes(trans, rc, path, err);
2777 btrfs_free_path(path);
2779 free_block_list(blocks);
2783 static noinline_for_stack int prealloc_file_extent_cluster(
2784 struct btrfs_inode *inode,
2785 struct file_extent_cluster *cluster)
2790 u64 offset = inode->index_cnt;
2794 u64 i_size = i_size_read(&inode->vfs_inode);
2795 u64 prealloc_start = cluster->start - offset;
2796 u64 prealloc_end = cluster->end - offset;
2797 u64 cur_offset = prealloc_start;
2800 * For subpage case, previous i_size may not be aligned to PAGE_SIZE.
2801 * This means the range [i_size, PAGE_END + 1) is filled with zeros by
2802 * the btrfs_do_readpage() call of the previously relocated file cluster.
2804 * If the current cluster starts in the above range, btrfs_do_readpage()
2805 * will skip the read, and relocate_one_page() will later write back
2806 * the padding zeros as new data, causing data corruption.
2808 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
2810 if (!IS_ALIGNED(i_size, PAGE_SIZE)) {
2811 struct address_space *mapping = inode->vfs_inode.i_mapping;
2812 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2813 const u32 sectorsize = fs_info->sectorsize;
2816 ASSERT(sectorsize < PAGE_SIZE);
2817 ASSERT(IS_ALIGNED(i_size, sectorsize));
2820 * Subpage can't handle page with DIRTY but without UPTODATE
2821 * bit as it can lead to the following deadlock:
2823 * btrfs_read_folio()
2824 * | Page already *locked*
2825 * |- btrfs_lock_and_flush_ordered_range()
2826 * |- btrfs_start_ordered_extent()
2827 * |- extent_write_cache_pages()
2829 * We try to lock the page we already hold.
2831 * Here we just write back the whole data reloc inode, so that
2832 * we are guaranteed to have no dirty range in the page, and
2833 * are safe to clear the uptodate bits.
2835 * This shouldn't cause too much overhead, as we need to write
2836 * the data back anyway.
2838 ret = filemap_write_and_wait(mapping);
2842 clear_extent_bits(&inode->io_tree, i_size,
2843 round_up(i_size, PAGE_SIZE) - 1,
2845 page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
2847 * If the page was freed we don't need to do anything, as we
2848 * will re-read the whole page anyway.
2851 btrfs_subpage_clear_uptodate(fs_info, page, i_size,
2852 round_up(i_size, PAGE_SIZE) - i_size);
2858 BUG_ON(cluster->start != cluster->boundary[0]);
2859 ret = btrfs_alloc_data_chunk_ondemand(inode,
2860 prealloc_end + 1 - prealloc_start);
2864 btrfs_inode_lock(&inode->vfs_inode, 0);
2865 for (nr = 0; nr < cluster->nr; nr++) {
2866 start = cluster->boundary[nr] - offset;
2867 if (nr + 1 < cluster->nr)
2868 end = cluster->boundary[nr + 1] - 1 - offset;
2870 end = cluster->end - offset;
2872 lock_extent(&inode->io_tree, start, end, NULL);
2873 num_bytes = end + 1 - start;
2874 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2875 num_bytes, num_bytes,
2876 end + 1, &alloc_hint);
2877 cur_offset = end + 1;
2878 unlock_extent(&inode->io_tree, start, end, NULL);
2882 btrfs_inode_unlock(&inode->vfs_inode, 0);
2884 if (cur_offset < prealloc_end)
2885 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
2886 prealloc_end + 1 - cur_offset);
2890 static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
2891 u64 start, u64 end, u64 block_start)
2893 struct extent_map *em;
2896 em = alloc_extent_map();
2901 em->len = end + 1 - start;
2902 em->block_len = em->len;
2903 em->block_start = block_start;
2904 set_bit(EXTENT_FLAG_PINNED, &em->flags);
2906 lock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
2907 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
2908 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
2909 free_extent_map(em);
2915 * Allow error injection to test balance/relocation cancellation
2917 noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
2919 return atomic_read(&fs_info->balance_cancel_req) ||
2920 atomic_read(&fs_info->reloc_cancel_req) ||
2921 fatal_signal_pending(current);
2923 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
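/*
 * Long-running relocation loops poll the helper above and bail out with
 * -ECANCELED, as relocate_block_group() and relocate_one_page() do.
 * Minimal sketch of the pattern (relocation_step() is a hypothetical
 * placeholder):
 *
 *	while (have_work) {
 *		ret = relocation_step(rc);
 *		if (ret)
 *			break;
 *		if (btrfs_should_cancel_balance(fs_info)) {
 *			ret = -ECANCELED;
 *			break;
 *		}
 *	}
 */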
2925 static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
2928 /* Last extent, use cluster end directly */
2929 if (cluster_nr >= cluster->nr - 1)
2930 return cluster->end;
2932 /* Use next boundary start */
2933 return cluster->boundary[cluster_nr + 1] - 1;
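/*
 * Worked example (illustrative numbers): for a cluster with nr = 3,
 * boundary[] = { 1G, 1G + 16K, 1G + 64K } and end = 1G + 96K - 1:
 *
 *	get_cluster_boundary_end(cluster, 0) == 1G + 16K - 1
 *	get_cluster_boundary_end(cluster, 1) == 1G + 64K - 1
 *	get_cluster_boundary_end(cluster, 2) == 1G + 96K - 1	(cluster->end)
 */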
2936 static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
2937 struct file_extent_cluster *cluster,
2938 int *cluster_nr, unsigned long page_index)
2940 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2941 u64 offset = BTRFS_I(inode)->index_cnt;
2942 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
2943 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2950 ASSERT(page_index <= last_index);
2951 page = find_lock_page(inode->i_mapping, page_index);
2953 page_cache_sync_readahead(inode->i_mapping, ra, NULL,
2954 page_index, last_index + 1 - page_index);
2955 page = find_or_create_page(inode->i_mapping, page_index, mask);
2959 ret = set_page_extent_mapped(page);
2963 if (PageReadahead(page))
2964 page_cache_async_readahead(inode->i_mapping, ra, NULL,
2965 page_folio(page), page_index,
2966 last_index + 1 - page_index);
2968 if (!PageUptodate(page)) {
2969 btrfs_read_folio(NULL, page_folio(page));
2971 if (!PageUptodate(page)) {
2977 page_start = page_offset(page);
2978 page_end = page_start + PAGE_SIZE - 1;
2981 * Start from the cluster, as for the subpage case, the cluster can start
2982 * inside the page.
2984 cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
2985 while (cur <= page_end) {
2986 u64 extent_start = cluster->boundary[*cluster_nr] - offset;
2987 u64 extent_end = get_cluster_boundary_end(cluster,
2988 *cluster_nr) - offset;
2989 u64 clamped_start = max(page_start, extent_start);
2990 u64 clamped_end = min(page_end, extent_end);
2991 u32 clamped_len = clamped_end + 1 - clamped_start;
2993 /* Reserve metadata for this range */
2994 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
2995 clamped_len, clamped_len,
3000 /* Mark the range delalloc and dirty for later writeback */
3001 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, NULL);
3002 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
3003 clamped_end, 0, NULL);
3005 clear_extent_bits(&BTRFS_I(inode)->io_tree,
3006 clamped_start, clamped_end,
3007 EXTENT_LOCKED | EXTENT_BOUNDARY);
3008 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3010 btrfs_delalloc_release_extents(BTRFS_I(inode),
3014 btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
3017 * Set the boundary if it's inside the page.
3018 * Data relocation requires the destination extents to have the
3019 * same size as the source.
3020 * EXTENT_BOUNDARY bit prevents current extent from being merged
3021 * with previous extent.
3023 if (in_range(cluster->boundary[*cluster_nr] - offset,
3024 page_start, PAGE_SIZE)) {
3025 u64 boundary_start = cluster->boundary[*cluster_nr] -
3027 u64 boundary_end = boundary_start +
3028 fs_info->sectorsize - 1;
3030 set_extent_bits(&BTRFS_I(inode)->io_tree,
3031 boundary_start, boundary_end,
3034 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, NULL);
3035 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
3038 /* Crossed extent end, go to next extent */
3039 if (cur >= extent_end) {
3041 /* Just finished the last extent of the cluster, exit. */
3042 if (*cluster_nr >= cluster->nr)
3049 balance_dirty_pages_ratelimited(inode->i_mapping);
3050 btrfs_throttle(fs_info);
3051 if (btrfs_should_cancel_balance(fs_info))
3061 static int relocate_file_extent_cluster(struct inode *inode,
3062 struct file_extent_cluster *cluster)
3064 u64 offset = BTRFS_I(inode)->index_cnt;
3065 unsigned long index;
3066 unsigned long last_index;
3067 struct file_ra_state *ra;
3074 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3078 ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
3082 file_ra_state_init(ra, inode->i_mapping);
3084 ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
3085 cluster->end - offset, cluster->start);
3089 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3090 for (index = (cluster->start - offset) >> PAGE_SHIFT;
3091 index <= last_index && !ret; index++)
3092 ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
3094 WARN_ON(cluster_nr != cluster->nr);
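/*
 * Note on the offset math above: BTRFS_I(inode)->index_cnt is set to the
 * block group's start bytenr (see create_reloc_inode()), so a cluster's
 * file offset is simply bytenr - block_group->start. Illustrative
 * example with a block group at 1G and 4K pages: a cluster covering
 * bytenr [1G + 8M, 1G + 9M) maps to file range [8M, 9M), i.e. pages
 * 2048 through 2303 of the data reloc inode.
 */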
3100 static noinline_for_stack
3101 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3102 struct file_extent_cluster *cluster)
3106 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3107 ret = relocate_file_extent_cluster(inode, cluster);
3114 cluster->start = extent_key->objectid;
3116 BUG_ON(cluster->nr >= MAX_EXTENTS);
3117 cluster->end = extent_key->objectid + extent_key->offset - 1;
3118 cluster->boundary[cluster->nr] = extent_key->objectid;
3121 if (cluster->nr >= MAX_EXTENTS) {
3122 ret = relocate_file_extent_cluster(inode, cluster);
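/*
 * Illustrative example of the clustering above: data extents at bytenr
 * 1M (len 128K), 1M + 128K (len 64K) and 8M (len 128K). The second
 * extent starts at cluster->end + 1, so it is merged into the same
 * cluster (two boundary[] entries); the extent at 8M is discontiguous,
 * so the pending cluster is flushed through
 * relocate_file_extent_cluster() and a new cluster is started. A flush
 * also happens once a cluster reaches MAX_EXTENTS (128) boundaries.
 */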
3131 * helper to add a tree block to the list.
3132 * the major work is getting the generation and level of the block
3134 static int add_tree_block(struct reloc_control *rc,
3135 struct btrfs_key *extent_key,
3136 struct btrfs_path *path,
3137 struct rb_root *blocks)
3139 struct extent_buffer *eb;
3140 struct btrfs_extent_item *ei;
3141 struct btrfs_tree_block_info *bi;
3142 struct tree_block *block;
3143 struct rb_node *rb_node;
3149 eb = path->nodes[0];
3150 item_size = btrfs_item_size(eb, path->slots[0]);
3152 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3153 item_size >= sizeof(*ei) + sizeof(*bi)) {
3154 unsigned long ptr = 0, end;
3156 ei = btrfs_item_ptr(eb, path->slots[0],
3157 struct btrfs_extent_item);
3158 end = (unsigned long)ei + item_size;
3159 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3160 bi = (struct btrfs_tree_block_info *)(ei + 1);
3161 level = btrfs_tree_block_level(eb, bi);
3162 ptr = (unsigned long)(bi + 1);
3164 level = (int)extent_key->offset;
3165 ptr = (unsigned long)(ei + 1);
3167 generation = btrfs_extent_generation(eb, ei);
3170 * We're reading random blocks without knowing their owner ahead
3171 * of time. This is ok most of the time, as all reloc roots and
3172 * fs roots have the same lock type. However normal trees do
3173 * not, and the only way to know ahead of time is to read the
3174 * inline ref offset. We know it's an fs root if
3176 * 1. There's more than one ref.
3177 * 2. There's a SHARED_DATA_REF_KEY set.
3178 * 3. FULL_BACKREF is set on the flags.
3180 * Otherwise it's safe to assume that the ref offset == the
3181 * owner of this block, so we can use that when calling
3182 * read_tree_block().
3184 if (btrfs_extent_refs(eb, ei) == 1 &&
3185 !(btrfs_extent_flags(eb, ei) &
3186 BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3188 struct btrfs_extent_inline_ref *iref;
3191 iref = (struct btrfs_extent_inline_ref *)ptr;
3192 type = btrfs_get_extent_inline_ref_type(eb, iref,
3193 BTRFS_REF_TYPE_BLOCK);
3194 if (type == BTRFS_REF_TYPE_INVALID)
3196 if (type == BTRFS_TREE_BLOCK_REF_KEY)
3197 owner = btrfs_extent_inline_ref_offset(eb, iref);
3199 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3200 btrfs_print_v0_err(eb->fs_info);
3201 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3207 btrfs_release_path(path);
3209 BUG_ON(level == -1);
3211 block = kmalloc(sizeof(*block), GFP_NOFS);
3215 block->bytenr = extent_key->objectid;
3216 block->key.objectid = rc->extent_root->fs_info->nodesize;
3217 block->key.offset = generation;
3218 block->level = level;
3219 block->key_ready = 0;
3220 block->owner = owner;
3222 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3224 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3231 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3233 static int __add_tree_block(struct reloc_control *rc,
3234 u64 bytenr, u32 blocksize,
3235 struct rb_root *blocks)
3237 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3238 struct btrfs_path *path;
3239 struct btrfs_key key;
3241 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3243 if (tree_block_processed(bytenr, rc))
3246 if (rb_simple_search(blocks, bytenr))
3249 path = btrfs_alloc_path();
3253 key.objectid = bytenr;
3255 key.type = BTRFS_METADATA_ITEM_KEY;
3256 key.offset = (u64)-1;
3258 key.type = BTRFS_EXTENT_ITEM_KEY;
3259 key.offset = blocksize;
3262 path->search_commit_root = 1;
3263 path->skip_locking = 1;
3264 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3268 if (ret > 0 && skinny) {
3269 if (path->slots[0]) {
3271 btrfs_item_key_to_cpu(path->nodes[0], &key,
3273 if (key.objectid == bytenr &&
3274 (key.type == BTRFS_METADATA_ITEM_KEY ||
3275 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3276 key.offset == blocksize)))
3282 btrfs_release_path(path);
3288 btrfs_print_leaf(path->nodes[0]);
3290 "tree block extent item (%llu) is not found in extent tree",
3297 ret = add_tree_block(rc, &key, path, blocks);
3299 btrfs_free_path(path);
3303 static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3304 struct btrfs_block_group *block_group,
3305 struct inode *inode,
3308 struct btrfs_root *root = fs_info->tree_root;
3309 struct btrfs_trans_handle *trans;
3315 inode = btrfs_iget(fs_info->sb, ino, root);
3320 ret = btrfs_check_trunc_cache_free_space(fs_info,
3321 &fs_info->global_block_rsv);
3325 trans = btrfs_join_transaction(root);
3326 if (IS_ERR(trans)) {
3327 ret = PTR_ERR(trans);
3331 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3333 btrfs_end_transaction(trans);
3334 btrfs_btree_balance_dirty(fs_info);
3341 * Locate the free space cache EXTENT_DATA in the root tree leaf and delete the
3342 * cache inode, so the free space cache data extent cannot block data relocation.
3344 static int delete_v1_space_cache(struct extent_buffer *leaf,
3345 struct btrfs_block_group *block_group,
3348 u64 space_cache_ino;
3349 struct btrfs_file_extent_item *ei;
3350 struct btrfs_key key;
3355 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3358 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3361 btrfs_item_key_to_cpu(leaf, &key, i);
3362 if (key.type != BTRFS_EXTENT_DATA_KEY)
3364 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3365 type = btrfs_file_extent_type(leaf, ei);
3367 if ((type == BTRFS_FILE_EXTENT_REG ||
3368 type == BTRFS_FILE_EXTENT_PREALLOC) &&
3369 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3371 space_cache_ino = key.objectid;
3377 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3383 * helper to find all tree blocks that reference a given data extent
3385 static noinline_for_stack
3386 int add_data_references(struct reloc_control *rc,
3387 struct btrfs_key *extent_key,
3388 struct btrfs_path *path,
3389 struct rb_root *blocks)
3391 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3392 struct ulist *leaves = NULL;
3393 struct ulist_iterator leaf_uiter;
3394 struct ulist_node *ref_node = NULL;
3395 const u32 blocksize = fs_info->nodesize;
3398 btrfs_release_path(path);
3399 ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
3400 0, &leaves, NULL, true);
3404 ULIST_ITER_INIT(&leaf_uiter);
3405 while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
3406 struct extent_buffer *eb;
3408 eb = read_tree_block(fs_info, ref_node->val, 0, 0, 0, NULL);
3413 ret = delete_v1_space_cache(eb, rc->block_group,
3414 extent_key->objectid);
3415 free_extent_buffer(eb);
3418 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3423 free_block_list(blocks);
3429 * helper to find next unprocessed extent
3431 static noinline_for_stack
3432 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3433 struct btrfs_key *extent_key)
3435 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3436 struct btrfs_key key;
3437 struct extent_buffer *leaf;
3438 u64 start, end, last;
3441 last = rc->block_group->start + rc->block_group->length;
3444 if (rc->search_start >= last) {
3449 key.objectid = rc->search_start;
3450 key.type = BTRFS_EXTENT_ITEM_KEY;
3453 path->search_commit_root = 1;
3454 path->skip_locking = 1;
3455 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3460 leaf = path->nodes[0];
3461 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3462 ret = btrfs_next_leaf(rc->extent_root, path);
3465 leaf = path->nodes[0];
3468 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3469 if (key.objectid >= last) {
3474 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3475 key.type != BTRFS_METADATA_ITEM_KEY) {
3480 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3481 key.objectid + key.offset <= rc->search_start) {
3486 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3487 key.objectid + fs_info->nodesize <=
3493 ret = find_first_extent_bit(&rc->processed_blocks,
3494 key.objectid, &start, &end,
3495 EXTENT_DIRTY, NULL);
3497 if (ret == 0 && start <= key.objectid) {
3498 btrfs_release_path(path);
3499 rc->search_start = end + 1;
3501 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3502 rc->search_start = key.objectid + key.offset;
3504 rc->search_start = key.objectid +
3506 memcpy(extent_key, &key, sizeof(key));
3510 btrfs_release_path(path);
3514 static void set_reloc_control(struct reloc_control *rc)
3516 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3518 mutex_lock(&fs_info->reloc_mutex);
3519 fs_info->reloc_ctl = rc;
3520 mutex_unlock(&fs_info->reloc_mutex);
3523 static void unset_reloc_control(struct reloc_control *rc)
3525 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3527 mutex_lock(&fs_info->reloc_mutex);
3528 fs_info->reloc_ctl = NULL;
3529 mutex_unlock(&fs_info->reloc_mutex);
3532 static noinline_for_stack
3533 int prepare_to_relocate(struct reloc_control *rc)
3535 struct btrfs_trans_handle *trans;
3538 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3539 BTRFS_BLOCK_RSV_TEMP);
3543 memset(&rc->cluster, 0, sizeof(rc->cluster));
3544 rc->search_start = rc->block_group->start;
3545 rc->extents_found = 0;
3546 rc->nodes_relocated = 0;
3547 rc->merging_rsv_size = 0;
3548 rc->reserved_bytes = 0;
3549 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3550 RELOCATION_RESERVED_NODES;
3551 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3552 rc->block_rsv, rc->block_rsv->size,
3553 BTRFS_RESERVE_FLUSH_ALL);
3557 rc->create_reloc_tree = 1;
3558 set_reloc_control(rc);
3560 trans = btrfs_join_transaction(rc->extent_root);
3561 if (IS_ERR(trans)) {
3562 unset_reloc_control(rc);
3564 * the extent tree is not a ref_cow tree and has no reloc_root to
3565 * clean up. And callers are responsible for freeing the above
3566 * block rsv.
3568 return PTR_ERR(trans);
3571 ret = btrfs_commit_transaction(trans);
3573 unset_reloc_control(rc);
3578 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3580 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3581 struct rb_root blocks = RB_ROOT;
3582 struct btrfs_key key;
3583 struct btrfs_trans_handle *trans = NULL;
3584 struct btrfs_path *path;
3585 struct btrfs_extent_item *ei;
3591 path = btrfs_alloc_path();
3594 path->reada = READA_FORWARD;
3596 ret = prepare_to_relocate(rc);
3603 rc->reserved_bytes = 0;
3604 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
3605 rc->block_rsv->size,
3606 BTRFS_RESERVE_FLUSH_ALL);
3612 trans = btrfs_start_transaction(rc->extent_root, 0);
3613 if (IS_ERR(trans)) {
3614 err = PTR_ERR(trans);
3619 if (update_backref_cache(trans, &rc->backref_cache)) {
3620 btrfs_end_transaction(trans);
3625 ret = find_next_extent(rc, path, &key);
3631 rc->extents_found++;
3633 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3634 struct btrfs_extent_item);
3635 flags = btrfs_extent_flags(path->nodes[0], ei);
3637 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3638 ret = add_tree_block(rc, &key, path, &blocks);
3639 } else if (rc->stage == UPDATE_DATA_PTRS &&
3640 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3641 ret = add_data_references(rc, &key, path, &blocks);
3643 btrfs_release_path(path);
3651 if (!RB_EMPTY_ROOT(&blocks)) {
3652 ret = relocate_tree_blocks(trans, rc, &blocks);
3654 if (ret != -EAGAIN) {
3658 rc->extents_found--;
3659 rc->search_start = key.objectid;
3663 btrfs_end_transaction_throttle(trans);
3664 btrfs_btree_balance_dirty(fs_info);
3667 if (rc->stage == MOVE_DATA_EXTENTS &&
3668 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3669 rc->found_file_extent = 1;
3670 ret = relocate_data_extent(rc->data_inode,
3671 &key, &rc->cluster);
3677 if (btrfs_should_cancel_balance(fs_info)) {
3682 if (trans && progress && err == -ENOSPC) {
3683 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3691 btrfs_release_path(path);
3692 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3695 btrfs_end_transaction_throttle(trans);
3696 btrfs_btree_balance_dirty(fs_info);
3700 ret = relocate_file_extent_cluster(rc->data_inode,
3706 rc->create_reloc_tree = 0;
3707 set_reloc_control(rc);
3709 btrfs_backref_release_cache(&rc->backref_cache);
3710 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3713 * Even in the case when the relocation is cancelled, we should still go
3714 * through prepare_to_merge() and merge_reloc_roots().
3716 * For error (including cancelled balance), prepare_to_merge() will
3717 * mark all reloc trees orphan, then queue them for cleanup in
3718 * merge_reloc_roots().
3720 err = prepare_to_merge(rc, err);
3722 merge_reloc_roots(rc);
3724 rc->merge_reloc_tree = 0;
3725 unset_reloc_control(rc);
3726 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3728 /* get rid of pinned extents */
3729 trans = btrfs_join_transaction(rc->extent_root);
3730 if (IS_ERR(trans)) {
3731 err = PTR_ERR(trans);
3734 ret = btrfs_commit_transaction(trans);
3738 ret = clean_dirty_subvols(rc);
3739 if (ret < 0 && !err)
3741 btrfs_free_block_rsv(fs_info, rc->block_rsv);
3742 btrfs_free_path(path);
3746 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3747 struct btrfs_root *root, u64 objectid)
3749 struct btrfs_path *path;
3750 struct btrfs_inode_item *item;
3751 struct extent_buffer *leaf;
3754 path = btrfs_alloc_path();
3758 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3762 leaf = path->nodes[0];
3763 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3764 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3765 btrfs_set_inode_generation(leaf, item, 1);
3766 btrfs_set_inode_size(leaf, item, 0);
3767 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3768 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3769 BTRFS_INODE_PREALLOC);
3770 btrfs_mark_buffer_dirty(leaf);
3772 btrfs_free_path(path);
3776 static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3777 struct btrfs_root *root, u64 objectid)
3779 struct btrfs_path *path;
3780 struct btrfs_key key;
3783 path = btrfs_alloc_path();
3789 key.objectid = objectid;
3790 key.type = BTRFS_INODE_ITEM_KEY;
3792 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3798 ret = btrfs_del_item(trans, root, path);
3801 btrfs_abort_transaction(trans, ret);
3802 btrfs_free_path(path);
3806 * helper to create an inode for data relocation.
3807 * the inode is in the data relocation tree and its link count is 0.
3809 static noinline_for_stack
3810 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3811 struct btrfs_block_group *group)
3813 struct inode *inode = NULL;
3814 struct btrfs_trans_handle *trans;
3815 struct btrfs_root *root;
3819 root = btrfs_grab_root(fs_info->data_reloc_root);
3820 trans = btrfs_start_transaction(root, 6);
3821 if (IS_ERR(trans)) {
3822 btrfs_put_root(root);
3823 return ERR_CAST(trans);
3826 err = btrfs_get_free_objectid(root, &objectid);
3830 err = __insert_orphan_inode(trans, root, objectid);
3834 inode = btrfs_iget(fs_info->sb, objectid, root);
3835 if (IS_ERR(inode)) {
3836 delete_orphan_inode(trans, root, objectid);
3837 err = PTR_ERR(inode);
3841 BTRFS_I(inode)->index_cnt = group->start;
3843 err = btrfs_orphan_add(trans, BTRFS_I(inode));
3845 btrfs_put_root(root);
3846 btrfs_end_transaction(trans);
3847 btrfs_btree_balance_dirty(fs_info);
3850 inode = ERR_PTR(err);
3856 * Mark start of chunk relocation that is cancellable. Check if the cancellation
3857 * has been requested meanwhile and don't start in that case.
3858 *
3859 * Return:
3860 *   0             success
3861 *   -EINPROGRESS  operation is already in progress, that's probably a bug
3862 *   -ECANCELED    cancellation request was set before the operation started
3864 static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3866 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3867 /* This should not happen */
3868 btrfs_err(fs_info, "reloc already running, cannot start");
3869 return -EINPROGRESS;
3872 if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3873 btrfs_info(fs_info, "chunk relocation canceled on start");
3875 * On cancel, clear all requests but let the caller mark
3876 * the end after cleanup operations.
3878 atomic_set(&fs_info->reloc_cancel_req, 0);
3885 * Mark end of chunk relocation that is cancellable and wake any waiters.
3887 static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3889 /* Requested after start, clear bit first so any waiters can continue */
3890 if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3891 btrfs_info(fs_info, "chunk relocation canceled during operation");
3892 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3893 atomic_set(&fs_info->reloc_cancel_req, 0);
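/*
 * reloc_chunk_start() and reloc_chunk_end() bracket every cancellable
 * relocation, as in btrfs_relocate_block_group() and
 * btrfs_recover_relocation() below (sketch, cleanup paths trimmed):
 *
 *	ret = reloc_chunk_start(fs_info);
 *	if (ret < 0)
 *		goto out;
 *	... relocate ...
 *	reloc_chunk_end(fs_info);
 */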
3896 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3898 struct reloc_control *rc;
3900 rc = kzalloc(sizeof(*rc), GFP_NOFS);
3904 INIT_LIST_HEAD(&rc->reloc_roots);
3905 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3906 btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
3907 mapping_tree_init(&rc->reloc_root_tree);
3908 extent_io_tree_init(fs_info, &rc->processed_blocks,
3909 IO_TREE_RELOC_BLOCKS, NULL);
3913 static void free_reloc_control(struct reloc_control *rc)
3915 struct mapping_node *node, *tmp;
3917 free_reloc_roots(&rc->reloc_roots);
3918 rbtree_postorder_for_each_entry_safe(node, tmp,
3919 &rc->reloc_root_tree.rb_root, rb_node)
3926 * Print the block group being relocated
3928 static void describe_relocation(struct btrfs_fs_info *fs_info,
3929 struct btrfs_block_group *block_group)
3931 char buf[128] = {'\0'};
3933 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3936 "relocating block group %llu flags %s",
3937 block_group->start, buf);
3940 static const char *stage_to_string(int stage)
3942 if (stage == MOVE_DATA_EXTENTS)
3943 return "move data extents";
3944 if (stage == UPDATE_DATA_PTRS)
3945 return "update data pointers";
3950 * function to relocate all extents in a block group.
3952 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3954 struct btrfs_block_group *bg;
3955 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
3956 struct reloc_control *rc;
3957 struct inode *inode;
3958 struct btrfs_path *path;
3964 * This only gets set if we had a half-deleted snapshot on mount. We
3965 * cannot allow relocation to start while we're still trying to clean up
3966 * these pending deletions.
3968 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
3972 /* We may have been woken up by close_ctree, so bail if we're closing. */
3973 if (btrfs_fs_closing(fs_info))
3976 bg = btrfs_lookup_block_group(fs_info, group_start);
3981 * Relocation of a data block group creates ordered extents. Without
3982 * sb_start_write(), we can freeze the filesystem while unfinished
3983 * ordered extents are left. Such ordered extents can cause a deadlock
3984 * e.g. when syncfs() is waiting for their completion but they can't
3985 * finish because they block when joining a transaction, due to the
3986 * fact that the freeze locks are being held in write mode.
3988 if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
3989 ASSERT(sb_write_started(fs_info->sb));
3991 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
3992 btrfs_put_block_group(bg);
3996 rc = alloc_reloc_control(fs_info);
3998 btrfs_put_block_group(bg);
4002 ret = reloc_chunk_start(fs_info);
4008 rc->extent_root = extent_root;
4009 rc->block_group = bg;
4011 ret = btrfs_inc_block_group_ro(rc->block_group, true);
4018 path = btrfs_alloc_path();
4024 inode = lookup_free_space_inode(rc->block_group, path);
4025 btrfs_free_path(path);
4028 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4030 ret = PTR_ERR(inode);
4032 if (ret && ret != -ENOENT) {
4037 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4038 if (IS_ERR(rc->data_inode)) {
4039 err = PTR_ERR(rc->data_inode);
4040 rc->data_inode = NULL;
4044 describe_relocation(fs_info, rc->block_group);
4046 btrfs_wait_block_group_reservations(rc->block_group);
4047 btrfs_wait_nocow_writers(rc->block_group);
4048 btrfs_wait_ordered_roots(fs_info, U64_MAX,
4049 rc->block_group->start,
4050 rc->block_group->length);
4052 ret = btrfs_zone_finish(rc->block_group);
4053 WARN_ON(ret && ret != -EAGAIN);
4058 mutex_lock(&fs_info->cleaner_mutex);
4059 ret = relocate_block_group(rc);
4060 mutex_unlock(&fs_info->cleaner_mutex);
4064 finishes_stage = rc->stage;
4066 * We may have gotten ENOSPC after we already dirtied some
4067 * extents. If writeout happens while we're relocating a
4068 * different block group we could end up hitting the
4069 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4070 * btrfs_reloc_cow_block. Make sure we write everything out
4071 * properly so we don't trip over this problem, and then break
4072 * out of the loop if we hit an error.
4074 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4075 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4079 invalidate_mapping_pages(rc->data_inode->i_mapping,
4081 rc->stage = UPDATE_DATA_PTRS;
4087 if (rc->extents_found == 0)
4090 btrfs_info(fs_info, "found %llu extents, stage: %s",
4091 rc->extents_found, stage_to_string(finishes_stage));
4094 WARN_ON(rc->block_group->pinned > 0);
4095 WARN_ON(rc->block_group->reserved > 0);
4096 WARN_ON(rc->block_group->used > 0);
4099 btrfs_dec_block_group_ro(rc->block_group);
4100 iput(rc->data_inode);
4102 btrfs_put_block_group(bg);
4103 reloc_chunk_end(fs_info);
4104 free_reloc_control(rc);
4108 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4110 struct btrfs_fs_info *fs_info = root->fs_info;
4111 struct btrfs_trans_handle *trans;
4114 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4116 return PTR_ERR(trans);
4118 memset(&root->root_item.drop_progress, 0,
4119 sizeof(root->root_item.drop_progress));
4120 btrfs_set_root_drop_level(&root->root_item, 0);
4121 btrfs_set_root_refs(&root->root_item, 0);
4122 ret = btrfs_update_root(trans, fs_info->tree_root,
4123 &root->root_key, &root->root_item);
4125 err = btrfs_end_transaction(trans);
4132 * recover relocation interrupted by system crash.
4134 * this function resumes merging reloc trees with corresponding fs trees.
4135 * this is important for keeping the sharing of tree blocks
4137 int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
4139 LIST_HEAD(reloc_roots);
4140 struct btrfs_key key;
4141 struct btrfs_root *fs_root;
4142 struct btrfs_root *reloc_root;
4143 struct btrfs_path *path;
4144 struct extent_buffer *leaf;
4145 struct reloc_control *rc = NULL;
4146 struct btrfs_trans_handle *trans;
4150 path = btrfs_alloc_path();
4153 path->reada = READA_BACK;
4155 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4156 key.type = BTRFS_ROOT_ITEM_KEY;
4157 key.offset = (u64)-1;
4160 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4167 if (path->slots[0] == 0)
4171 leaf = path->nodes[0];
4172 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4173 btrfs_release_path(path);
4175 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4176 key.type != BTRFS_ROOT_ITEM_KEY)
4179 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
4180 if (IS_ERR(reloc_root)) {
4181 err = PTR_ERR(reloc_root);
4185 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
4186 list_add(&reloc_root->root_list, &reloc_roots);
4188 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4189 fs_root = btrfs_get_fs_root(fs_info,
4190 reloc_root->root_key.offset, false);
4191 if (IS_ERR(fs_root)) {
4192 ret = PTR_ERR(fs_root);
4193 if (ret != -ENOENT) {
4197 ret = mark_garbage_root(reloc_root);
4203 btrfs_put_root(fs_root);
4207 if (key.offset == 0)
4212 btrfs_release_path(path);
4214 if (list_empty(&reloc_roots))
4217 rc = alloc_reloc_control(fs_info);
4223 ret = reloc_chunk_start(fs_info);
4229 rc->extent_root = btrfs_extent_root(fs_info, 0);
4231 set_reloc_control(rc);
4233 trans = btrfs_join_transaction(rc->extent_root);
4234 if (IS_ERR(trans)) {
4235 err = PTR_ERR(trans);
4239 rc->merge_reloc_tree = 1;
4241 while (!list_empty(&reloc_roots)) {
4242 reloc_root = list_entry(reloc_roots.next,
4243 struct btrfs_root, root_list);
4244 list_del(&reloc_root->root_list);
4246 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4247 list_add_tail(&reloc_root->root_list,
4252 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
4254 if (IS_ERR(fs_root)) {
4255 err = PTR_ERR(fs_root);
4256 list_add_tail(&reloc_root->root_list, &reloc_roots);
4257 btrfs_end_transaction(trans);
4261 err = __add_reloc_root(reloc_root);
4262 ASSERT(err != -EEXIST);
4264 list_add_tail(&reloc_root->root_list, &reloc_roots);
4265 btrfs_put_root(fs_root);
4266 btrfs_end_transaction(trans);
4269 fs_root->reloc_root = btrfs_grab_root(reloc_root);
4270 btrfs_put_root(fs_root);
4273 err = btrfs_commit_transaction(trans);
4277 merge_reloc_roots(rc);
4279 unset_reloc_control(rc);
4281 trans = btrfs_join_transaction(rc->extent_root);
4282 if (IS_ERR(trans)) {
4283 err = PTR_ERR(trans);
4286 err = btrfs_commit_transaction(trans);
4288 ret = clean_dirty_subvols(rc);
4289 if (ret < 0 && !err)
4292 unset_reloc_control(rc);
4294 reloc_chunk_end(fs_info);
4295 free_reloc_control(rc);
4297 free_reloc_roots(&reloc_roots);
4299 btrfs_free_path(path);
4302 /* cleanup orphan inode in data relocation tree */
4303 fs_root = btrfs_grab_root(fs_info->data_reloc_root);
4305 err = btrfs_orphan_cleanup(fs_root);
4306 btrfs_put_root(fs_root);
4312 * helper to add ordered checksums for data relocation.
4314 * cloning the checksums properly handles the nodatasum extents.
4315 * it also saves the CPU time of re-calculating the checksums.
4317 int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
4319 struct btrfs_fs_info *fs_info = inode->root->fs_info;
4320 struct btrfs_root *csum_root;
4321 struct btrfs_ordered_sum *sums;
4322 struct btrfs_ordered_extent *ordered;
4328 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4329 BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
4331 disk_bytenr = file_pos + inode->index_cnt;
4332 csum_root = btrfs_csum_root(fs_info, disk_bytenr);
4333 ret = btrfs_lookup_csums_range(csum_root, disk_bytenr,
4334 disk_bytenr + len - 1, &list, 0, false);
4338 while (!list_empty(&list)) {
4339 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4340 list_del_init(&sums->list);
4343 * We need to offset the new_bytenr based on where the csum is.
4344 * We need to do this because we will read in entire prealloc
4345 * extents but we may have written to say the middle of the
4346 * prealloc extent, so we need to make sure the csum goes with
4347 * the right disk offset.
4349 * We can do this because the data reloc inode refers strictly
4350 * to the on disk bytes, so we don't have to worry about
4351 * disk_len vs real len like with real inodes since it's all
4352 * on disk.
4354 new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
4355 sums->bytenr = new_bytenr;
4357 btrfs_add_ordered_sum(ordered, sums);
4360 btrfs_put_ordered_extent(ordered);
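/*
 * Worked example of the csum rebasing above (illustrative numbers): the
 * cluster was read from disk_bytenr = 100M and written to a prealloc
 * extent at ordered->disk_bytenr = 200M. A csum item found at
 * sums->bytenr = 100M + 64K becomes:
 *
 *	new_bytenr = 200M + (100M + 64K) - 100M = 200M + 64K
 *
 * i.e. each checksum keeps its relative offset while the extent moves.
 */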
4364 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4365 struct btrfs_root *root, struct extent_buffer *buf,
4366 struct extent_buffer *cow)
4368 struct btrfs_fs_info *fs_info = root->fs_info;
4369 struct reloc_control *rc;
4370 struct btrfs_backref_node *node;
4375 rc = fs_info->reloc_ctl;
4379 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
4381 level = btrfs_header_level(buf);
4382 if (btrfs_header_generation(buf) <=
4383 btrfs_root_last_snapshot(&root->root_item))
4386 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4387 rc->create_reloc_tree) {
4388 WARN_ON(!first_cow && level == 0);
4390 node = rc->backref_cache.path[level];
4391 BUG_ON(node->bytenr != buf->start &&
4392 node->new_bytenr != buf->start);
4394 btrfs_backref_drop_node_buffer(node);
4395 atomic_inc(&cow->refs);
4397 node->new_bytenr = cow->start;
4399 if (!node->pending) {
4400 list_move_tail(&node->list,
4401 &rc->backref_cache.pending[level]);
4406 mark_block_processed(rc, node);
4408 if (first_cow && level > 0)
4409 rc->nodes_relocated += buf->len;
4412 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4413 ret = replace_file_extents(trans, rc, root, cow);
4418 * called before creating a snapshot. it calculates the metadata reservation
4419 * required for relocating tree blocks in the snapshot.
4421 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4422 u64 *bytes_to_reserve)
4424 struct btrfs_root *root = pending->root;
4425 struct reloc_control *rc = root->fs_info->reloc_ctl;
4427 if (!rc || !have_reloc_root(root))
4430 if (!rc->merge_reloc_tree)
4433 root = root->reloc_root;
4434 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4436 * relocation is in the stage of merging trees. the space
4437 * used by merging a reloc tree is twice the size of the
4438 * relocated tree nodes in the worst case: half for cowing
4439 * the reloc tree, half for cowing the fs tree. the space
4440 * used by cowing the reloc tree will be freed after the
4441 * tree is dropped. if we create a snapshot, cowing the fs
4442 * tree may use more space than it frees, so we need to
4443 * reserve extra space.
4445 *bytes_to_reserve += rc->nodes_relocated;
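/*
 * Worked example (illustrative): with rc->nodes_relocated = 2 MiB, the
 * merge may need up to 4 MiB (2 MiB to COW the reloc tree + 2 MiB to
 * COW the fs tree, per the comment above). Since a snapshot taken
 * during the merge can only increase the fs-tree COW cost, the pending
 * snapshot reserves an extra nodes_relocated (2 MiB) on top.
 */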
4449 * called after a snapshot is created. migrate the block reservation
4450 * and create a reloc root for the newly created snapshot.
4452 * This is similar to btrfs_init_reloc_root(); we come out of here with two
4453 * references held on the reloc_root, one for root->reloc_root and one for
4454 * rc->reloc_roots.
4456 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4457 struct btrfs_pending_snapshot *pending)
4459 struct btrfs_root *root = pending->root;
4460 struct btrfs_root *reloc_root;
4461 struct btrfs_root *new_root;
4462 struct reloc_control *rc = root->fs_info->reloc_ctl;
4465 if (!rc || !have_reloc_root(root))
4468 rc = root->fs_info->reloc_ctl;
4469 rc->merging_rsv_size += rc->nodes_relocated;
4471 if (rc->merge_reloc_tree) {
4472 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4474 rc->nodes_relocated, true);
4479 new_root = pending->snap;
4480 reloc_root = create_reloc_root(trans, root->reloc_root,
4481 new_root->root_key.objectid);
4482 if (IS_ERR(reloc_root))
4483 return PTR_ERR(reloc_root);
4485 ret = __add_reloc_root(reloc_root);
4486 ASSERT(ret != -EEXIST);
4488 /* Pairs with create_reloc_root */
4489 btrfs_put_root(reloc_root);
4492 new_root->reloc_root = btrfs_grab_root(reloc_root);
4494 if (rc->create_reloc_tree)
4495 ret = clone_backref_node(trans, rc, root, reloc_root);