// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}
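/*
 * Illustrative example (added note, not from the original source): for a
 * file extent item with btrfs_file_extent_offset() == 8192 and
 * btrfs_file_extent_num_bytes() == 16384, an extent_item_pos of 12288
 * falls inside the referenced range, and the inode element records the
 * file offset key->offset + (12288 - 8192).
 */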
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}
struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}
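/*
 * Example of the counting rule above (illustrative): two prelim_refs for
 * the same extent that each end up with ref->count == 1 after delayed refs
 * are applied push share_count to 2, so extent_is_shared() returns
 * BACKREF_FOUND_SHARED; a ref whose count drops back to 0 decrements
 * share_count again.
 */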
static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}
/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}
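/*
 * Note (added): the comparison above orders refs by (level, root_id,
 * key_for_search, parent), giving the total order that the rbtrees in
 * struct preftrees rely on; refs that compare equal are merged on insert
 * by prelim_ref_insert() below.
 */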
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}
/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}
/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
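/*
 * Worked example for the tables above (illustrative): a
 * BTRFS_SHARED_BLOCK_REF_KEY backref carries the parent block's logical
 * address directly (column 1), while a BTRFS_EXTENT_DATA_REF_KEY backref
 * carries (root, objectid, offset), so the parent leaf must be found by
 * searching that root with the reconstructed key (column 4).
 */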
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}
/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes.  This happens when qgroups does backref walks when
	 * adding new delayed refs.  To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
	 * here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free_path;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free_path:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}
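/*
 * Example of the offset underflow described above (illustrative): cloning
 * from file offset 0 out of a source extent whose data offset is 8MiB can
 * produce a backref offset of 0 - 8MiB, which as a u64 wraps to a value
 * well above LLONG_MAX; resetting search_key.offset to 0 makes the search
 * land at or before the matching file extent item instead of past it.
 */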
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
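/*
 * Overview (added note): resolve_indirect_refs() below drains the indirect
 * rbtree, turns each (root, key, level) ref into one or more parent block
 * addresses via resolve_indirect_ref(), and re-inserts the results into
 * the direct rbtree, where identical refs are merged on insertion.
 */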
/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * we can only tolerate -ENOENT; otherwise we should catch
		 * the error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}
/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}
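/*
 * Example of the count bookkeeping above (illustrative): a
 * BTRFS_ADD_DELAYED_REF with ref_mod == 1 contributes count = +1 and a
 * matching BTRFS_DROP_DELAYED_REF contributes count = -1; once both are
 * merged into one prelim_ref by prelim_ref_insert(), the net ref->count
 * of 0 correctly hides the extent from the final walk.
 */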
/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and behave
 * much like the trans == NULL case, the only difference being that it will
 * not use the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}
/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}
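/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller resolving which leaves reference the extent at @bytenr might do:
 *
 *	struct ulist *leafs;
 *
 *	ret = btrfs_find_all_leafs(trans, fs_info, bytenr, time_seq,
 *				   &leafs, &extent_item_pos, false);
 *	// on success, walk leafs with ulist_next(); each node->aux may
 *	// carry an extent_inode_elem list that must be freed as well
 *	// (free_leaf_list() above does both for callers in this file)
 */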
/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret = 0;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}
/*
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}
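/*
 * Usage sketch (illustrative): fiemap-style callers pre-allocate the two
 * ulists once and reuse them across extents:
 *
 *	ret = btrfs_check_shared(root, btrfs_ino(inode), disk_bytenr,
 *				 roots, tmp);
 *	if (ret == 1)
 *		flags |= FIEMAP_EXTENT_SHARED;
 *
 * The ulists are only released (not freed) here, so repeated calls avoid
 * reallocation.
 */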
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}
/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_read(eb);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
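/*
 * Example of the backwards fill described above (illustrative): resolving
 * "dir/file" into a 16 byte buffer writes '\0' at dest[15], "file" at
 * dest[11..14], '/' at dest[10] and "dir" at dest[7..9], then returns
 * dest + 7. With a 4 byte buffer the same walk would return a pointer
 * smaller than dest, telling the caller how much space was missing.
 */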
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG();
		return 0;
	}
	return -EIO;
}
/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}
/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx,
			  bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx, ignore_offset);

	return ret;
}
typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid,
				fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}
static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}
/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}
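/*
 * Usage sketch (illustrative): a caller collecting every filesystem path
 * of an inode builds an ipath, runs the iteration and releases it:
 *
 *	ipath = init_ipath(size, fs_root, path);
 *	ret = paths_from_inode(inum, ipath);
 *	// on success, paths are at ipath->fspath->val[0..elem_cnt-1]
 *	free_ipath(ipath);
 */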
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}
/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}
void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}
struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backref yet.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, that we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}
2418 * Go to the next backref item of current bytenr, can be either inlined or
2421 * Caller needs to check whether it's inline ref or not by iter->cur_key.
2423 * Return 0 if we get next backref without problem.
2424 * Return >0 if there is no extra backref for this bytenr.
2425 * Return <0 if there is something wrong happened.
2427 int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2428 {
2429 struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2430 struct btrfs_path *path = iter->path;
2431 struct btrfs_extent_inline_ref *iref;
2432 int ret;
2433 u32 size;
2435 if (btrfs_backref_iter_is_inline_ref(iter)) {
2436 /* We're still inside the inline refs */
2437 ASSERT(iter->cur_ptr < iter->end_ptr);
2439 if (btrfs_backref_has_tree_block_info(iter)) {
2440 /* First tree block info */
2441 size = sizeof(struct btrfs_tree_block_info);
2442 } else {
2443 /* Use inline ref type to determine the size */
2444 int type;
2446 iref = (struct btrfs_extent_inline_ref *)
2447 ((unsigned long)iter->cur_ptr);
2448 type = btrfs_extent_inline_ref_type(eb, iref);
2450 size = btrfs_extent_inline_ref_size(type);
2451 }
2452 iter->cur_ptr += size;
2453 if (iter->cur_ptr < iter->end_ptr)
2454 return 0;
2456 /* All inline items iterated, fall through */
2457 }
2459 /* We're at keyed items, there is no inline item, go to the next one */
2460 ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2461 if (ret)
2462 return ret;
2464 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2465 if (iter->cur_key.objectid != iter->bytenr ||
2466 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2467 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2468 return 1;
2469 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2470 path->slots[0]);
2471 iter->cur_ptr = iter->item_ptr;
2472 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(path->nodes[0],
2473 path->slots[0]));
2474 return 0;
2475 }
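/*
 * Usage sketch (editor's addition, not part of the original file): the
 * typical shape of a walk over all tree backrefs of @bytenr with the
 * iterator above.  The function name is illustrative only; real users such
 * as btrfs_backref_add_tree_node() decode each ref inside the loop body.
 */
static int __maybe_unused backref_iter_example(struct btrfs_fs_info *fs_info,
					       u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0)
		goto out;

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		/*
		 * Use btrfs_backref_iter_is_inline_ref() to tell whether the
		 * current ref is inline (decode it via iter->cur_ptr) or
		 * keyed (its key is already in iter->cur_key).
		 */
	}
	if (ret > 0)	/* No more backrefs, not an error */
		ret = 0;
out:
	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);
	return ret;
}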
2477 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2478 struct btrfs_backref_cache *cache, int is_reloc)
2479 {
2480 int i;
2482 cache->rb_root = RB_ROOT;
2483 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2484 INIT_LIST_HEAD(&cache->pending[i]);
2485 INIT_LIST_HEAD(&cache->changed);
2486 INIT_LIST_HEAD(&cache->detached);
2487 INIT_LIST_HEAD(&cache->leaves);
2488 INIT_LIST_HEAD(&cache->pending_edge);
2489 INIT_LIST_HEAD(&cache->useless_node);
2490 cache->fs_info = fs_info;
2491 cache->is_reloc = is_reloc;
2492 }
2494 struct btrfs_backref_node *btrfs_backref_alloc_node(
2495 struct btrfs_backref_cache *cache, u64 bytenr, int level)
2496 {
2497 struct btrfs_backref_node *node;
2499 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2500 node = kzalloc(sizeof(*node), GFP_NOFS);
2501 if (!node)
2502 return node;
2504 INIT_LIST_HEAD(&node->list);
2505 INIT_LIST_HEAD(&node->upper);
2506 INIT_LIST_HEAD(&node->lower);
2507 RB_CLEAR_NODE(&node->rb_node);
2508 cache->nr_nodes++;
2509 node->level = level;
2510 node->bytenr = bytenr;
2512 return node;
2513 }
2515 struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2516 struct btrfs_backref_cache *cache)
2517 {
2518 struct btrfs_backref_edge *edge;
2520 edge = kzalloc(sizeof(*edge), GFP_NOFS);
2521 if (edge)
2522 cache->nr_edges++;
2523 return edge;
2524 }
2527 * Drop the backref node from cache, also cleaning up all its
2528 * upper edges and any uncached nodes in the path.
2530 * This cleanup happens bottom up, thus the node should either
2531 * be the lowest node in the cache or a detached node.
2533 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2534 struct btrfs_backref_node *node)
2535 {
2536 struct btrfs_backref_node *upper;
2537 struct btrfs_backref_edge *edge;
2539 if (!node)
2540 return;
2542 BUG_ON(!node->lowest && !node->detached);
2543 while (!list_empty(&node->upper)) {
2544 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2545 list[LOWER]);
2546 upper = edge->node[UPPER];
2547 list_del(&edge->list[LOWER]);
2548 list_del(&edge->list[UPPER]);
2549 btrfs_backref_free_edge(cache, edge);
2551 if (RB_EMPTY_NODE(&upper->rb_node)) {
2552 BUG_ON(!list_empty(&node->upper));
2553 btrfs_backref_drop_node(cache, node);
2554 node = upper;
2555 node->lowest = 1;
2556 continue;
2557 }
2558 /*
2559 * Add the node to leaf node list if no other child block
2560 * is cached.
2561 */
2562 if (list_empty(&upper->lower)) {
2563 list_add_tail(&upper->lower, &cache->leaves);
2564 upper->lowest = 1;
2565 }
2566 }
2568 btrfs_backref_drop_node(cache, node);
2569 }
2572 * Release all nodes/edges from current cache
2574 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2575 {
2576 struct btrfs_backref_node *node;
2577 int i;
2579 while (!list_empty(&cache->detached)) {
2580 node = list_entry(cache->detached.next,
2581 struct btrfs_backref_node, list);
2582 btrfs_backref_cleanup_node(cache, node);
2583 }
2585 while (!list_empty(&cache->leaves)) {
2586 node = list_entry(cache->leaves.next,
2587 struct btrfs_backref_node, lower);
2588 btrfs_backref_cleanup_node(cache, node);
2589 }
2591 cache->last_trans = 0;
2593 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2594 ASSERT(list_empty(&cache->pending[i]));
2595 ASSERT(list_empty(&cache->pending_edge));
2596 ASSERT(list_empty(&cache->useless_node));
2597 ASSERT(list_empty(&cache->changed));
2598 ASSERT(list_empty(&cache->detached));
2599 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2600 ASSERT(!cache->nr_nodes);
2601 ASSERT(!cache->nr_edges);
2602 }
2605 * Handle direct tree backref
2607 * A direct tree backref means the backref item records its parent bytenr
2608 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
2610 * @ref_key: The converted backref key.
2611 * For keyed backref, it's the item key.
2612 * For inlined backref, objectid is the bytenr,
2613 * type is btrfs_inline_ref_type, offset is
2614 * btrfs_inline_ref_offset.
2616 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2617 struct btrfs_key *ref_key,
2618 struct btrfs_backref_node *cur)
2619 {
2620 struct btrfs_backref_edge *edge;
2621 struct btrfs_backref_node *upper;
2622 struct rb_node *rb_node;
2624 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2626 /* Only reloc root uses backref pointing to itself */
2627 if (ref_key->objectid == ref_key->offset) {
2628 struct btrfs_root *root;
2630 cur->is_reloc_root = 1;
2631 /* Only reloc backref cache cares about a specific root */
2632 if (cache->is_reloc) {
2633 root = find_reloc_root(cache->fs_info, cur->bytenr);
2634 if (!root)
2635 return -ENOENT;
2636 cur->root = root;
2637 } else {
2638 /*
2639 * For generic purpose backref cache, reloc root node
2640 * is useless.
2641 */
2642 list_add(&cur->list, &cache->useless_node);
2643 }
2644 return 0;
2645 }
2647 edge = btrfs_backref_alloc_edge(cache);
2648 if (!edge)
2649 return -ENOMEM;
2651 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2652 if (!rb_node) {
2653 /* Parent node not yet cached */
2654 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2655 cur->level + 1);
2656 if (!upper) {
2657 btrfs_backref_free_edge(cache, edge);
2658 return -ENOMEM;
2659 }
2661 /*
2662 * Backrefs for the upper level block aren't cached, add the
2663 * block to the pending list
2664 */
2665 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2666 } else {
2667 /* Parent node already cached */
2668 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2669 ASSERT(upper->checked);
2670 INIT_LIST_HEAD(&edge->list[UPPER]);
2671 }
2672 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2673 return 0;
2674 }
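/*
 * Worked example (editor's addition; the bytenr and root values below are
 * hypothetical): a tree block at bytenr 30408704 whose parent node sits at
 * bytenr 30425088 and which is owned by subvolume tree 257 can carry two
 * kinds of backrefs:
 *
 *   direct:   (30408704 SHARED_BLOCK_REF 30425088) - offset is the parent
 *             bytenr, handled by handle_direct_tree_backref() above
 *   indirect: (30408704 TREE_BLOCK_REF 257)        - offset is only the
 *             owning root, handled by handle_indirect_tree_backref() below,
 *             which must search that tree to locate the parent
 */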
2677 * Handle indirect tree backref
2679 * An indirect tree backref means we only know which tree the node belongs
2680 * to. We still need to do a tree search to find out the parents. This is
2681 * for TREE_BLOCK_REF backref (keyed or inlined).
2683 * @ref_key: The same as @ref_key in handle_direct_tree_backref()
2684 * @tree_key: The first key of this tree block.
2685 * @path: A clean (released) path, to avoid allocating a path every time
2686 * the function gets called.
2688 static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2689 struct btrfs_path *path,
2690 struct btrfs_key *ref_key,
2691 struct btrfs_key *tree_key,
2692 struct btrfs_backref_node *cur)
2693 {
2694 struct btrfs_fs_info *fs_info = cache->fs_info;
2695 struct btrfs_backref_node *upper;
2696 struct btrfs_backref_node *lower;
2697 struct btrfs_backref_edge *edge;
2698 struct extent_buffer *eb;
2699 struct btrfs_root *root;
2700 struct rb_node *rb_node;
2701 int level;
2702 bool need_check = true;
2703 int ret;
2705 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2706 if (IS_ERR(root))
2707 return PTR_ERR(root);
2708 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2709 cur->cowonly = 1;
2711 if (btrfs_root_level(&root->root_item) == cur->level) {
2712 /* Tree root */
2713 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2715 * For reloc backref cache, we may ignore reloc root. But for
2716 * general purpose backref cache, we can't rely on
2717 * btrfs_should_ignore_reloc_root() as it may conflict with
2718 * current running relocation and lead to missing root.
2720 * For general purpose backref cache, reloc root detection is
2721 * completely relying on direct backref (key->offset is parent
2722 * bytenr), thus only do such check for reloc cache.
2724 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2725 btrfs_put_root(root);
2726 list_add(&cur->list, &cache->useless_node);
2727 } else {
2728 cur->root = root;
2729 }
2730 return 0;
2731 }
2733 level = cur->level + 1;
2735 /* Search the tree to find parent blocks referring to the block */
2736 path->search_commit_root = 1;
2737 path->skip_locking = 1;
2738 path->lowest_level = level;
2739 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2740 path->lowest_level = 0;
2741 if (ret < 0) {
2742 btrfs_put_root(root);
2743 return ret;
2744 }
2745 if (ret > 0 && path->slots[level] > 0)
2746 path->slots[level]--;
2748 eb = path->nodes[level];
2749 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2750 btrfs_err(fs_info,
2751 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2752 cur->bytenr, level - 1, root->root_key.objectid,
2753 tree_key->objectid, tree_key->type, tree_key->offset);
2754 btrfs_put_root(root);
2755 ret = -ENOENT;
2756 goto out;
2757 }
2758 lower = cur;
2760 /* Add all nodes and edges in the path */
2761 for (; level < BTRFS_MAX_LEVEL; level++) {
2762 if (!path->nodes[level]) {
2763 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2764 lower->bytenr);
2765 /* Same as previous should_ignore_reloc_root() call */
2766 if (btrfs_should_ignore_reloc_root(root) &&
2767 cache->is_reloc) {
2768 btrfs_put_root(root);
2769 list_add(&lower->list, &cache->useless_node);
2770 } else {
2771 lower->root = root;
2772 }
2773 break;
2774 }
2776 edge = btrfs_backref_alloc_edge(cache);
2777 if (!edge) {
2778 btrfs_put_root(root);
2779 ret = -ENOMEM;
2780 goto out;
2781 }
2783 eb = path->nodes[level];
2784 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2785 if (!rb_node) {
2786 upper = btrfs_backref_alloc_node(cache, eb->start,
2787 lower->level + 1);
2788 if (!upper) {
2789 btrfs_put_root(root);
2790 btrfs_backref_free_edge(cache, edge);
2791 ret = -ENOMEM;
2792 goto out;
2793 }
2794 upper->owner = btrfs_header_owner(eb);
2795 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2796 upper->cowonly = 1;
2798 /*
2799 * If we know the block isn't shared we can avoid
2800 * checking its backrefs.
2801 */
2802 if (btrfs_block_can_be_shared(root, eb))
2803 upper->checked = 0;
2804 else
2805 upper->checked = 1;
2807 /*
2808 * Add the block to pending list if we need to check its
2809 * backrefs, we only do this once while walking up a
2810 * tree as we will catch anything else later on.
2811 */
2812 if (!upper->checked && need_check) {
2813 need_check = false;
2814 list_add_tail(&edge->list[UPPER],
2815 &cache->pending_edge);
2816 } else {
2817 if (upper->checked)
2818 need_check = true;
2819 INIT_LIST_HEAD(&edge->list[UPPER]);
2820 }
2821 } else {
2822 upper = rb_entry(rb_node, struct btrfs_backref_node,
2823 rb_node);
2824 ASSERT(upper->checked);
2825 INIT_LIST_HEAD(&edge->list[UPPER]);
2826 if (!upper->owner)
2827 upper->owner = btrfs_header_owner(eb);
2828 }
2829 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2831 if (rb_node) {
2832 btrfs_put_root(root);
2833 break;
2834 }
2835 lower = upper;
2836 upper = NULL;
2837 }
2838 out:
2839 btrfs_release_path(path);
2840 return ret;
2841 }
2844 * Add backref node @cur into @cache.
2846 * NOTE: Even if the function returns 0, @cur is not yet cached, as its upper
2847 * links aren't yet bi-directional. Use btrfs_backref_finish_upper_links()
2848 * to finish such linkage.
2850 * @path: Released path for indirect tree backref lookup
2851 * @iter: Released backref iter for extent tree search
2852 * @node_key: The first key of the tree block
2854 int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2855 struct btrfs_path *path,
2856 struct btrfs_backref_iter *iter,
2857 struct btrfs_key *node_key,
2858 struct btrfs_backref_node *cur)
2859 {
2860 struct btrfs_fs_info *fs_info = cache->fs_info;
2861 struct btrfs_backref_edge *edge;
2862 struct btrfs_backref_node *exist;
2863 int ret;
2865 ret = btrfs_backref_iter_start(iter, cur->bytenr);
2866 if (ret < 0)
2867 return ret;
2869 * We skip the first btrfs_tree_block_info, as we don't use the key
2870 * stored in it, but fetch it from the tree block
2872 if (btrfs_backref_has_tree_block_info(iter)) {
2873 ret = btrfs_backref_iter_next(iter);
2874 if (ret < 0)
2875 goto out;
2876 /* No extra backref? This means the tree block is corrupted */
2877 if (ret > 0) {
2878 ret = -EUCLEAN;
2879 goto out;
2880 }
2881 }
2882 WARN_ON(cur->checked);
2883 if (!list_empty(&cur->upper)) {
2885 * The backref was added previously when processing backref of
2886 * type BTRFS_TREE_BLOCK_REF_KEY
2888 ASSERT(list_is_singular(&cur->upper));
2889 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2890 list[LOWER]);
2891 ASSERT(list_empty(&edge->list[UPPER]));
2892 exist = edge->node[UPPER];
2893 /*
2894 * Add the upper level block to the pending list if we need to
2895 * check its backrefs
2896 */
2897 if (!exist->checked)
2898 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2899 } else {
2900 exist = NULL;
2901 }
2903 for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2904 struct extent_buffer *eb;
2905 struct btrfs_key key;
2906 int type;
2908 cond_resched();
2909 eb = btrfs_backref_get_eb(iter);
2911 key.objectid = iter->bytenr;
2912 if (btrfs_backref_iter_is_inline_ref(iter)) {
2913 struct btrfs_extent_inline_ref *iref;
2915 /* Update key for inline backref */
2916 iref = (struct btrfs_extent_inline_ref *)
2917 ((unsigned long)iter->cur_ptr);
2918 type = btrfs_get_extent_inline_ref_type(eb, iref,
2919 BTRFS_REF_TYPE_BLOCK);
2920 if (type == BTRFS_REF_TYPE_INVALID) {
2921 ret = -EUCLEAN;
2922 goto out;
2923 }
2924 key.type = type;
2925 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2926 } else {
2927 key.type = iter->cur_key.type;
2928 key.offset = iter->cur_key.offset;
2929 }
2931 /*
2932 * Parent node found and matches current inline ref, no need to
2933 * rebuild this node for this inline ref.
2934 */
2935 if (exist &&
2936 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2937 exist->owner == key.offset) ||
2938 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2939 exist->bytenr == key.offset))) {
2940 exist = NULL;
2941 continue;
2942 }
2944 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
2945 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
2946 ret = handle_direct_tree_backref(cache, &key, cur);
2947 if (ret < 0)
2948 goto out;
2949 continue;
2950 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
2951 ret = -EINVAL;
2952 btrfs_print_v0_err(fs_info);
2953 btrfs_handle_fs_error(fs_info, ret, NULL);
2954 goto out;
2955 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
2956 continue;
2957 }
2959 /*
2960 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
2961 * means the root objectid. We need to search the tree to get
2962 * its parent bytenr.
2963 */
2964 ret = handle_indirect_tree_backref(cache, path, &key, node_key,
2965 cur);
2966 if (ret < 0)
2967 goto out;
2968 }
2969 ret = 0;
2970 cur->checked = 1;
2971 WARN_ON(exist);
2972 out:
2973 btrfs_backref_iter_release(iter);
2974 return ret;
2975 }
2978 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
2980 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
2981 struct btrfs_backref_node *start)
2982 {
2983 struct list_head *useless_node = &cache->useless_node;
2984 struct btrfs_backref_edge *edge;
2985 struct rb_node *rb_node;
2986 LIST_HEAD(pending_edge);
2988 ASSERT(start->checked);
2990 /* Insert this node to cache if it's not COW-only */
2991 if (!start->cowonly) {
2992 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
2993 &start->rb_node);
2994 if (rb_node)
2995 btrfs_backref_panic(cache->fs_info, start->bytenr,
2996 -EEXIST);
2997 list_add_tail(&start->lower, &cache->leaves);
2998 }
3000 /*
3001 * Use breadth first search to iterate all related edges.
3003 * The starting points are all the edges of this node
3005 list_for_each_entry(edge, &start->upper, list[LOWER])
3006 list_add_tail(&edge->list[UPPER], &pending_edge);
3008 while (!list_empty(&pending_edge)) {
3009 struct btrfs_backref_node *upper;
3010 struct btrfs_backref_node *lower;
3012 edge = list_first_entry(&pending_edge,
3013 struct btrfs_backref_edge, list[UPPER]);
3014 list_del_init(&edge->list[UPPER]);
3015 upper = edge->node[UPPER];
3016 lower = edge->node[LOWER];
3018 /* Parent is detached, no need to keep any edges */
3019 if (upper->detached) {
3020 list_del(&edge->list[LOWER]);
3021 btrfs_backref_free_edge(cache, edge);
3023 /* Lower node is orphan, queue for cleanup */
3024 if (list_empty(&lower->upper))
3025 list_add(&lower->list, useless_node);
3026 continue;
3027 }
3030 * All new nodes added in current build_backref_tree() haven't
3031 * been linked to the cache rb tree.
3032 * So if we have upper->rb_node populated, this means a cache
3033 * hit. We only need to link the edge, as @upper and all its
3034 * parents have already been linked.
3036 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3037 if (upper->lowest) {
3038 list_del_init(&upper->lower);
3039 upper->lowest = 0;
3040 }
3042 list_add_tail(&edge->list[UPPER], &upper->lower);
3043 continue;
3044 }
3046 /* Sanity check, we shouldn't have any unchecked nodes */
3047 if (!upper->checked) {
3048 ASSERT(0);
3049 return -EUCLEAN;
3050 }
3052 /* Sanity check, COW-only node has non-COW-only parent */
3053 if (start->cowonly != upper->cowonly) {
3054 ASSERT(0);
3055 return -EUCLEAN;
3056 }
3058 /* Only cache non-COW-only (subvolume trees) tree blocks */
3059 if (!upper->cowonly) {
3060 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3061 &upper->rb_node);
3062 if (rb_node) {
3063 btrfs_backref_panic(cache->fs_info,
3064 upper->bytenr, -EEXIST);
3065 return -EUCLEAN;
3066 }
3067 }
3069 list_add_tail(&edge->list[UPPER], &upper->lower);
3072 * Also queue all the parent edges of this uncached node
3073 * to finish the upper linkage
3075 list_for_each_entry(edge, &upper->upper, list[LOWER])
3076 list_add_tail(&edge->list[UPPER], &pending_edge);
3077 }
3078 return 0;
3079 }
3081 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3082 struct btrfs_backref_node *node)
3083 {
3084 struct btrfs_backref_node *lower;
3085 struct btrfs_backref_node *upper;
3086 struct btrfs_backref_edge *edge;
3088 while (!list_empty(&cache->useless_node)) {
3089 lower = list_first_entry(&cache->useless_node,
3090 struct btrfs_backref_node, list);
3091 list_del_init(&lower->list);
3092 }
3093 while (!list_empty(&cache->pending_edge)) {
3094 edge = list_first_entry(&cache->pending_edge,
3095 struct btrfs_backref_edge, list[UPPER]);
3096 list_del(&edge->list[UPPER]);
3097 list_del(&edge->list[LOWER]);
3098 lower = edge->node[LOWER];
3099 upper = edge->node[UPPER];
3100 btrfs_backref_free_edge(cache, edge);
3103 * Lower is no longer linked to any upper backref nodes and
3104 * isn't in the cache, we can free it ourselves.
3106 if (list_empty(&lower->upper) &&
3107 RB_EMPTY_NODE(&lower->rb_node))
3108 list_add(&lower->list, &cache->useless_node);
3110 if (!RB_EMPTY_NODE(&upper->rb_node))
3111 continue;
3113 /* Add this node's upper edges to the list to process */
3114 list_for_each_entry(edge, &upper->upper, list[LOWER])
3115 list_add_tail(&edge->list[UPPER],
3116 &cache->pending_edge);
3117 if (list_empty(&upper->upper))
3118 list_add(&upper->list, &cache->useless_node);
3119 }
3121 while (!list_empty(&cache->useless_node)) {
3122 lower = list_first_entry(&cache->useless_node,
3123 struct btrfs_backref_node, list);
3124 list_del_init(&lower->list);
3125 if (lower == node)
3126 node = NULL;
3127 btrfs_backref_free_node(cache, lower);
3128 }
3130 btrfs_backref_cleanup_node(cache, node);
3131 ASSERT(list_empty(&cache->useless_node) &&
3132 list_empty(&cache->pending_edge));
3133 }
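/*
 * Usage sketch (editor's addition, not part of the original file): the
 * overall backref cache lifecycle, condensed from build_backref_tree() in
 * relocation.c.  Pending-edge bookkeeping is trimmed to the bare sequence,
 * and the function name and parameters are illustrative only.
 */
static struct btrfs_backref_node *__maybe_unused backref_cache_example(
		struct btrfs_backref_cache *cache, struct btrfs_key *node_key,
		int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_path *path = NULL;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_edge *edge;
	int ret;

	iter = btrfs_backref_iter_alloc(cache->fs_info, GFP_NOFS);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		ret = -ENOMEM;
		goto out;
	}
	node->lowest = 1;
	cur = node;

	/* Breadth-first search to resolve backrefs of the starting block */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
						  cur);
		if (ret < 0)
			goto out;
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/* If there are pending edges, keep processing upper blocks */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
out:
	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	if (ret < 0) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(ret);
	}
	return node;
}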