1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2011 STRATO. All rights reserved.
7 #include <linux/rbtree.h>
8 #include <trace/events/btrfs.h>
13 #include "transaction.h"
14 #include "delayed-ref.h"
17 #include "tree-mod-log.h"
19 /* Just an arbitrary number so we can be sure this happened */
20 #define BACKREF_FOUND_SHARED 6
22 struct extent_inode_elem {
25 struct extent_inode_elem *next;
28 static int check_extent_in_eb(const struct btrfs_key *key,
29 const struct extent_buffer *eb,
30 const struct btrfs_file_extent_item *fi,
32 struct extent_inode_elem **eie,
36 struct extent_inode_elem *e;
39 !btrfs_file_extent_compression(eb, fi) &&
40 !btrfs_file_extent_encryption(eb, fi) &&
41 !btrfs_file_extent_other_encoding(eb, fi)) {
45 data_offset = btrfs_file_extent_offset(eb, fi);
46 data_len = btrfs_file_extent_num_bytes(eb, fi);
48 if (extent_item_pos < data_offset ||
49 extent_item_pos >= data_offset + data_len)
51 offset = extent_item_pos - data_offset;
54 e = kmalloc(sizeof(*e), GFP_NOFS);
59 e->inum = key->objectid;
60 e->offset = key->offset + offset;
66 static void free_inode_elem_list(struct extent_inode_elem *eie)
68 struct extent_inode_elem *eie_next;
70 for (; eie; eie = eie_next) {
76 static int find_extent_in_eb(const struct extent_buffer *eb,
77 u64 wanted_disk_byte, u64 extent_item_pos,
78 struct extent_inode_elem **eie,
83 struct btrfs_file_extent_item *fi;
90 * from the shared data ref, we only have the leaf but we need
91 * the key. thus, we must look into all items and see that we
92 * find one (some) with a reference to our extent item.
94 nritems = btrfs_header_nritems(eb);
95 for (slot = 0; slot < nritems; ++slot) {
96 btrfs_item_key_to_cpu(eb, &key, slot);
97 if (key.type != BTRFS_EXTENT_DATA_KEY)
99 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
100 extent_type = btrfs_file_extent_type(eb, fi);
101 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
103 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
104 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
105 if (disk_byte != wanted_disk_byte)
108 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
117 struct rb_root_cached root;
121 #define PREFTREE_INIT { .root = RB_ROOT_CACHED, .count = 0 }
124 struct preftree direct; /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
125 struct preftree indirect; /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
126 struct preftree indirect_missing_keys;
130 * Checks for a shared extent during backref search.
132 * The share_count tracks prelim_refs (direct and indirect) having a
133 * ref->count > 0:
134 * - incremented when a ref->count transitions to >0
135 * - decremented when a ref->count transitions to <1
141 bool have_delayed_delete_refs;
144 static inline int extent_is_shared(struct share_check *sc)
146 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
149 static struct kmem_cache *btrfs_prelim_ref_cache;
151 int __init btrfs_prelim_ref_init(void)
153 btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
154 sizeof(struct prelim_ref),
158 if (!btrfs_prelim_ref_cache)
163 void __cold btrfs_prelim_ref_exit(void)
165 kmem_cache_destroy(btrfs_prelim_ref_cache);
168 static void free_pref(struct prelim_ref *ref)
170 kmem_cache_free(btrfs_prelim_ref_cache, ref);
174 * Return 0 when both refs are for the same block (and can be merged).
175 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
176 * indicates a 'higher' block.
178 static int prelim_ref_compare(struct prelim_ref *ref1,
179 struct prelim_ref *ref2)
181 if (ref1->level < ref2->level)
183 if (ref1->level > ref2->level)
185 if (ref1->root_id < ref2->root_id)
187 if (ref1->root_id > ref2->root_id)
189 if (ref1->key_for_search.type < ref2->key_for_search.type)
191 if (ref1->key_for_search.type > ref2->key_for_search.type)
193 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
195 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
197 if (ref1->key_for_search.offset < ref2->key_for_search.offset)
199 if (ref1->key_for_search.offset > ref2->key_for_search.offset)
201 if (ref1->parent < ref2->parent)
203 if (ref1->parent > ref2->parent)
209 static void update_share_count(struct share_check *sc, int oldcount,
212 if ((!sc) || (oldcount == 0 && newcount < 1))
215 if (oldcount > 0 && newcount < 1)
217 else if (oldcount < 1 && newcount > 0)
222 * Add @newref to the @root rbtree, merging identical refs.
224 * Callers should assume that newref has been freed after calling.
226 static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
227 struct preftree *preftree,
228 struct prelim_ref *newref,
229 struct share_check *sc)
231 struct rb_root_cached *root;
233 struct rb_node *parent = NULL;
234 struct prelim_ref *ref;
236 bool leftmost = true;
238 root = &preftree->root;
239 p = &root->rb_root.rb_node;
243 ref = rb_entry(parent, struct prelim_ref, rbnode);
244 result = prelim_ref_compare(ref, newref);
247 } else if (result > 0) {
251 /* Identical refs, merge them and free @newref */
252 struct extent_inode_elem *eie = ref->inode_list;
254 while (eie && eie->next)
258 ref->inode_list = newref->inode_list;
260 eie->next = newref->inode_list;
261 trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
264 * A delayed ref can have newref->count < 0.
265 * The ref->count is updated to follow any
266 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
268 update_share_count(sc, ref->count,
269 ref->count + newref->count);
270 ref->count += newref->count;
276 update_share_count(sc, 0, newref->count);
278 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
279 rb_link_node(&newref->rbnode, parent, p);
280 rb_insert_color_cached(&newref->rbnode, root, leftmost);
284 * Release the entire tree. We don't care about internal consistency so
285 * just free everything and then reset the tree root.
287 static void prelim_release(struct preftree *preftree)
289 struct prelim_ref *ref, *next_ref;
291 rbtree_postorder_for_each_entry_safe(ref, next_ref,
292 &preftree->root.rb_root, rbnode) {
293 free_inode_elem_list(ref->inode_list);
297 preftree->root = RB_ROOT_CACHED;
302 * the rules for all callers of this function are:
303 * - obtaining the parent is the goal
304 * - if you add a key, you must know that it is a correct key
305 * - if you cannot add the parent or a correct key, then we will look into the
306 * block later to set a correct key
310 * backref type | shared | indirect | shared | indirect
311 * information | tree | tree | data | data
312 * --------------------+--------+----------+--------+----------
313 * parent logical | y | - | - | -
314 * key to resolve | - | y | y | y
315 * tree block logical | - | - | - | -
316 * root for resolving | y | y | y | y
318 * - column 1: we have the parent -> done
319 * - column 2, 3, 4: we use the key to find the parent
321 * on disk refs (inline or keyed)
322 * ==============================
323 * backref type | shared | indirect | shared | indirect
324 * information | tree | tree | data | data
325 * --------------------+--------+----------+--------+----------
326 * parent logical | y | - | y | -
327 * key to resolve | - | - | - | y
328 * tree block logical | y | y | y | y
329 * root for resolving | - | y | y | y
331 * - column 1, 3: we have the parent -> done
332 * - column 2: we take the first key from the block to find the parent
333 * (see add_missing_keys)
334 * - column 4: we use the key to find the parent
336 * additional information that's available but not required to find the parent
337 * block might help in merging entries to gain some speed.
339 static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
340 struct preftree *preftree, u64 root_id,
341 const struct btrfs_key *key, int level, u64 parent,
342 u64 wanted_disk_byte, int count,
343 struct share_check *sc, gfp_t gfp_mask)
345 struct prelim_ref *ref;
347 if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
350 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
354 ref->root_id = root_id;
356 ref->key_for_search = *key;
358 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
360 ref->inode_list = NULL;
363 ref->parent = parent;
364 ref->wanted_disk_byte = wanted_disk_byte;
365 prelim_ref_insert(fs_info, preftree, ref, sc);
366 return extent_is_shared(sc);
369 /* direct refs use root == 0, key == NULL */
370 static int add_direct_ref(const struct btrfs_fs_info *fs_info,
371 struct preftrees *preftrees, int level, u64 parent,
372 u64 wanted_disk_byte, int count,
373 struct share_check *sc, gfp_t gfp_mask)
375 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
376 parent, wanted_disk_byte, count, sc, gfp_mask);
379 /* indirect refs use parent == 0 */
380 static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
381 struct preftrees *preftrees, u64 root_id,
382 const struct btrfs_key *key, int level,
383 u64 wanted_disk_byte, int count,
384 struct share_check *sc, gfp_t gfp_mask)
386 struct preftree *tree = &preftrees->indirect;
389 tree = &preftrees->indirect_missing_keys;
390 return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
391 wanted_disk_byte, count, sc, gfp_mask);
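
/*
 * Editorial sketch, not part of the original file: how the backref table
 * above maps onto the two wrappers. For an on-disk tree block backref,
 * column 1 (shared tree) carries the parent's logical address and becomes a
 * direct ref; column 2 (indirect tree) only names the owning root and
 * becomes an indirect ref whose key is filled in later by add_missing_keys().
 * The function name and parameters are hypothetical; add_keyed_refs() below
 * does the real dispatch.
 */
static int example_add_tree_ref(const struct btrfs_fs_info *fs_info,
				struct preftrees *preftrees, int type,
				u64 bytenr, int info_level, u64 offset)
{
	if (type == BTRFS_SHARED_BLOCK_REF_KEY)
		/* we have the parent -> direct ref, done */
		return add_direct_ref(fs_info, preftrees, info_level + 1,
				      offset, bytenr, 1, NULL, GFP_NOFS);
	/* only the root is known -> indirect ref, key == NULL */
	return add_indirect_ref(fs_info, preftrees, offset, NULL,
				info_level + 1, bytenr, 1, NULL, GFP_NOFS);
}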
394 static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
396 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
397 struct rb_node *parent = NULL;
398 struct prelim_ref *ref = NULL;
399 struct prelim_ref target = {};
402 target.parent = bytenr;
406 ref = rb_entry(parent, struct prelim_ref, rbnode);
407 result = prelim_ref_compare(ref, &target);
419 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
420 struct ulist *parents,
421 struct preftrees *preftrees, struct prelim_ref *ref,
422 int level, u64 time_seq, const u64 *extent_item_pos,
427 struct extent_buffer *eb;
428 struct btrfs_key key;
429 struct btrfs_key *key_for_search = &ref->key_for_search;
430 struct btrfs_file_extent_item *fi;
431 struct extent_inode_elem *eie = NULL, *old = NULL;
433 u64 wanted_disk_byte = ref->wanted_disk_byte;
438 eb = path->nodes[level];
439 ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
446 * 1. We normally enter this function with the path already pointing to
447 * the first item to check. But sometimes, we may enter it with
448 * slot == nritems.
449 * 2. We are searching for a normal backref but the bytenr of this leaf
450 * matches a shared data backref
451 * 3. The leaf owner is not equal to the root we are searching for
453 * For these cases, go to the next leaf before we continue.
456 if (path->slots[0] >= btrfs_header_nritems(eb) ||
457 is_shared_data_backref(preftrees, eb->start) ||
458 ref->root_id != btrfs_header_owner(eb)) {
459 if (time_seq == BTRFS_SEQ_LAST)
460 ret = btrfs_next_leaf(root, path);
462 ret = btrfs_next_old_leaf(root, path, time_seq);
465 while (!ret && count < ref->count) {
467 slot = path->slots[0];
469 btrfs_item_key_to_cpu(eb, &key, slot);
471 if (key.objectid != key_for_search->objectid ||
472 key.type != BTRFS_EXTENT_DATA_KEY)
476 * We are searching for a normal backref but the bytenr of this leaf
477 * matches a shared data backref, OR
478 * the leaf owner is not equal to the root we are searching for
481 (is_shared_data_backref(preftrees, eb->start) ||
482 ref->root_id != btrfs_header_owner(eb))) {
483 if (time_seq == BTRFS_SEQ_LAST)
484 ret = btrfs_next_leaf(root, path);
486 ret = btrfs_next_old_leaf(root, path, time_seq);
489 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
490 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
491 data_offset = btrfs_file_extent_offset(eb, fi);
493 if (disk_byte == wanted_disk_byte) {
496 if (ref->key_for_search.offset == key.offset - data_offset)
500 if (extent_item_pos) {
501 ret = check_extent_in_eb(&key, eb, fi,
503 &eie, ignore_offset);
509 ret = ulist_add_merge_ptr(parents, eb->start,
510 eie, (void **)&old, GFP_NOFS);
513 if (!ret && extent_item_pos) {
521 if (time_seq == BTRFS_SEQ_LAST)
522 ret = btrfs_next_item(root, path);
524 ret = btrfs_next_old_item(root, path, time_seq);
530 free_inode_elem_list(eie);
535 * resolve an indirect backref in the form (root_id, key, level)
536 * to a logical address
538 static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
539 struct btrfs_path *path, u64 time_seq,
540 struct preftrees *preftrees,
541 struct prelim_ref *ref, struct ulist *parents,
542 const u64 *extent_item_pos, bool ignore_offset)
544 struct btrfs_root *root;
545 struct extent_buffer *eb;
548 int level = ref->level;
549 struct btrfs_key search_key = ref->key_for_search;
552 * If we're search_commit_root we could possibly be holding locks on
553 * other tree nodes. This happens when qgroups does backref walks when
554 * adding new delayed refs. To deal with this we need to look in cache
555 * for the root, and if we don't find it then we need to search the
556 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
559 if (path->search_commit_root)
560 root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
562 root = btrfs_get_fs_root(fs_info, ref->root_id, false);
568 if (!path->search_commit_root &&
569 test_bit(BTRFS_ROOT_DELETING, &root->state)) {
574 if (btrfs_is_testing(fs_info)) {
579 if (path->search_commit_root)
580 root_level = btrfs_header_level(root->commit_root);
581 else if (time_seq == BTRFS_SEQ_LAST)
582 root_level = btrfs_header_level(root->node);
584 root_level = btrfs_old_root_level(root, time_seq);
586 if (root_level + 1 == level)
590 * We can often find data backrefs with an offset that is too large
591 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
592 * subtracting a file's offset with the data offset of its
593 * corresponding extent data item. This can happen for example in the
596 * So if we detect such case we set the search key's offset to zero to
597 * make sure we will find the matching file extent item at
598 * add_all_parents(), otherwise we will miss it because the offset
599 * taken from the backref is much larger than the offset of the file
600 * extent item. This can make us scan a very large number of file
601 * extent items, but at least it will not make us miss any.
603 * This is an ugly workaround for a behaviour that should have never
604 * existed, but it does and a fix for the clone ioctl would touch a lot
605 * of places, cause backwards incompatibility and would not fix the
606 * problem for extents cloned with older kernels.
608 if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
609 search_key.offset >= LLONG_MAX)
610 search_key.offset = 0;
611 path->lowest_level = level;
612 if (time_seq == BTRFS_SEQ_LAST)
613 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
615 ret = btrfs_search_old_slot(root, &search_key, path, time_seq);
618 "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
619 ref->root_id, level, ref->count, ret,
620 ref->key_for_search.objectid, ref->key_for_search.type,
621 ref->key_for_search.offset);
625 eb = path->nodes[level];
627 if (WARN_ON(!level)) {
632 eb = path->nodes[level];
635 ret = add_all_parents(root, path, parents, preftrees, ref, level,
636 time_seq, extent_item_pos, ignore_offset);
638 btrfs_put_root(root);
640 path->lowest_level = 0;
641 btrfs_release_path(path);
645 static struct extent_inode_elem *
646 unode_aux_to_inode_list(struct ulist_node *node)
650 return (struct extent_inode_elem *)(uintptr_t)node->aux;
653 static void free_leaf_list(struct ulist *ulist)
655 struct ulist_node *node;
656 struct ulist_iterator uiter;
658 ULIST_ITER_INIT(&uiter);
659 while ((node = ulist_next(ulist, &uiter)))
660 free_inode_elem_list(unode_aux_to_inode_list(node));
666 * We maintain three separate rbtrees: one for direct refs, one for
667 * indirect refs which have a key, and one for indirect refs which do not
668 * have a key. Each tree does merge on insertion.
670 * Once all of the references are located, we iterate over the tree of
671 * indirect refs with missing keys. An appropriate key is located and
672 * the ref is moved onto the tree for indirect refs. After all missing
673 * keys are thus located, we iterate over the indirect ref tree, resolve
674 * each reference, and then insert the resolved reference onto the
675 * direct tree (merging there too).
677 * New backrefs (i.e., for parent nodes) are added to the appropriate
678 * rbtree as they are encountered. The new backrefs are subsequently
681 static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
682 struct btrfs_path *path, u64 time_seq,
683 struct preftrees *preftrees,
684 const u64 *extent_item_pos,
685 struct share_check *sc, bool ignore_offset)
689 struct ulist *parents;
690 struct ulist_node *node;
691 struct ulist_iterator uiter;
692 struct rb_node *rnode;
694 parents = ulist_alloc(GFP_NOFS);
699 * We could trade memory usage for performance here by iterating
700 * the tree, allocating new refs for each insertion, and then
701 * freeing the entire indirect tree when we're done. In some test
702 * cases, the tree can grow quite large (~200k objects).
704 while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
705 struct prelim_ref *ref;
707 ref = rb_entry(rnode, struct prelim_ref, rbnode);
708 if (WARN(ref->parent,
709 "BUG: direct ref found in indirect tree")) {
714 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
715 preftrees->indirect.count--;
717 if (ref->count == 0) {
722 if (sc && sc->root_objectid &&
723 ref->root_id != sc->root_objectid) {
725 ret = BACKREF_FOUND_SHARED;
728 err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
729 ref, parents, extent_item_pos,
732 * We can only tolerate ENOENT; otherwise we should catch the error
733 * and return directly.
735 if (err == -ENOENT) {
736 prelim_ref_insert(fs_info, &preftrees->direct, ref,
745 /* we put the first parent into the ref at hand */
746 ULIST_ITER_INIT(&uiter);
747 node = ulist_next(parents, &uiter);
748 ref->parent = node ? node->val : 0;
749 ref->inode_list = unode_aux_to_inode_list(node);
751 /* Add a prelim_ref(s) for any other parent(s). */
752 while ((node = ulist_next(parents, &uiter))) {
753 struct prelim_ref *new_ref;
755 new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
762 memcpy(new_ref, ref, sizeof(*ref));
763 new_ref->parent = node->val;
764 new_ref->inode_list = unode_aux_to_inode_list(node);
765 prelim_ref_insert(fs_info, &preftrees->direct,
770 * Now it's a direct ref, put it in the direct tree. We must
771 * do this last because the ref could be merged/freed here.
773 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
775 ulist_reinit(parents);
780 * We may have inode lists attached to refs in the parents ulist, so we
781 * must free them before freeing the ulist and its refs.
783 free_leaf_list(parents);
788 * read tree blocks and add keys where required.
790 static int add_missing_keys(struct btrfs_fs_info *fs_info,
791 struct preftrees *preftrees, bool lock)
793 struct prelim_ref *ref;
794 struct extent_buffer *eb;
795 struct preftree *tree = &preftrees->indirect_missing_keys;
796 struct rb_node *node;
798 while ((node = rb_first_cached(&tree->root))) {
799 ref = rb_entry(node, struct prelim_ref, rbnode);
800 rb_erase_cached(node, &tree->root);
802 BUG_ON(ref->parent); /* should not be a direct ref */
803 BUG_ON(ref->key_for_search.type);
804 BUG_ON(!ref->wanted_disk_byte);
806 eb = read_tree_block(fs_info, ref->wanted_disk_byte,
807 ref->root_id, 0, ref->level - 1, NULL);
811 } else if (!extent_buffer_uptodate(eb)) {
813 free_extent_buffer(eb);
817 btrfs_tree_read_lock(eb);
818 if (btrfs_header_level(eb) == 0)
819 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
821 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
823 btrfs_tree_read_unlock(eb);
824 free_extent_buffer(eb);
825 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
832 * add all currently queued delayed refs from this head whose seq nr is
833 * smaller than or equal to seq to the list
835 static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
836 struct btrfs_delayed_ref_head *head, u64 seq,
837 struct preftrees *preftrees, struct share_check *sc)
839 struct btrfs_delayed_ref_node *node;
840 struct btrfs_key key;
845 spin_lock(&head->lock);
846 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
847 node = rb_entry(n, struct btrfs_delayed_ref_node,
852 switch (node->action) {
853 case BTRFS_ADD_DELAYED_EXTENT:
854 case BTRFS_UPDATE_DELAYED_HEAD:
857 case BTRFS_ADD_DELAYED_REF:
858 count = node->ref_mod;
860 case BTRFS_DROP_DELAYED_REF:
861 count = node->ref_mod * -1;
866 switch (node->type) {
867 case BTRFS_TREE_BLOCK_REF_KEY: {
868 /* NORMAL INDIRECT METADATA backref */
869 struct btrfs_delayed_tree_ref *ref;
870 struct btrfs_key *key_ptr = NULL;
872 if (head->extent_op && head->extent_op->update_key) {
873 btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
877 ref = btrfs_delayed_node_to_tree_ref(node);
878 ret = add_indirect_ref(fs_info, preftrees, ref->root,
879 key_ptr, ref->level + 1,
880 node->bytenr, count, sc,
884 case BTRFS_SHARED_BLOCK_REF_KEY: {
885 /* SHARED DIRECT METADATA backref */
886 struct btrfs_delayed_tree_ref *ref;
888 ref = btrfs_delayed_node_to_tree_ref(node);
890 ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
891 ref->parent, node->bytenr, count,
895 case BTRFS_EXTENT_DATA_REF_KEY: {
896 /* NORMAL INDIRECT DATA backref */
897 struct btrfs_delayed_data_ref *ref;
898 ref = btrfs_delayed_node_to_data_ref(node);
900 key.objectid = ref->objectid;
901 key.type = BTRFS_EXTENT_DATA_KEY;
902 key.offset = ref->offset;
905 * If we have a share check context and a reference for
906 * another inode, we can't exit immediately. This is
907 * because even if this is a BTRFS_ADD_DELAYED_REF
908 * reference we may find next a BTRFS_DROP_DELAYED_REF
909 * which cancels out this ADD reference.
911 * If this is a DROP reference and there was no previous
912 * ADD reference, then we need to signal that when we
913 * process references from the extent tree (through
914 * add_inline_refs() and add_keyed_refs()), we should
915 * not exit early if we find a reference for another
916 * inode, because one of the delayed DROP references
917 * may cancel that reference in the extent tree.
920 sc->have_delayed_delete_refs = true;
922 ret = add_indirect_ref(fs_info, preftrees, ref->root,
923 &key, 0, node->bytenr, count, sc,
927 case BTRFS_SHARED_DATA_REF_KEY: {
928 /* SHARED DIRECT FULL backref */
929 struct btrfs_delayed_data_ref *ref;
931 ref = btrfs_delayed_node_to_data_ref(node);
933 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
934 node->bytenr, count, sc,
942 * We must ignore BACKREF_FOUND_SHARED until all delayed
943 * refs have been checked.
945 if (ret && (ret != BACKREF_FOUND_SHARED))
949 ret = extent_is_shared(sc);
951 spin_unlock(&head->lock);
956 * add all inline backrefs for bytenr to the list
958 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
960 static int add_inline_refs(const struct btrfs_fs_info *fs_info,
961 struct btrfs_path *path, u64 bytenr,
962 int *info_level, struct preftrees *preftrees,
963 struct share_check *sc)
967 struct extent_buffer *leaf;
968 struct btrfs_key key;
969 struct btrfs_key found_key;
972 struct btrfs_extent_item *ei;
977 * enumerate all inline refs
979 leaf = path->nodes[0];
980 slot = path->slots[0];
982 item_size = btrfs_item_size_nr(leaf, slot);
983 BUG_ON(item_size < sizeof(*ei));
985 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
986 flags = btrfs_extent_flags(leaf, ei);
987 btrfs_item_key_to_cpu(leaf, &found_key, slot);
989 ptr = (unsigned long)(ei + 1);
990 end = (unsigned long)ei + item_size;
992 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
993 flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
994 struct btrfs_tree_block_info *info;
996 info = (struct btrfs_tree_block_info *)ptr;
997 *info_level = btrfs_tree_block_level(leaf, info);
998 ptr += sizeof(struct btrfs_tree_block_info);
1000 } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
1001 *info_level = found_key.offset;
1003 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1007 struct btrfs_extent_inline_ref *iref;
1011 iref = (struct btrfs_extent_inline_ref *)ptr;
1012 type = btrfs_get_extent_inline_ref_type(leaf, iref,
1013 BTRFS_REF_TYPE_ANY);
1014 if (type == BTRFS_REF_TYPE_INVALID)
1017 offset = btrfs_extent_inline_ref_offset(leaf, iref);
1020 case BTRFS_SHARED_BLOCK_REF_KEY:
1021 ret = add_direct_ref(fs_info, preftrees,
1022 *info_level + 1, offset,
1023 bytenr, 1, NULL, GFP_NOFS);
1025 case BTRFS_SHARED_DATA_REF_KEY: {
1026 struct btrfs_shared_data_ref *sdref;
1029 sdref = (struct btrfs_shared_data_ref *)(iref + 1);
1030 count = btrfs_shared_data_ref_count(leaf, sdref);
1032 ret = add_direct_ref(fs_info, preftrees, 0, offset,
1033 bytenr, count, sc, GFP_NOFS);
1036 case BTRFS_TREE_BLOCK_REF_KEY:
1037 ret = add_indirect_ref(fs_info, preftrees, offset,
1038 NULL, *info_level + 1,
1039 bytenr, 1, NULL, GFP_NOFS);
1041 case BTRFS_EXTENT_DATA_REF_KEY: {
1042 struct btrfs_extent_data_ref *dref;
1046 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1047 count = btrfs_extent_data_ref_count(leaf, dref);
1048 key.objectid = btrfs_extent_data_ref_objectid(leaf,
1050 key.type = BTRFS_EXTENT_DATA_KEY;
1051 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1053 if (sc && sc->inum && key.objectid != sc->inum &&
1054 !sc->have_delayed_delete_refs) {
1055 ret = BACKREF_FOUND_SHARED;
1059 root = btrfs_extent_data_ref_root(leaf, dref);
1061 ret = add_indirect_ref(fs_info, preftrees, root,
1062 &key, 0, bytenr, count,
1072 ptr += btrfs_extent_inline_ref_size(type);
1079 * add all non-inline backrefs for bytenr to the list
1081 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1083 static int add_keyed_refs(struct btrfs_fs_info *fs_info,
1084 struct btrfs_path *path, u64 bytenr,
1085 int info_level, struct preftrees *preftrees,
1086 struct share_check *sc)
1088 struct btrfs_root *extent_root = fs_info->extent_root;
1091 struct extent_buffer *leaf;
1092 struct btrfs_key key;
1095 ret = btrfs_next_item(extent_root, path);
1103 slot = path->slots[0];
1104 leaf = path->nodes[0];
1105 btrfs_item_key_to_cpu(leaf, &key, slot);
1107 if (key.objectid != bytenr)
1109 if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1111 if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1115 case BTRFS_SHARED_BLOCK_REF_KEY:
1116 /* SHARED DIRECT METADATA backref */
1117 ret = add_direct_ref(fs_info, preftrees,
1118 info_level + 1, key.offset,
1119 bytenr, 1, NULL, GFP_NOFS);
1121 case BTRFS_SHARED_DATA_REF_KEY: {
1122 /* SHARED DIRECT FULL backref */
1123 struct btrfs_shared_data_ref *sdref;
1126 sdref = btrfs_item_ptr(leaf, slot,
1127 struct btrfs_shared_data_ref);
1128 count = btrfs_shared_data_ref_count(leaf, sdref);
1129 ret = add_direct_ref(fs_info, preftrees, 0,
1130 key.offset, bytenr, count,
1134 case BTRFS_TREE_BLOCK_REF_KEY:
1135 /* NORMAL INDIRECT METADATA backref */
1136 ret = add_indirect_ref(fs_info, preftrees, key.offset,
1137 NULL, info_level + 1, bytenr,
1140 case BTRFS_EXTENT_DATA_REF_KEY: {
1141 /* NORMAL INDIRECT DATA backref */
1142 struct btrfs_extent_data_ref *dref;
1146 dref = btrfs_item_ptr(leaf, slot,
1147 struct btrfs_extent_data_ref);
1148 count = btrfs_extent_data_ref_count(leaf, dref);
1149 key.objectid = btrfs_extent_data_ref_objectid(leaf,
1151 key.type = BTRFS_EXTENT_DATA_KEY;
1152 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1154 if (sc && sc->inum && key.objectid != sc->inum &&
1155 !sc->have_delayed_delete_refs) {
1156 ret = BACKREF_FOUND_SHARED;
1160 root = btrfs_extent_data_ref_root(leaf, dref);
1161 ret = add_indirect_ref(fs_info, preftrees, root,
1162 &key, 0, bytenr, count,
1178 * this adds all existing backrefs (inline backrefs, backrefs and delayed
1179 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1180 * indirect refs to their parent bytenr.
1181 * When roots are found, they're added to the roots list
1183 * If time_seq is set to BTRFS_SEQ_LAST, it will not search delayed_refs, and
1184 * behaves much like the trans == NULL case, except that it will not use the
1185 * commit root.
1186 * The special case is for qgroup to search roots in commit_transaction().
1188 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1189 * shared extent is detected.
1191 * Otherwise this returns 0 for success and <0 for an error.
1193 * If ignore_offset is set to false, only extent refs whose offsets match
1194 * extent_item_pos are returned. If true, every extent ref is returned
1195 * and extent_item_pos is ignored.
1197 * FIXME some caching might speed things up
1199 static int find_parent_nodes(struct btrfs_trans_handle *trans,
1200 struct btrfs_fs_info *fs_info, u64 bytenr,
1201 u64 time_seq, struct ulist *refs,
1202 struct ulist *roots, const u64 *extent_item_pos,
1203 struct share_check *sc, bool ignore_offset)
1205 struct btrfs_key key;
1206 struct btrfs_path *path;
1207 struct btrfs_delayed_ref_root *delayed_refs = NULL;
1208 struct btrfs_delayed_ref_head *head;
1211 struct prelim_ref *ref;
1212 struct rb_node *node;
1213 struct extent_inode_elem *eie = NULL;
1214 struct preftrees preftrees = {
1215 .direct = PREFTREE_INIT,
1216 .indirect = PREFTREE_INIT,
1217 .indirect_missing_keys = PREFTREE_INIT
1220 key.objectid = bytenr;
1221 key.offset = (u64)-1;
1222 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1223 key.type = BTRFS_METADATA_ITEM_KEY;
1225 key.type = BTRFS_EXTENT_ITEM_KEY;
1227 path = btrfs_alloc_path();
1231 path->search_commit_root = 1;
1232 path->skip_locking = 1;
1235 if (time_seq == BTRFS_SEQ_LAST)
1236 path->skip_locking = 1;
1239 * grab both a lock on the path and a lock on the delayed ref head.
1240 * We need both to get a consistent picture of how the refs look
1241 * at a specified point in time
1246 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1250 /* This shouldn't happen, indicates a bug or fs corruption. */
1256 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1257 if (trans && likely(trans->type != __TRANS_DUMMY) &&
1258 time_seq != BTRFS_SEQ_LAST) {
1260 if (trans && time_seq != BTRFS_SEQ_LAST) {
1263 * look if there are updates for this ref queued and lock the
1266 delayed_refs = &trans->transaction->delayed_refs;
1267 spin_lock(&delayed_refs->lock);
1268 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
1270 if (!mutex_trylock(&head->mutex)) {
1271 refcount_inc(&head->refs);
1272 spin_unlock(&delayed_refs->lock);
1274 btrfs_release_path(path);
1277 * Mutex was contended, block until it's
1278 * released and try again
1280 mutex_lock(&head->mutex);
1281 mutex_unlock(&head->mutex);
1282 btrfs_put_delayed_ref_head(head);
1285 spin_unlock(&delayed_refs->lock);
1286 ret = add_delayed_refs(fs_info, head, time_seq,
1288 mutex_unlock(&head->mutex);
1292 spin_unlock(&delayed_refs->lock);
1296 if (path->slots[0]) {
1297 struct extent_buffer *leaf;
1301 leaf = path->nodes[0];
1302 slot = path->slots[0];
1303 btrfs_item_key_to_cpu(leaf, &key, slot);
1304 if (key.objectid == bytenr &&
1305 (key.type == BTRFS_EXTENT_ITEM_KEY ||
1306 key.type == BTRFS_METADATA_ITEM_KEY)) {
1307 ret = add_inline_refs(fs_info, path, bytenr,
1308 &info_level, &preftrees, sc);
1311 ret = add_keyed_refs(fs_info, path, bytenr, info_level,
1318 btrfs_release_path(path);
1320 ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1324 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1326 ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
1327 extent_item_pos, sc, ignore_offset);
1331 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1334 * This walks the tree of merged and resolved refs. Tree blocks are
1335 * read in as needed. Unique entries are added to the ulist, and
1336 * the list of found roots is updated.
1338 * We release the entire tree in one go before returning.
1340 node = rb_first_cached(&preftrees.direct.root);
1342 ref = rb_entry(node, struct prelim_ref, rbnode);
1343 node = rb_next(&ref->rbnode);
1345 * ref->count < 0 can happen here if there are delayed
1346 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1347 * prelim_ref_insert() relies on this when merging
1348 * identical refs to keep the overall count correct.
1349 * prelim_ref_insert() will merge only those refs
1350 * which compare identically. Any refs having
1351 * e.g. different offsets would not be merged,
1352 * and would retain their original ref->count < 0.
1354 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1355 if (sc && sc->root_objectid &&
1356 ref->root_id != sc->root_objectid) {
1357 ret = BACKREF_FOUND_SHARED;
1361 /* no parent == root of tree */
1362 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1366 if (ref->count && ref->parent) {
1367 if (extent_item_pos && !ref->inode_list &&
1369 struct extent_buffer *eb;
1371 eb = read_tree_block(fs_info, ref->parent, 0,
1372 0, ref->level, NULL);
1376 } else if (!extent_buffer_uptodate(eb)) {
1377 free_extent_buffer(eb);
1382 if (!path->skip_locking)
1383 btrfs_tree_read_lock(eb);
1384 ret = find_extent_in_eb(eb, bytenr,
1385 *extent_item_pos, &eie, ignore_offset);
1386 if (!path->skip_locking)
1387 btrfs_tree_read_unlock(eb);
1388 free_extent_buffer(eb);
1391 ref->inode_list = eie;
1393 * We transferred the list ownership to the ref,
1394 * so set to NULL to avoid a double free in case
1395 * an error happens after this.
1399 ret = ulist_add_merge_ptr(refs, ref->parent,
1401 (void **)&eie, GFP_NOFS);
1404 if (!ret && extent_item_pos) {
1406 * We've recorded that parent, so we must extend
1407 * its inode list here.
1409 * However if there was corruption we may not
1410 * have found an eie, return an error in this
1411 * case.
1420 eie->next = ref->inode_list;
1424 * We have transferred the inode list ownership from
1425 * this ref to the ref we added to the 'refs' ulist.
1426 * So set this ref's inode list to NULL to avoid
1427 * use-after-free when our caller uses it or double
1428 * frees in case an error happens before we return.
1430 ref->inode_list = NULL;
1436 btrfs_free_path(path);
1438 prelim_release(&preftrees.direct);
1439 prelim_release(&preftrees.indirect);
1440 prelim_release(&preftrees.indirect_missing_keys);
1443 free_inode_elem_list(eie);
1448 * Finds all leaves with a reference to the specified combination of bytenr and
1449 * offset. The leaves will be stored in the *leafs ulist, which must be freed
1450 * with ulist_free.
1453 * returns 0 on success, <0 on error
1455 int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1456 struct btrfs_fs_info *fs_info, u64 bytenr,
1457 u64 time_seq, struct ulist **leafs,
1458 const u64 *extent_item_pos, bool ignore_offset)
1462 *leafs = ulist_alloc(GFP_NOFS);
1466 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1467 *leafs, NULL, extent_item_pos, NULL, ignore_offset);
1468 if (ret < 0 && ret != -ENOENT) {
1469 free_leaf_list(*leafs);
1477 * walk all backrefs for a given extent to find all roots that reference this
1478 * extent. Walking a backref means finding all extents that reference this
1479 * extent and in turn walk the backrefs of those, too. Naturally this is a
1480 * recursive process, but here it is implemented in an iterative fashion: We
1481 * find all referencing extents for the extent in question and put them on a
1482 * list. In turn, we find all referencing extents for those, further appending
1483 * to the list. The way we iterate the list allows adding more elements after
1484 * the current while iterating. The process stops when we reach the end of the
1485 * list. Found roots are added to the roots list.
1487 * returns 0 on success, < 0 on error.
1489 static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1490 struct btrfs_fs_info *fs_info, u64 bytenr,
1491 u64 time_seq, struct ulist **roots,
1495 struct ulist_node *node = NULL;
1496 struct ulist_iterator uiter;
1499 tmp = ulist_alloc(GFP_NOFS);
1502 *roots = ulist_alloc(GFP_NOFS);
1508 ULIST_ITER_INIT(&uiter);
1510 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1511 tmp, *roots, NULL, NULL, ignore_offset);
1512 if (ret < 0 && ret != -ENOENT) {
1518 node = ulist_next(tmp, &uiter);
1529 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1530 struct btrfs_fs_info *fs_info, u64 bytenr,
1531 u64 time_seq, struct ulist **roots,
1532 bool skip_commit_root_sem)
1536 if (!trans && !skip_commit_root_sem)
1537 down_read(&fs_info->commit_root_sem);
1538 ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1539 time_seq, roots, false);
1540 if (!trans && !skip_commit_root_sem)
1541 up_read(&fs_info->commit_root_sem);
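
/*
 * Editorial sketch, not part of the original file: consuming the roots ulist
 * that btrfs_find_all_roots() returns. Qgroup accounting follows this
 * pattern; the function name is hypothetical and the BTRFS_SEQ_LAST usage
 * assumes the no-delayed-refs special case described above.
 */
static int example_count_roots(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, u64 bytenr,
			       u64 *nr_roots)
{
	struct ulist *roots = NULL;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	ret = btrfs_find_all_roots(trans, fs_info, bytenr, BTRFS_SEQ_LAST,
				   &roots, false);
	if (ret < 0)
		return ret;
	*nr_roots = 0;
	ULIST_ITER_INIT(&uiter);
	/* each node->val is the objectid of a root referencing bytenr */
	while ((node = ulist_next(roots, &uiter)))
		(*nr_roots)++;
	ulist_free(roots);
	return 0;
}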
1546 * Check if an extent is shared or not
1548 * @root: root inode belongs to
1549 * @inum: inode number of the inode whose extent we are checking
1550 * @bytenr: logical bytenr of the extent we are checking
1551 * @roots: list of roots this extent is shared among
1552 * @tmp: temporary list used for iteration
1554 * btrfs_check_shared uses the backref walking code but will short
1555 * circuit as soon as it finds a root or inode that doesn't match the
1556 * one passed in. This provides a significant performance benefit for
1557 * callers (such as fiemap) which want to know whether the extent is
1558 * shared but do not need a ref count.
1560 * This attempts to attach to the running transaction in order to account for
1561 * delayed refs, but continues on even when no running transaction exists.
1563 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1565 int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1566 struct ulist *roots, struct ulist *tmp)
1568 struct btrfs_fs_info *fs_info = root->fs_info;
1569 struct btrfs_trans_handle *trans;
1570 struct ulist_iterator uiter;
1571 struct ulist_node *node;
1572 struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1574 struct share_check shared = {
1575 .root_objectid = root->root_key.objectid,
1578 .have_delayed_delete_refs = false,
1584 trans = btrfs_join_transaction_nostart(root);
1585 if (IS_ERR(trans)) {
1586 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1587 ret = PTR_ERR(trans);
1591 down_read(&fs_info->commit_root_sem);
1593 btrfs_get_tree_mod_seq(fs_info, &elem);
1596 ULIST_ITER_INIT(&uiter);
1598 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1599 roots, NULL, &shared, false);
1600 if (ret == BACKREF_FOUND_SHARED) {
1601 /* this is the only condition under which we return 1 */
1605 if (ret < 0 && ret != -ENOENT)
1608 node = ulist_next(tmp, &uiter);
1612 shared.share_count = 0;
1613 shared.have_delayed_delete_refs = false;
1618 btrfs_put_tree_mod_seq(fs_info, &elem);
1619 btrfs_end_transaction(trans);
1621 up_read(&fs_info->commit_root_sem);
1624 ulist_release(roots);
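
/*
 * Editorial sketch, not part of the original file: a minimal caller of
 * btrfs_check_shared(), roughly what fiemap does per extent. The two ulists
 * are caller-provided scratch space so repeated checks can reuse them; the
 * function name is hypothetical.
 */
static int example_extent_is_shared(struct btrfs_root *root, u64 inum,
				    u64 bytenr)
{
	struct ulist *roots;
	struct ulist *tmp;
	int ret;

	roots = ulist_alloc(GFP_KERNEL);
	tmp = ulist_alloc(GFP_KERNEL);
	if (!roots || !tmp) {
		ret = -ENOMEM;
		goto out;
	}
	/* 0: not shared, 1: shared, <0: error */
	ret = btrfs_check_shared(root, inum, bytenr, roots, tmp);
out:
	ulist_free(tmp);
	ulist_free(roots);
	return ret;
}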
1629 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1630 u64 start_off, struct btrfs_path *path,
1631 struct btrfs_inode_extref **ret_extref,
1635 struct btrfs_key key;
1636 struct btrfs_key found_key;
1637 struct btrfs_inode_extref *extref;
1638 const struct extent_buffer *leaf;
1641 key.objectid = inode_objectid;
1642 key.type = BTRFS_INODE_EXTREF_KEY;
1643 key.offset = start_off;
1645 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1650 leaf = path->nodes[0];
1651 slot = path->slots[0];
1652 if (slot >= btrfs_header_nritems(leaf)) {
1654 * If the item at offset is not found,
1655 * btrfs_search_slot will point us to the slot
1656 * where it should be inserted. In our case
1657 * that will be the slot directly before the
1658 * next INODE_EXTREF_KEY item. In the case
1659 * that we're pointing to the last slot in a
1660 * leaf, we must move one leaf over.
1662 ret = btrfs_next_leaf(root, path);
1671 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1674 * Check that we're still looking at an extended ref key for
1675 * this particular objectid. If we have a different
1676 * objectid or type, there are no more to be found
1677 * in the tree and we can exit.
1680 if (found_key.objectid != inode_objectid)
1682 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1686 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1687 extref = (struct btrfs_inode_extref *)ptr;
1688 *ret_extref = extref;
1690 *found_off = found_key.offset;
1698 * this iterates to turn a name (from iref/extref) into a full filesystem path.
1699 * Elements of the path are separated by '/' and the path is guaranteed to be
1700 * 0-terminated. the path is only given within the current file system.
1701 * Therefore, it never starts with a '/'. the caller is responsible for providing
1702 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
1703 * the start point of the resulting string is returned. this pointer is within
1704 * dest, normally.
1705 * in case the path buffer would overflow, the pointer is decremented further
1706 * as if output was written to the buffer, though no more output is actually
1707 * generated. that way, the caller can determine how much space would be
1708 * required for the path to fit into the buffer. in that case, the returned
1709 * value will be smaller than dest. callers must check this!
1711 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1712 u32 name_len, unsigned long name_off,
1713 struct extent_buffer *eb_in, u64 parent,
1714 char *dest, u32 size)
1719 s64 bytes_left = ((s64)size) - 1;
1720 struct extent_buffer *eb = eb_in;
1721 struct btrfs_key found_key;
1722 struct btrfs_inode_ref *iref;
1724 if (bytes_left >= 0)
1725 dest[bytes_left] = '\0';
1728 bytes_left -= name_len;
1729 if (bytes_left >= 0)
1730 read_extent_buffer(eb, dest + bytes_left,
1731 name_off, name_len);
1733 if (!path->skip_locking)
1734 btrfs_tree_read_unlock(eb);
1735 free_extent_buffer(eb);
1737 ret = btrfs_find_item(fs_root, path, parent, 0,
1738 BTRFS_INODE_REF_KEY, &found_key);
1744 next_inum = found_key.offset;
1746 /* regular exit ahead */
1747 if (parent == next_inum)
1750 slot = path->slots[0];
1751 eb = path->nodes[0];
1752 /* make sure we can use eb after releasing the path */
1754 path->nodes[0] = NULL;
1757 btrfs_release_path(path);
1758 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1760 name_len = btrfs_inode_ref_name_len(eb, iref);
1761 name_off = (unsigned long)(iref + 1);
1765 if (bytes_left >= 0)
1766 dest[bytes_left] = '/';
1769 btrfs_release_path(path);
1772 return ERR_PTR(ret);
1774 return dest + bytes_left;
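
/*
 * Editorial sketch, not part of the original file: the overflow contract of
 * btrfs_ref_to_path() from the caller's side. A return value below buf means
 * the path did not fit; inode_to_path() below handles the same case by
 * accounting bytes_missing instead. All names here are hypothetical.
 */
static ssize_t example_ref_to_path(struct btrfs_root *fs_root,
				   struct btrfs_path *path,
				   struct btrfs_inode_ref *iref,
				   struct extent_buffer *eb, u64 parent,
				   char *buf, u32 size)
{
	char *start;

	start = btrfs_ref_to_path(fs_root, path,
				  btrfs_inode_ref_name_len(eb, iref),
				  (unsigned long)(iref + 1),
				  eb, parent, buf, size);
	if (IS_ERR(start))
		return PTR_ERR(start);
	if (start < buf)	/* truncated: (buf - start) more bytes needed */
		return -ENAMETOOLONG;
	return start - buf;	/* offset of the 0-terminated path in buf */
}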
1778 * this makes the path point to (logical EXTENT_ITEM *)
1779 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1780 * tree blocks and <0 on error.
1782 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1783 struct btrfs_path *path, struct btrfs_key *found_key,
1790 const struct extent_buffer *eb;
1791 struct btrfs_extent_item *ei;
1792 struct btrfs_key key;
1794 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1795 key.type = BTRFS_METADATA_ITEM_KEY;
1797 key.type = BTRFS_EXTENT_ITEM_KEY;
1798 key.objectid = logical;
1799 key.offset = (u64)-1;
1801 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1805 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1811 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1812 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1813 size = fs_info->nodesize;
1814 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1815 size = found_key->offset;
1817 if (found_key->objectid > logical ||
1818 found_key->objectid + size <= logical) {
1819 btrfs_debug(fs_info,
1820 "logical %llu is not within any extent", logical);
1824 eb = path->nodes[0];
1825 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1826 BUG_ON(item_size < sizeof(*ei));
1828 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1829 flags = btrfs_extent_flags(eb, ei);
1831 btrfs_debug(fs_info,
1832 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1833 logical, logical - found_key->objectid, found_key->objectid,
1834 found_key->offset, flags, item_size);
1836 WARN_ON(!flags_ret);
1838 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1839 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1840 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1841 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1851 * helper function to iterate extent inline refs. ptr must point to a 0 value
1852 * for the first call and may be modified. it is used to track state.
1853 * if more refs exist, 0 is returned and the next call to
1854 * get_extent_inline_ref must pass the modified ptr parameter to get the
1855 * next ref. after the last ref was processed, 1 is returned.
1856 * returns <0 on error
1858 static int get_extent_inline_ref(unsigned long *ptr,
1859 const struct extent_buffer *eb,
1860 const struct btrfs_key *key,
1861 const struct btrfs_extent_item *ei,
1863 struct btrfs_extent_inline_ref **out_eiref,
1868 struct btrfs_tree_block_info *info;
1872 flags = btrfs_extent_flags(eb, ei);
1873 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1874 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1875 /* a skinny metadata extent */
1877 (struct btrfs_extent_inline_ref *)(ei + 1);
1879 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1880 info = (struct btrfs_tree_block_info *)(ei + 1);
1882 (struct btrfs_extent_inline_ref *)(info + 1);
1885 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1887 *ptr = (unsigned long)*out_eiref;
1888 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1892 end = (unsigned long)ei + item_size;
1893 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1894 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1895 BTRFS_REF_TYPE_ANY);
1896 if (*out_type == BTRFS_REF_TYPE_INVALID)
1899 *ptr += btrfs_extent_inline_ref_size(*out_type);
1900 WARN_ON(*ptr > end);
1902 return 1; /* last */
1908 * reads the tree block backref for an extent. tree level and root are returned
1909 * through out_level and out_root. ptr must point to a 0 value for the first
1910 * call and may be modified (see get_extent_inline_ref comment).
1911 * returns 0 if data was provided, 1 if there was no more data to provide or
1912 * <0 on error.
1914 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1915 struct btrfs_key *key, struct btrfs_extent_item *ei,
1916 u32 item_size, u64 *out_root, u8 *out_level)
1920 struct btrfs_extent_inline_ref *eiref;
1922 if (*ptr == (unsigned long)-1)
1926 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1931 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1932 type == BTRFS_SHARED_BLOCK_REF_KEY)
1939 /* we can treat both ref types equally here */
1940 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1942 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1943 struct btrfs_tree_block_info *info;
1945 info = (struct btrfs_tree_block_info *)(ei + 1);
1946 *out_level = btrfs_tree_block_level(eb, info);
1948 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1949 *out_level = (u8)key->offset;
1953 *ptr = (unsigned long)-1;
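
/*
 * Editorial sketch, not part of the original file: the intended calling
 * pattern for tree_backref_for_extent() above. ptr starts at 0 and is
 * advanced internally until the helper returns 1 (done) or <0 (error);
 * scrub's warning path uses this loop to print every tree root referencing
 * a block. The function name is hypothetical.
 */
static int example_walk_tree_backrefs(struct extent_buffer *eb,
				      struct btrfs_key *key,
				      struct btrfs_extent_item *ei,
				      u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	do {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret == 0)
			pr_debug("root %llu level %u\n", root, level);
	} while (ret == 0);
	return 0;
}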
1958 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1959 struct extent_inode_elem *inode_list,
1960 u64 root, u64 extent_item_objectid,
1961 iterate_extent_inodes_t *iterate, void *ctx)
1963 struct extent_inode_elem *eie;
1966 for (eie = inode_list; eie; eie = eie->next) {
1967 btrfs_debug(fs_info,
1968 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
1969 extent_item_objectid, eie->inum,
1971 ret = iterate(eie->inum, eie->offset, root, ctx);
1973 btrfs_debug(fs_info,
1974 "stopping iteration for %llu due to ret=%d",
1975 extent_item_objectid, ret);
1984 * calls iterate() for every inode that references the extent identified by
1985 * the given parameters.
1986 * when the iterator function returns a non-zero value, iteration stops.
1988 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1989 u64 extent_item_objectid, u64 extent_item_pos,
1990 int search_commit_root,
1991 iterate_extent_inodes_t *iterate, void *ctx,
1995 struct btrfs_trans_handle *trans = NULL;
1996 struct ulist *refs = NULL;
1997 struct ulist *roots = NULL;
1998 struct ulist_node *ref_node = NULL;
1999 struct ulist_node *root_node = NULL;
2000 struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2001 struct ulist_iterator ref_uiter;
2002 struct ulist_iterator root_uiter;
2004 btrfs_debug(fs_info, "resolving all inodes for extent %llu",
2005 extent_item_objectid);
2007 if (!search_commit_root) {
2008 trans = btrfs_attach_transaction(fs_info->extent_root);
2009 if (IS_ERR(trans)) {
2010 if (PTR_ERR(trans) != -ENOENT &&
2011 PTR_ERR(trans) != -EROFS)
2012 return PTR_ERR(trans);
2018 btrfs_get_tree_mod_seq(fs_info, &seq_elem);
2020 down_read(&fs_info->commit_root_sem);
2022 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
2023 seq_elem.seq, &refs,
2024 &extent_item_pos, ignore_offset);
2028 ULIST_ITER_INIT(&ref_uiter);
2029 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2030 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
2031 seq_elem.seq, &roots,
2035 ULIST_ITER_INIT(&root_uiter);
2036 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
2037 btrfs_debug(fs_info,
2038 "root %llu references leaf %llu, data list %#llx",
2039 root_node->val, ref_node->val,
2041 ret = iterate_leaf_refs(fs_info,
2042 (struct extent_inode_elem *)
2043 (uintptr_t)ref_node->aux,
2045 extent_item_objectid,
2051 free_leaf_list(refs);
2054 btrfs_put_tree_mod_seq(fs_info, &seq_elem);
2055 btrfs_end_transaction(trans);
2057 up_read(&fs_info->commit_root_sem);
2063 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
2065 struct btrfs_data_container *inodes = ctx;
2066 const size_t c = 3 * sizeof(u64);
2068 if (inodes->bytes_left >= c) {
2069 inodes->bytes_left -= c;
2070 inodes->val[inodes->elem_cnt] = inum;
2071 inodes->val[inodes->elem_cnt + 1] = offset;
2072 inodes->val[inodes->elem_cnt + 2] = root;
2073 inodes->elem_cnt += 3;
2075 inodes->bytes_missing += c - inodes->bytes_left;
2076 inodes->bytes_left = 0;
2077 inodes->elem_missed += 3;
2083 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2084 struct btrfs_path *path,
2085 void *ctx, bool ignore_offset)
2088 u64 extent_item_pos;
2090 struct btrfs_key found_key;
2091 int search_commit_root = path->search_commit_root;
2093 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2094 btrfs_release_path(path);
2097 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2100 extent_item_pos = logical - found_key.objectid;
2101 ret = iterate_extent_inodes(fs_info, found_key.objectid,
2102 extent_item_pos, search_commit_root,
2103 build_ino_list, ctx, ignore_offset);
2108 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
2109 struct extent_buffer *eb, void *ctx);
2111 static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
2112 struct btrfs_path *path,
2113 iterate_irefs_t *iterate, void *ctx)
2122 struct extent_buffer *eb;
2123 struct btrfs_item *item;
2124 struct btrfs_inode_ref *iref;
2125 struct btrfs_key found_key;
2128 ret = btrfs_find_item(fs_root, path, inum,
2129 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2135 ret = found ? 0 : -ENOENT;
2140 parent = found_key.offset;
2141 slot = path->slots[0];
2142 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2147 btrfs_release_path(path);
2149 item = btrfs_item_nr(slot);
2150 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2152 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2153 name_len = btrfs_inode_ref_name_len(eb, iref);
2154 /* path must be released before calling iterate()! */
2155 btrfs_debug(fs_root->fs_info,
2156 "following ref at offset %u for inode %llu in tree %llu",
2157 cur, found_key.objectid,
2158 fs_root->root_key.objectid);
2159 ret = iterate(parent, name_len,
2160 (unsigned long)(iref + 1), eb, ctx);
2163 len = sizeof(*iref) + name_len;
2164 iref = (struct btrfs_inode_ref *)((char *)iref + len);
2166 free_extent_buffer(eb);
2169 btrfs_release_path(path);
2174 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2175 struct btrfs_path *path,
2176 iterate_irefs_t *iterate, void *ctx)
2183 struct extent_buffer *eb;
2184 struct btrfs_inode_extref *extref;
2190 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2195 ret = found ? 0 : -ENOENT;
2200 slot = path->slots[0];
2201 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2206 btrfs_release_path(path);
2208 item_size = btrfs_item_size_nr(eb, slot);
2209 ptr = btrfs_item_ptr_offset(eb, slot);
2212 while (cur_offset < item_size) {
2215 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2216 parent = btrfs_inode_extref_parent(eb, extref);
2217 name_len = btrfs_inode_extref_name_len(eb, extref);
2218 ret = iterate(parent, name_len,
2219 (unsigned long)&extref->name, eb, ctx);
2223 cur_offset += btrfs_inode_extref_name_len(eb, extref);
2224 cur_offset += sizeof(*extref);
2226 free_extent_buffer(eb);
2231 btrfs_release_path(path);
2236 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2237 struct btrfs_path *path, iterate_irefs_t *iterate,
2243 ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2246 else if (ret != -ENOENT)
2249 ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2250 if (ret == -ENOENT && found_refs)
2257 * returns 0 if the path could be dumped (probably truncated)
2258 * returns <0 in case of an error
2260 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2261 struct extent_buffer *eb, void *ctx)
2263 struct inode_fs_paths *ipath = ctx;
2266 int i = ipath->fspath->elem_cnt;
2267 const int s_ptr = sizeof(char *);
2270 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2271 ipath->fspath->bytes_left - s_ptr : 0;
2273 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2274 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2275 name_off, eb, inum, fspath_min, bytes_left);
2277 return PTR_ERR(fspath);
2279 if (fspath > fspath_min) {
2280 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2281 ++ipath->fspath->elem_cnt;
2282 ipath->fspath->bytes_left = fspath - fspath_min;
2284 ++ipath->fspath->elem_missed;
2285 ipath->fspath->bytes_missing += fspath_min - fspath;
2286 ipath->fspath->bytes_left = 0;
2293 * this dumps all file system paths to the inode into the ipath struct, provided
2294 * it has been created large enough. each path is zero-terminated and accessed
2295 * from ipath->fspath->val[i].
2296 * when it returns, there are ipath->fspath->elem_cnt paths available
2297 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2298 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2299 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2300 * have been needed to return all paths.
2302 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2304 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2305 inode_to_path, ipath);
2308 struct btrfs_data_container *init_data_container(u32 total_bytes)
2310 struct btrfs_data_container *data;
2313 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2314 data = kvmalloc(alloc_bytes, GFP_KERNEL);
2316 return ERR_PTR(-ENOMEM);
2318 if (total_bytes >= sizeof(*data)) {
2319 data->bytes_left = total_bytes - sizeof(*data);
2320 data->bytes_missing = 0;
2322 data->bytes_missing = sizeof(*data) - total_bytes;
2323 data->bytes_left = 0;
2327 data->elem_missed = 0;
2333 * allocates space to return multiple file system paths for an inode.
2334 * total_bytes to allocate are passed; note that the space usable for actual path
2335 * information will be total_bytes - sizeof(struct btrfs_data_container).
2336 * the returned pointer must be freed with free_ipath() in the end.
2338 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2339 struct btrfs_path *path)
2341 struct inode_fs_paths *ifp;
2342 struct btrfs_data_container *fspath;
2344 fspath = init_data_container(total_bytes);
2346 return ERR_CAST(fspath);
2348 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2351 return ERR_PTR(-ENOMEM);
2354 ifp->btrfs_path = path;
2355 ifp->fspath = fspath;
2356 ifp->fs_root = fs_root;
2361 void free_ipath(struct inode_fs_paths *ipath)
2365 kvfree(ipath->fspath);
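
/*
 * Editorial sketch, not part of the original file: the typical ipath life
 * cycle, modeled on the INO_PATHS ioctl. The buffer size and function name
 * are illustrative; a real caller would check elem_missed/bytes_missing and
 * retry with a larger container.
 */
static int example_print_inode_paths(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	u32 i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		goto out_path;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto out_ipath;
	/* each val[i] holds a pointer to a 0-terminated path */
	for (i = 0; i < ipath->fspath->elem_cnt; i++)
		pr_debug("path: %s\n",
			 (char *)(unsigned long)ipath->fspath->val[i]);
out_ipath:
	free_ipath(ipath);
out_path:
	btrfs_free_path(path);
	return ret;
}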
struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backrefs for now.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, so we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				      path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}
/*
 * Go to the next backref item of current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's inline ref or not by iter->cur_key.
 *
 * Return 0 if we get the next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
	return 0;
}
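/*
 * A minimal iteration sketch (illustrative), showing the contract of the two
 * functions above; this is the pattern btrfs_backref_add_tree_node() uses:
 *
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	if (ret < 0)
 *		return ret;
 *	while (ret == 0) {
 *		// examine iter->cur_key and iter->cur_ptr here
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_release(iter);
 *	// ret > 0 means all backrefs were visited, ret < 0 means error
 */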
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}
struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}
/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Add the node to leaf node list if no other child block
		 * is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}
/*
 * Release all nodes/edges from current cache
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}
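/*
 * Typical cache lifecycle (illustrative): the cache is embedded in a larger
 * structure, initialized once, populated by the helpers below during a
 * backref walk, and torn down in a single call:
 *
 *	btrfs_backref_init_cache(fs_info, &cache, 1);
 *	// ... build nodes/edges via btrfs_backref_add_tree_node() etc. ...
 *	btrfs_backref_release_cache(&cache);
 */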
/*
 * Handle direct tree backref
 *
 * Direct tree backref means the backref item shows its parent bytenr
 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The converted backref key.
 *		For keyed backref, it's the item key.
 *		For inlined backref, objectid is the bytenr,
 *		type is btrfs_inline_ref_type, offset is
 *		btrfs_inline_ref_offset.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only reloc root uses backref pointing to itself */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only reloc backref cache cares about a specific root */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (!root)
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For generic purpose backref cache, reloc root node
			 * is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
						 cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * The backref for the upper level block isn't cached yet,
		 * add the block to the pending list
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}
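/*
 * Illustrative key (made-up numbers): (38993920 SHARED_BLOCK_REF 37748736)
 * describes the tree block at bytenr 38993920 referenced by a parent node at
 * bytenr 37748736; since the offset already is the parent bytenr, no tree
 * search is needed to resolve it.
 */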
/*
 * Handle indirect tree backref
 *
 * Indirect tree backref means we only know which tree the node belongs to.
 * We still need to do a tree search to find out the parents. This is for
 * TREE_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
 * @tree_key:	The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every time
 *		the function gets called.
 */
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
					struct btrfs_path *path,
					struct btrfs_key *ref_key,
					struct btrfs_key *tree_key,
					struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_node *lower;
	struct btrfs_backref_edge *edge;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct rb_node *rb_node;
	int level;
	bool need_check = true;
	int ret;

	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
	if (IS_ERR(root))
		return PTR_ERR(root);
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		cur->cowonly = 1;

	if (btrfs_root_level(&root->root_item) == cur->level) {
		/* Tree root */
		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
		/*
		 * For reloc backref cache, we may ignore reloc root. But for
		 * general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with
		 * current running relocation and lead to missing root.
		 *
		 * For general purpose backref cache, reloc root detection is
		 * completely relying on direct backref (key->offset is parent
		 * bytenr), thus only do such check for reloc cache.
		 */
		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
			btrfs_put_root(root);
			list_add(&cur->list, &cache->useless_node);
		} else {
			cur->root = root;
		}
		return 0;
	}

	level = cur->level + 1;

	/* Search the tree to find parent blocks referring to the block */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
	path->lowest_level = 0;
	if (ret < 0) {
		btrfs_put_root(root);
		return ret;
	}
	if (ret > 0 && path->slots[level] > 0)
		path->slots[level]--;
	ret = 0;

	eb = path->nodes[level];
	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
		btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->root_key.objectid,
			  tree_key->objectid, tree_key->type, tree_key->offset);
		btrfs_put_root(root);
		ret = -ENOENT;
		goto out;
	}
	lower = cur;

	/* Add all nodes and edges in the path */
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level]) {
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       lower->bytenr);
			/* Same as previous should_ignore_reloc_root() call */
			if (btrfs_should_ignore_reloc_root(root) &&
			    cache->is_reloc) {
				btrfs_put_root(root);
				list_add(&lower->list, &cache->useless_node);
			} else {
				lower->root = root;
			}
			break;
		}

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge) {
			btrfs_put_root(root);
			ret = -ENOMEM;
			goto out;
		}

		eb = path->nodes[level];
		rb_node = rb_simple_search(&cache->rb_root, eb->start);
		if (!rb_node) {
			upper = btrfs_backref_alloc_node(cache, eb->start,
							 lower->level + 1);
			if (!upper) {
				btrfs_put_root(root);
				btrfs_backref_free_edge(cache, edge);
				ret = -ENOMEM;
				goto out;
			}
			upper->owner = btrfs_header_owner(eb);
			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				upper->cowonly = 1;

			/*
			 * If we know the block isn't shared we can avoid
			 * checking its backrefs.
			 */
			if (btrfs_block_can_be_shared(root, eb))
				upper->checked = 0;
			else
				upper->checked = 1;

			/*
			 * Add the block to pending list if we need to check its
			 * backrefs, we only do this once while walking up a
			 * tree as we will catch anything else later on.
			 */
			if (!upper->checked && need_check) {
				need_check = false;
				list_add_tail(&edge->list[UPPER],
					      &cache->pending_edge);
			} else {
				if (upper->checked)
					need_check = true;
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
		} else {
			upper = rb_entry(rb_node, struct btrfs_backref_node,
					 rb_node);
			ASSERT(upper->checked);
			INIT_LIST_HEAD(&edge->list[UPPER]);
			if (!upper->owner)
				upper->owner = btrfs_header_owner(eb);
		}
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);

		if (rb_node) {
			btrfs_put_root(root);
			break;
		}
		lower = upper;
		upper = NULL;
	}
out:
	btrfs_release_path(path);
	return ret;
}
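/*
 * Illustrative key (made-up numbers): (38993920 TREE_BLOCK_REF 257) only
 * says the block at bytenr 38993920 belongs to the tree with root objectid
 * 257. The parent bytenr must be found by searching that tree down to the
 * block's level using its first key, which is what the function above does.
 */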
/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
 *	 links aren't yet bi-directional; use
 *	 btrfs_backref_finish_upper_links() to finish the linkage.
 *
 * @path:	Released path for indirect tree backref lookup
 * @iter:	Released backref iter for extent tree search
 * @node_key:	The first key of the tree block
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing backref of
		 * type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to pending list if we need to
		 * check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			continue;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}
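/*
 * Sketch of the intended two-step usage (illustrative, modeled on how
 * relocation's build_backref_tree() drives these helpers):
 *
 *	cur = btrfs_backref_alloc_node(cache, bytenr, level);
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, cur);
 *	if (ret < 0)
 *		goto error;
 *	ret = btrfs_backref_finish_upper_links(cache, cur);
 *	if (ret < 0)
 *		goto error;
 */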
/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node to cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in current build_backref_tree() haven't
		 * been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root,
						   upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						    upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}
void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this guy's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_drop_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}