/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

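/*
 * check whether the given file extent item covers extent_item_pos within the
 * referenced extent; if it does, allocate an extent_inode_elem recording the
 * (inode, file offset) pair and prepend it to *eie. returns 0 when an entry
 * was added, 1 when the position falls outside this item, <0 on allocation
 * failure.
 */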
static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
				struct btrfs_file_extent_item *fi,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 data_offset;
	u64 data_len;
	struct extent_inode_elem *e;

	data_offset = btrfs_file_extent_offset(eb, fi);
	data_len = btrfs_file_extent_num_bytes(eb, fi);

	if (extent_item_pos < data_offset ||
	    extent_item_pos >= data_offset + data_len)
		return 1;

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + (extent_item_pos - data_offset);
	*eie = e;

	return 0;
}

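/*
 * walk all file extent items in the given leaf and record an inode element
 * for every one whose disk bytenr matches wanted_disk_byte; inline extents
 * are skipped because they have no disk bytenr of their own.
 */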
static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need the
	 * key. thus, we must look at all items and see whether we find one
	 * (or more) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see __add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count)
{
	struct __prelim_ref *ref;

	/* in case we're adding delayed refs, we're holding the refs spinlock */
	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

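/*
 * collect all parents of a resolved indirect ref: for interior nodes this is
 * just the block at the requested level, for leaves it is every leaf that
 * still holds a file extent item pointing at wanted_disk_byte. parents are
 * recorded by their eb->start in the parents ulist; when extent_item_pos is
 * given, the matching inode list is stored in the ulist node's aux field.
 */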
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, int level,
			   struct btrfs_key *key_for_search, u64 time_seq,
			   u64 wanted_disk_byte,
			   const u64 *extent_item_pos)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL;
	u64 disk_byte;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		return ret < 0 ? ret : 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot == nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		ret = btrfs_next_old_leaf(root, path, time_seq);

	while (!ret) {
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte == wanted_disk_byte) {
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos, &eie);
				if (ret < 0)
					break;
			}
			if (!ret) {
				ret = ulist_add(parents, eb->start,
						(unsigned long)eie, GFP_NOFS);
				if (ret < 0)
					break;
				if (!extent_item_pos) {
					ret = btrfs_next_old_leaf(root, path,
								  time_seq);
					continue;
				}
			}
		}
		ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
					int search_commit_root,
					u64 time_seq,
					struct __prelim_ref *ref,
					struct ulist *parents,
					const u64 *extent_item_pos)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	root_level = btrfs_header_level(root->node);

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key_for_search.objectid,
		 ref->key_for_search.type,
		 (unsigned long long)ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	if (!eb) {
		WARN_ON(1);
		ret = 1;
		goto out;
	}

	ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
				time_seq, ref->wanted_disk_byte,
				extent_item_pos);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   int search_commit_root, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, search_commit_root,
					     time_seq, ref, parents,
					     extent_item_pos);
		if (err == -ENOMEM)
			goto out;
		if (err)
			continue;

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list =
			node ? (struct extent_inode_elem *)node->aux : 0;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list =
				(struct extent_inode_elem *)node->aux;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}
out:
	ulist_free(parents);
	return ret;
}

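/*
 * two prelim refs describe the same block when level, root, search key and
 * parent all match; used by __merge_refs in mode 1 to fold duplicates.
 */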
static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}

/*
 * read tree blocks and add keys where required.
 */
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct list_head *pos;
	struct extent_buffer *eb;

	list_for_each(pos, head) {
		struct __prelim_ref *ref;
		ref = list_entry(pos, struct __prelim_ref, list);

		if (ref->parent)
			continue;
		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     fs_info->tree_root->leafsize, 0);
		BUG_ON(!eb);
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}

	return 0;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
 *           additionally, we could even add a key range for the blocks we
 *           looked into to merge even more (-> replace unresolved refs by those
 *           having a parent).
 * mode = 2: merge identical parents
 */
static int __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;
			struct __prelim_ref *xchg;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (!ref_for_same_block(ref1, ref2))
					continue;
				if (!ref1->parent && ref2->parent) {
					xchg = ref1;
					ref1 = ref2;
					ref2 = xchg;
				}
				ref1->count += ref2->count;
			} else {
				if (ref1->parent != ref2->parent)
					continue;
				ref1->count += ref2->count;
			}
			list_del(&ref2->list);
			kfree(ref2);
		}
	}

	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return 0;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						*info_level + 1, offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						info_level + 1, key.offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 delayed_ref_seq, u64 time_seq,
			     struct ulist *refs, struct ulist *roots,
			     const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, delayed_ref_seq,
						 &prefs_delayed);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    key.type == BTRFS_EXTENT_ITEM_KEY) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 1);
	if (ret)
		goto out;

	ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
				      &prefs, extent_item_pos);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 2);
	if (ret)
		goto out;

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);

		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;
			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
							info_level);
				eb = read_tree_block(fs_info->extent_root,
							ref->parent, bsz, 0);
				BUG_ON(!eb);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				ref->inode_list = eie;
				free_extent_buffer(eb);
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (unsigned long)ref->inode_list,
					      (unsigned long *)&eie, GFP_NOFS);
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			BUG_ON(ret < 0);
		}
		kfree(ref);
	}

out:
	if (head)
		mutex_unlock(&head->mutex);
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}

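/*
 * free a ulist as filled by find_parent_nodes/btrfs_find_all_leafs, including
 * the extent_inode_elem chains hanging off the aux pointers of its nodes.
 */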
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct extent_inode_elem *eie_next;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)node->aux;
		for (; eie; eie = eie_next) {
			eie_next = eie->next;
			kfree(eie);
		}
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in the leafs ulist, which must be freed
 * with free_leaf_list (it may carry inode lists in its aux pointers).
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 delayed_ref_seq, u64 time_seq,
				struct ulist **leafs,
				const u64 *extent_item_pos)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
				time_seq, *leafs, tmp, extent_item_pos);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 delayed_ref_seq, u64 time_seq,
			 struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
					time_seq, tmp, *roots, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);
	return 0;
}

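/*
 * searches fs_root for (inum, key_type, ioff) and leaves the path pointing at
 * the first item at or after it. returns 0 when an item with matching
 * objectid and type exists, 1 when there is none, <0 on error; found_key
 * receives the key of the item the path points to.
 */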
static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
				&key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
				found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
				struct btrfs_inode_ref *iref,
				struct extent_buffer *eb_in, u64 parent,
				char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
						(unsigned long)(iref + 1), len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
					0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical) {
		pr_debug("logical %llu is not within any extent\n",
			 (unsigned long long)logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 (unsigned long long)logical,
		 (unsigned long long)(logical - found_key->objectid),
		 (unsigned long long)found_key->objectid,
		 (unsigned long long)found_key->offset,
		 (unsigned long long)flags, item_size);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				struct btrfs_extent_inline_ref **out_eiref,
				int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
						&eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

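/*
 * call iterate() for every (inum, offset) pair recorded in the inode list of
 * one resolved leaf/root combination; a non-zero return from iterate() stops
 * the loop and is passed back to the caller.
 */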
static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
				u64 root, u64 extent_item_objectid,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct btrfs_trans_handle *trans;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list seq_elem = {};
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;

	pr_debug("resolving all inodes for extent %llu\n",
			extent_item_objectid);

	if (search_commit_root) {
		trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
	} else {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		btrfs_get_delayed_seq(delayed_refs, &seq_elem);
		spin_unlock(&delayed_refs->lock);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   seq_elem.seq, tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
						seq_elem.seq,
						tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#lx\n", root_node->val, ref_node->val,
				 ref_node->aux);
			ret = iterate_leaf_refs(
				(struct extent_inode_elem *)ref_node->aux,
				root_node->val, extent_item_objectid,
				iterate, ctx);
		}
		ulist_free(roots);
		roots = NULL;
	}

	free_leaf_list(refs);
	ulist_free(roots);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_put_delayed_seq(delayed_refs, &seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}

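/*
 * convenience wrapper: resolves the extent item covering 'logical' and
 * iterates all inodes referencing it. tree blocks yield -EINVAL, since only
 * data extents can be mapped back to inodes.
 */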
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path,
					&found_key);
	btrfs_release_path(path);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx);

	return ret;
}

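/*
 * visit every INODE_REF item of inum (one per parent directory) and call
 * iterate() for each name stored in it; paths_from_inode uses this with
 * inode_to_path to build one full path per hard link.
 */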
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			struct btrfs_path *path,
			iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		path->leave_spinning = 1;
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
					&found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur,
				 (unsigned long long)found_key.objectid,
				 (unsigned long long)fs_root->objectid);
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
				struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
				inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		pr_debug("path resolved: %s\n", fspath);
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		pr_debug("missed path, not enough space. missing bytes: %lu, "
			 "constructed so far: %s\n",
			 (unsigned long)(fspath_min - fspath), fspath_min);
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
				inode_to_path, ipath);
}

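/*
 * allocate a btrfs_data_container of at least sizeof(*data) bytes; when
 * total_bytes cannot even hold the header, bytes_missing records the
 * shortfall so the caller can retry with a bigger buffer.
 */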
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

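/*
 * releases an inode_fs_paths allocated by init_ipath together with its
 * embedded data container; the btrfs_path handed to init_ipath is not freed
 * here and remains owned by the caller.
 */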
void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kfree(ipath->fspath);
	kfree(ipath);
}
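
/*
 * Rough usage sketch for the ipath API above (illustrative only, error
 * handling trimmed; "fs_root" and "ino" are assumed to be provided by the
 * caller):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath = init_ipath(4096, fs_root, path);
 *	int ret = paths_from_inode(ino, ipath);
 *	// on success, ipath->fspath->val[0 .. elem_cnt - 1] hold the paths
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 */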