btrfs: fix resolving backrefs for inline extent followed by prealloc
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
        u64 inum;
        u64 offset;
        struct extent_inode_elem *next;
};

static int check_extent_in_eb(const struct btrfs_key *key,
                              const struct extent_buffer *eb,
                              const struct btrfs_file_extent_item *fi,
                              u64 extent_item_pos,
                              struct extent_inode_elem **eie,
                              bool ignore_offset)
{
        u64 offset = 0;
        struct extent_inode_elem *e;

        if (!ignore_offset &&
            !btrfs_file_extent_compression(eb, fi) &&
            !btrfs_file_extent_encryption(eb, fi) &&
            !btrfs_file_extent_other_encoding(eb, fi)) {
                u64 data_offset;
                u64 data_len;

                data_offset = btrfs_file_extent_offset(eb, fi);
                data_len = btrfs_file_extent_num_bytes(eb, fi);

                if (extent_item_pos < data_offset ||
                    extent_item_pos >= data_offset + data_len)
                        return 1;
                offset = extent_item_pos - data_offset;
        }

        e = kmalloc(sizeof(*e), GFP_NOFS);
        if (!e)
                return -ENOMEM;

        e->next = *eie;
        e->inum = key->objectid;
        e->offset = key->offset + offset;
        *eie = e;

        return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
        struct extent_inode_elem *eie_next;

        for (; eie; eie = eie_next) {
                eie_next = eie->next;
                kfree(eie);
        }
}

static int find_extent_in_eb(const struct extent_buffer *eb,
                             u64 wanted_disk_byte, u64 extent_item_pos,
                             struct extent_inode_elem **eie,
                             bool ignore_offset)
{
        u64 disk_byte;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int slot;
        int nritems;
        int extent_type;
        int ret;

        /*
         * From the shared data ref, we only have the leaf but we need
         * the key. Thus, we must look into all items and see whether we
         * find one (or some) with a reference to our extent item.
         */
        nritems = btrfs_header_nritems(eb);
        for (slot = 0; slot < nritems; ++slot) {
                btrfs_item_key_to_cpu(eb, &key, slot);
                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;
                fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(eb, fi);
                if (extent_type == BTRFS_FILE_EXTENT_INLINE)
                        continue;
                /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
                disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
                if (disk_byte != wanted_disk_byte)
                        continue;

                ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

struct preftree {
        struct rb_root_cached root;
        unsigned int count;
};

#define PREFTREE_INIT   { .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
        struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
        struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
        struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
        u64 root_objectid;
        u64 inum;
        int share_count;
        bool have_delayed_delete_refs;
};

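/*
 * Return BACKREF_FOUND_SHARED if @sc has seen more than one prelim_ref
 * with a positive count, i.e. the extent is shared; 0 otherwise.
 */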
static inline int extent_is_shared(struct share_check *sc)
{
        return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
        btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
                                        sizeof(struct prelim_ref),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!btrfs_prelim_ref_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
        kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
        kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
                              struct prelim_ref *ref2)
{
        if (ref1->level < ref2->level)
                return -1;
        if (ref1->level > ref2->level)
                return 1;
        if (ref1->root_id < ref2->root_id)
                return -1;
        if (ref1->root_id > ref2->root_id)
                return 1;
        if (ref1->key_for_search.type < ref2->key_for_search.type)
                return -1;
        if (ref1->key_for_search.type > ref2->key_for_search.type)
                return 1;
        if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
                return -1;
        if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
                return 1;
        if (ref1->key_for_search.offset < ref2->key_for_search.offset)
                return -1;
        if (ref1->key_for_search.offset > ref2->key_for_search.offset)
                return 1;
        if (ref1->parent < ref2->parent)
                return -1;
        if (ref1->parent > ref2->parent)
                return 1;

        return 0;
}

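/*
 * Adjust sc->share_count for a ref whose count changes from @oldcount
 * to @newcount, following the transition rules described above for
 * struct share_check.
 */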
static void update_share_count(struct share_check *sc, int oldcount,
                               int newcount)
{
        if ((!sc) || (oldcount == 0 && newcount < 1))
                return;

        if (oldcount > 0 && newcount < 1)
                sc->share_count--;
        else if (oldcount < 1 && newcount > 0)
                sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
                              struct preftree *preftree,
                              struct prelim_ref *newref,
                              struct share_check *sc)
{
        struct rb_root_cached *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct prelim_ref *ref;
        int result;
        bool leftmost = true;

        root = &preftree->root;
        p = &root->rb_root.rb_node;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct prelim_ref, rbnode);
                result = prelim_ref_compare(ref, newref);
                if (result < 0) {
                        p = &(*p)->rb_left;
                } else if (result > 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        /* Identical refs, merge them and free @newref */
                        struct extent_inode_elem *eie = ref->inode_list;

                        while (eie && eie->next)
                                eie = eie->next;

                        if (!eie)
                                ref->inode_list = newref->inode_list;
                        else
                                eie->next = newref->inode_list;
                        trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
                                                     preftree->count);
                        /*
                         * A delayed ref can have newref->count < 0.
                         * The ref->count is updated to follow any
                         * BTRFS_[ADD|DROP]_DELAYED_REF actions.
                         */
                        update_share_count(sc, ref->count,
                                           ref->count + newref->count);
                        ref->count += newref->count;
                        free_pref(newref);
                        return;
                }
        }

        update_share_count(sc, 0, newref->count);
        preftree->count++;
        trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
        rb_link_node(&newref->rbnode, parent, p);
        rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
        struct prelim_ref *ref, *next_ref;

        rbtree_postorder_for_each_entry_safe(ref, next_ref,
                                             &preftree->root.rb_root, rbnode) {
                free_inode_elem_list(ref->inode_list);
                free_pref(ref);
        }

        preftree->root = RB_ROOT_CACHED;
        preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
                          struct preftree *preftree, u64 root_id,
                          const struct btrfs_key *key, int level, u64 parent,
                          u64 wanted_disk_byte, int count,
                          struct share_check *sc, gfp_t gfp_mask)
{
        struct prelim_ref *ref;

        if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
                return 0;

        ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
        if (!ref)
                return -ENOMEM;

        ref->root_id = root_id;
        if (key)
                ref->key_for_search = *key;
        else
                memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

        ref->inode_list = NULL;
        ref->level = level;
        ref->count = count;
        ref->parent = parent;
        ref->wanted_disk_byte = wanted_disk_byte;
        prelim_ref_insert(fs_info, preftree, ref, sc);
        return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
                          struct preftrees *preftrees, int level, u64 parent,
                          u64 wanted_disk_byte, int count,
                          struct share_check *sc, gfp_t gfp_mask)
{
        return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
                              parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
                            struct preftrees *preftrees, u64 root_id,
                            const struct btrfs_key *key, int level,
                            u64 wanted_disk_byte, int count,
                            struct share_check *sc, gfp_t gfp_mask)
{
        struct preftree *tree = &preftrees->indirect;

        if (!key)
                tree = &preftrees->indirect_missing_keys;
        return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
                              wanted_disk_byte, count, sc, gfp_mask);
}

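/*
 * Check whether the direct preftree contains a shared data backref with
 * @bytenr as its parent. The zero-initialized target compares equal only
 * to refs with level 0, root_id 0 and a zeroed search key, i.e. shared
 * data refs keyed by their parent.
 */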
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
        struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct prelim_ref *ref = NULL;
        struct prelim_ref target = {};
        int result;

        target.parent = bytenr;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct prelim_ref, rbnode);
                result = prelim_ref_compare(ref, &target);

                if (result < 0)
                        p = &(*p)->rb_left;
                else if (result > 0)
                        p = &(*p)->rb_right;
                else
                        return 1;
        }
        return 0;
}

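/*
 * For a resolved indirect ref, collect all parents into @parents: for
 * level > 0 this is just the tree block at that level in the path; for
 * level 0 we walk the file extent items in this and following leaves
 * and add each leaf that references wanted_disk_byte.
 */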
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
                           struct ulist *parents,
                           struct preftrees *preftrees, struct prelim_ref *ref,
                           int level, u64 time_seq, const u64 *extent_item_pos,
                           bool ignore_offset)
{
        int ret = 0;
        int slot;
        struct extent_buffer *eb;
        struct btrfs_key key;
        struct btrfs_key *key_for_search = &ref->key_for_search;
        struct btrfs_file_extent_item *fi;
        struct extent_inode_elem *eie = NULL, *old = NULL;
        u64 disk_byte;
        u64 wanted_disk_byte = ref->wanted_disk_byte;
        u64 count = 0;
        u64 data_offset;
        u8 type;

        if (level != 0) {
                eb = path->nodes[level];
                ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
                if (ret < 0)
                        return ret;
                return 0;
        }

        /*
         * 1. We normally enter this function with the path already pointing to
         *    the first item to check. But sometimes, we may enter it with
         *    slot == nritems.
         * 2. We are searching for a normal backref but the bytenr of this leaf
         *    matches a shared data backref.
         * 3. The leaf owner is not equal to the root we are searching for.
         *
         * For these cases, go to the next leaf before we continue.
         */
        eb = path->nodes[0];
        if (path->slots[0] >= btrfs_header_nritems(eb) ||
            is_shared_data_backref(preftrees, eb->start) ||
            ref->root_id != btrfs_header_owner(eb)) {
                if (time_seq == BTRFS_SEQ_LAST)
                        ret = btrfs_next_leaf(root, path);
                else
                        ret = btrfs_next_old_leaf(root, path, time_seq);
        }

        while (!ret && count < ref->count) {
                eb = path->nodes[0];
                slot = path->slots[0];

                btrfs_item_key_to_cpu(eb, &key, slot);

                if (key.objectid != key_for_search->objectid ||
                    key.type != BTRFS_EXTENT_DATA_KEY)
                        break;

                /*
                 * We are searching for a normal backref but the bytenr of this
                 * leaf matches a shared data backref, OR
                 * the leaf owner is not equal to the root we are searching for.
                 */
                if (slot == 0 &&
                    (is_shared_data_backref(preftrees, eb->start) ||
                     ref->root_id != btrfs_header_owner(eb))) {
                        if (time_seq == BTRFS_SEQ_LAST)
                                ret = btrfs_next_leaf(root, path);
                        else
                                ret = btrfs_next_old_leaf(root, path, time_seq);
                        continue;
                }
                fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
                type = btrfs_file_extent_type(eb, fi);
                if (type == BTRFS_FILE_EXTENT_INLINE)
                        goto next;
                disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
                data_offset = btrfs_file_extent_offset(eb, fi);

                if (disk_byte == wanted_disk_byte) {
                        eie = NULL;
                        old = NULL;
                        if (ref->key_for_search.offset == key.offset - data_offset)
                                count++;
                        else
                                goto next;
                        if (extent_item_pos) {
                                ret = check_extent_in_eb(&key, eb, fi,
                                                *extent_item_pos,
                                                &eie, ignore_offset);
                                if (ret < 0)
                                        break;
                        }
                        if (ret > 0)
                                goto next;
                        ret = ulist_add_merge_ptr(parents, eb->start,
                                                  eie, (void **)&old, GFP_NOFS);
                        if (ret < 0)
                                break;
                        if (!ret && extent_item_pos) {
                                while (old->next)
                                        old = old->next;
                                old->next = eie;
                        }
                        eie = NULL;
                }
next:
                if (time_seq == BTRFS_SEQ_LAST)
                        ret = btrfs_next_item(root, path);
                else
                        ret = btrfs_next_old_item(root, path, time_seq);
        }

        if (ret > 0)
                ret = 0;
        else if (ret < 0)
                free_inode_elem_list(eie);
        return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                                struct btrfs_path *path, u64 time_seq,
                                struct preftrees *preftrees,
                                struct prelim_ref *ref, struct ulist *parents,
                                const u64 *extent_item_pos, bool ignore_offset)
{
        struct btrfs_root *root;
        struct extent_buffer *eb;
        int ret = 0;
        int root_level;
        int level = ref->level;
        struct btrfs_key search_key = ref->key_for_search;

        /*
         * If we're search_commit_root we could possibly be holding locks on
         * other tree nodes.  This happens when qgroups does backref walks when
         * adding new delayed refs.  To deal with this we need to look in cache
         * for the root, and if we don't find it then we need to search the
         * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
         * here.
         */
        if (path->search_commit_root)
                root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
        else
                root = btrfs_get_fs_root(fs_info, ref->root_id, false);
        if (IS_ERR(root)) {
                ret = PTR_ERR(root);
                goto out_free;
        }

        if (!path->search_commit_root &&
            test_bit(BTRFS_ROOT_DELETING, &root->state)) {
                ret = -ENOENT;
                goto out;
        }

        if (btrfs_is_testing(fs_info)) {
                ret = -ENOENT;
                goto out;
        }

        if (path->search_commit_root)
                root_level = btrfs_header_level(root->commit_root);
        else if (time_seq == BTRFS_SEQ_LAST)
                root_level = btrfs_header_level(root->node);
        else
                root_level = btrfs_old_root_level(root, time_seq);

        if (root_level + 1 == level)
                goto out;

        /*
         * We can often find data backrefs with an offset that is too large
         * (>= LLONG_MAX, the maximum allowed file offset) due to underflows
         * when subtracting a file's offset with the data offset of its
         * corresponding extent data item. This can happen for example in the
         * clone ioctl.
         *
         * So if we detect such a case we set the search key's offset to zero
         * to make sure we will find the matching file extent item at
         * add_all_parents(), otherwise we will miss it because the offset
         * taken from the backref is much larger than the offset of the file
         * extent item. This can make us scan a very large number of file
         * extent items, but at least it will not make us miss any.
         *
         * This is an ugly workaround for a behaviour that should have never
         * existed, but it does and a fix for the clone ioctl would touch a lot
         * of places, cause backwards incompatibility and would not fix the
         * problem for extents cloned with older kernels.
         */
        if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
            search_key.offset >= LLONG_MAX)
                search_key.offset = 0;
        path->lowest_level = level;
        if (time_seq == BTRFS_SEQ_LAST)
                ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
        else
                ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

        btrfs_debug(fs_info,
                "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
                 ref->root_id, level, ref->count, ret,
                 ref->key_for_search.objectid, ref->key_for_search.type,
                 ref->key_for_search.offset);
        if (ret < 0)
                goto out;

        eb = path->nodes[level];
        while (!eb) {
                if (WARN_ON(!level)) {
                        ret = 1;
                        goto out;
                }
                level--;
                eb = path->nodes[level];
        }

        ret = add_all_parents(root, path, parents, preftrees, ref, level,
                              time_seq, extent_item_pos, ignore_offset);
out:
        btrfs_put_root(root);
out_free:
        path->lowest_level = 0;
        btrfs_release_path(path);
        return ret;
}

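/*
 * The aux field of a ulist node stores the head of an extent_inode_elem
 * list as an integer; convert it back to a pointer.
 */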
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
        if (!node)
                return NULL;
        return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

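/*
 * Free a ulist together with the inode element lists attached to the
 * aux field of each of its nodes.
 */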
static void free_leaf_list(struct ulist *ulist)
{
        struct ulist_node *node;
        struct ulist_iterator uiter;

        ULIST_ITER_INIT(&uiter);
        while ((node = ulist_next(ulist, &uiter)))
                free_inode_elem_list(unode_aux_to_inode_list(node));

        ulist_free(ulist);
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                                 struct btrfs_path *path, u64 time_seq,
                                 struct preftrees *preftrees,
                                 const u64 *extent_item_pos,
                                 struct share_check *sc, bool ignore_offset)
{
        int err;
        int ret = 0;
        struct ulist *parents;
        struct ulist_node *node;
        struct ulist_iterator uiter;
        struct rb_node *rnode;

        parents = ulist_alloc(GFP_NOFS);
        if (!parents)
                return -ENOMEM;

        /*
         * We could trade memory usage for performance here by iterating
         * the tree, allocating new refs for each insertion, and then
         * freeing the entire indirect tree when we're done.  In some test
         * cases, the tree can grow quite large (~200k objects).
         */
        while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
                struct prelim_ref *ref;

                ref = rb_entry(rnode, struct prelim_ref, rbnode);
                if (WARN(ref->parent,
                         "BUG: direct ref found in indirect tree")) {
                        ret = -EINVAL;
                        goto out;
                }

                rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
                preftrees->indirect.count--;

                if (ref->count == 0) {
                        free_pref(ref);
                        continue;
                }

                if (sc && sc->root_objectid &&
                    ref->root_id != sc->root_objectid) {
                        free_pref(ref);
                        ret = BACKREF_FOUND_SHARED;
                        goto out;
                }
                err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
                                           ref, parents, extent_item_pos,
                                           ignore_offset);
                /*
                 * We can only tolerate -ENOENT; otherwise, we should catch
                 * the error and return directly.
                 */
                if (err == -ENOENT) {
                        prelim_ref_insert(fs_info, &preftrees->direct, ref,
                                          NULL);
                        continue;
                } else if (err) {
                        free_pref(ref);
                        ret = err;
                        goto out;
                }

                /* we put the first parent into the ref at hand */
                ULIST_ITER_INIT(&uiter);
                node = ulist_next(parents, &uiter);
                ref->parent = node ? node->val : 0;
                ref->inode_list = unode_aux_to_inode_list(node);

                /* Add a prelim_ref(s) for any other parent(s). */
                while ((node = ulist_next(parents, &uiter))) {
                        struct prelim_ref *new_ref;

                        new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
                                                   GFP_NOFS);
                        if (!new_ref) {
                                free_pref(ref);
                                ret = -ENOMEM;
                                goto out;
                        }
                        memcpy(new_ref, ref, sizeof(*ref));
                        new_ref->parent = node->val;
                        new_ref->inode_list = unode_aux_to_inode_list(node);
                        prelim_ref_insert(fs_info, &preftrees->direct,
                                          new_ref, NULL);
                }

                /*
                 * Now it's a direct ref, put it in the direct tree. We must
                 * do this last because the ref could be merged/freed here.
                 */
                prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

                ulist_reinit(parents);
                cond_resched();
        }
out:
        /*
         * We may have inode lists attached to refs in the parents ulist, so we
         * must free them before freeing the ulist and its refs.
         */
        free_leaf_list(parents);
        return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
                            struct preftrees *preftrees, bool lock)
{
        struct prelim_ref *ref;
        struct extent_buffer *eb;
        struct preftree *tree = &preftrees->indirect_missing_keys;
        struct rb_node *node;

        while ((node = rb_first_cached(&tree->root))) {
                ref = rb_entry(node, struct prelim_ref, rbnode);
                rb_erase_cached(node, &tree->root);

                BUG_ON(ref->parent);    /* should not be a direct ref */
                BUG_ON(ref->key_for_search.type);
                BUG_ON(!ref->wanted_disk_byte);

                eb = read_tree_block(fs_info, ref->wanted_disk_byte,
                                     ref->root_id, 0, ref->level - 1, NULL);
                if (IS_ERR(eb)) {
                        free_pref(ref);
                        return PTR_ERR(eb);
                } else if (!extent_buffer_uptodate(eb)) {
                        free_pref(ref);
                        free_extent_buffer(eb);
                        return -EIO;
                }
                if (lock)
                        btrfs_tree_read_lock(eb);
                if (btrfs_header_level(eb) == 0)
                        btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
                else
                        btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
                if (lock)
                        btrfs_tree_read_unlock(eb);
                free_extent_buffer(eb);
                prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
                cond_resched();
        }
        return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_head *head, u64 seq,
                            struct preftrees *preftrees, struct share_check *sc)
{
        struct btrfs_delayed_ref_node *node;
        struct btrfs_key key;
        struct rb_node *n;
        int count;
        int ret = 0;

        spin_lock(&head->lock);
        for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
                node = rb_entry(n, struct btrfs_delayed_ref_node,
                                ref_node);
                if (node->seq > seq)
                        continue;

                switch (node->action) {
                case BTRFS_ADD_DELAYED_EXTENT:
                case BTRFS_UPDATE_DELAYED_HEAD:
                        WARN_ON(1);
                        continue;
                case BTRFS_ADD_DELAYED_REF:
                        count = node->ref_mod;
                        break;
                case BTRFS_DROP_DELAYED_REF:
                        count = node->ref_mod * -1;
                        break;
                default:
                        BUG();
                }
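                /* Dispatch on the type of backref this delayed ref carries. */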
                switch (node->type) {
                case BTRFS_TREE_BLOCK_REF_KEY: {
                        /* NORMAL INDIRECT METADATA backref */
                        struct btrfs_delayed_tree_ref *ref;
                        struct btrfs_key *key_ptr = NULL;

                        if (head->extent_op && head->extent_op->update_key) {
                                btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
                                key_ptr = &key;
                        }

                        ref = btrfs_delayed_node_to_tree_ref(node);
                        ret = add_indirect_ref(fs_info, preftrees, ref->root,
                                               key_ptr, ref->level + 1,
                                               node->bytenr, count, sc,
                                               GFP_ATOMIC);
                        break;
                }
                case BTRFS_SHARED_BLOCK_REF_KEY: {
                        /* SHARED DIRECT METADATA backref */
                        struct btrfs_delayed_tree_ref *ref;

                        ref = btrfs_delayed_node_to_tree_ref(node);

                        ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
                                             ref->parent, node->bytenr, count,
                                             sc, GFP_ATOMIC);
                        break;
                }
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        /* NORMAL INDIRECT DATA backref */
                        struct btrfs_delayed_data_ref *ref;
                        ref = btrfs_delayed_node_to_data_ref(node);

                        key.objectid = ref->objectid;
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = ref->offset;

                        /*
                         * If we have a share check context and a reference for
                         * another inode, we can't exit immediately. This is
                         * because even if this is a BTRFS_ADD_DELAYED_REF
                         * reference we may find next a BTRFS_DROP_DELAYED_REF
                         * which cancels out this ADD reference.
                         *
                         * If this is a DROP reference and there was no previous
                         * ADD reference, then we need to signal that when we
                         * process references from the extent tree (through
                         * add_inline_refs() and add_keyed_refs()), we should
                         * not exit early if we find a reference for another
                         * inode, because one of the delayed DROP references
                         * may cancel that reference in the extent tree.
                         */
                        if (sc && count < 0)
                                sc->have_delayed_delete_refs = true;

                        ret = add_indirect_ref(fs_info, preftrees, ref->root,
                                               &key, 0, node->bytenr, count, sc,
                                               GFP_ATOMIC);
                        break;
                }
                case BTRFS_SHARED_DATA_REF_KEY: {
                        /* SHARED DIRECT FULL backref */
                        struct btrfs_delayed_data_ref *ref;

                        ref = btrfs_delayed_node_to_data_ref(node);

                        ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
                                             node->bytenr, count, sc,
                                             GFP_ATOMIC);
                        break;
                }
                default:
                        WARN_ON(1);
                }
                /*
                 * We must ignore BACKREF_FOUND_SHARED until all delayed
                 * refs have been checked.
                 */
                if (ret && (ret != BACKREF_FOUND_SHARED))
                        break;
        }
        if (!ret)
                ret = extent_is_shared(sc);

        spin_unlock(&head->lock);
        return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
                           struct btrfs_path *path, u64 bytenr,
                           int *info_level, struct preftrees *preftrees,
                           struct share_check *sc)
{
        int ret = 0;
        int slot;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        unsigned long ptr;
        unsigned long end;
        struct btrfs_extent_item *ei;
        u64 flags;
        u64 item_size;

        /*
         * enumerate all inline refs
         */
        leaf = path->nodes[0];
        slot = path->slots[0];

        item_size = btrfs_item_size_nr(leaf, slot);
        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);
        btrfs_item_key_to_cpu(leaf, &found_key, slot);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
            flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                struct btrfs_tree_block_info *info;

                info = (struct btrfs_tree_block_info *)ptr;
                *info_level = btrfs_tree_block_level(leaf, info);
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
                *info_level = found_key.offset;
        } else {
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
        }

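        /* Walk each inline ref packed after the extent item. */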
        while (ptr < end) {
                struct btrfs_extent_inline_ref *iref;
                u64 offset;
                int type;

                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_get_extent_inline_ref_type(leaf, iref,
                                                        BTRFS_REF_TYPE_ANY);
                if (type == BTRFS_REF_TYPE_INVALID)
                        return -EUCLEAN;

                offset = btrfs_extent_inline_ref_offset(leaf, iref);

                switch (type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        ret = add_direct_ref(fs_info, preftrees,
                                             *info_level + 1, offset,
                                             bytenr, 1, NULL, GFP_NOFS);
                        break;
                case BTRFS_SHARED_DATA_REF_KEY: {
                        struct btrfs_shared_data_ref *sdref;
                        int count;

                        sdref = (struct btrfs_shared_data_ref *)(iref + 1);
                        count = btrfs_shared_data_ref_count(leaf, sdref);

                        ret = add_direct_ref(fs_info, preftrees, 0, offset,
                                             bytenr, count, sc, GFP_NOFS);
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
                        ret = add_indirect_ref(fs_info, preftrees, offset,
                                               NULL, *info_level + 1,
                                               bytenr, 1, NULL, GFP_NOFS);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        struct btrfs_extent_data_ref *dref;
                        int count;
                        u64 root;

                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        count = btrfs_extent_data_ref_count(leaf, dref);
                        key.objectid = btrfs_extent_data_ref_objectid(leaf,
                                                                      dref);
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);

                        if (sc && sc->inum && key.objectid != sc->inum &&
                            !sc->have_delayed_delete_refs) {
                                ret = BACKREF_FOUND_SHARED;
                                break;
                        }

                        root = btrfs_extent_data_ref_root(leaf, dref);

                        ret = add_indirect_ref(fs_info, preftrees, root,
                                               &key, 0, bytenr, count,
                                               sc, GFP_NOFS);

                        break;
                }
                default:
                        WARN_ON(1);
                }
                if (ret)
                        return ret;
                ptr += btrfs_extent_inline_ref_size(type);
        }

        return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
                          struct btrfs_path *path, u64 bytenr,
                          int info_level, struct preftrees *preftrees,
                          struct share_check *sc)
{
        struct btrfs_root *extent_root = fs_info->extent_root;
        int ret;
        int slot;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        while (1) {
                ret = btrfs_next_item(extent_root, path);
                if (ret < 0)
                        break;
                if (ret) {
                        ret = 0;
                        break;
                }

                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, slot);

                if (key.objectid != bytenr)
                        break;
                if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
                        continue;
                if (key.type > BTRFS_SHARED_DATA_REF_KEY)
                        break;

                switch (key.type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        /* SHARED DIRECT METADATA backref */
                        ret = add_direct_ref(fs_info, preftrees,
                                             info_level + 1, key.offset,
                                             bytenr, 1, NULL, GFP_NOFS);
                        break;
                case BTRFS_SHARED_DATA_REF_KEY: {
                        /* SHARED DIRECT FULL backref */
                        struct btrfs_shared_data_ref *sdref;
                        int count;

                        sdref = btrfs_item_ptr(leaf, slot,
                                              struct btrfs_shared_data_ref);
                        count = btrfs_shared_data_ref_count(leaf, sdref);
                        ret = add_direct_ref(fs_info, preftrees, 0,
                                             key.offset, bytenr, count,
                                             sc, GFP_NOFS);
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
                        /* NORMAL INDIRECT METADATA backref */
                        ret = add_indirect_ref(fs_info, preftrees, key.offset,
                                               NULL, info_level + 1, bytenr,
                                               1, NULL, GFP_NOFS);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        /* NORMAL INDIRECT DATA backref */
                        struct btrfs_extent_data_ref *dref;
                        int count;
                        u64 root;

                        dref = btrfs_item_ptr(leaf, slot,
                                              struct btrfs_extent_data_ref);
                        count = btrfs_extent_data_ref_count(leaf, dref);
                        key.objectid = btrfs_extent_data_ref_objectid(leaf,
                                                                      dref);
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);

                        if (sc && sc->inum && key.objectid != sc->inum &&
                            !sc->have_delayed_delete_refs) {
                                ret = BACKREF_FOUND_SHARED;
                                break;
                        }

                        root = btrfs_extent_data_ref_root(leaf, dref);
                        ret = add_indirect_ref(fs_info, preftrees, root,
                                               &key, 0, bytenr, count,
                                               sc, GFP_NOFS);
                        break;
                }
                default:
                        WARN_ON(1);
                }
                if (ret)
                        return ret;

        }

        return ret;
}

/*
 * Add all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merge duplicates and resolve
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * If time_seq is set to BTRFS_SEQ_LAST, it will not search delayed_refs, and
 * behaves much like the trans == NULL case; the only difference is that it
 * will not use the commit root.
 * This special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info, u64 bytenr,
                             u64 time_seq, struct ulist *refs,
                             struct ulist *roots, const u64 *extent_item_pos,
                             struct share_check *sc, bool ignore_offset)
{
        struct btrfs_key key;
        struct btrfs_path *path;
        struct btrfs_delayed_ref_root *delayed_refs = NULL;
        struct btrfs_delayed_ref_head *head;
        int info_level = 0;
        int ret;
        struct prelim_ref *ref;
        struct rb_node *node;
        struct extent_inode_elem *eie = NULL;
        struct preftrees preftrees = {
                .direct = PREFTREE_INIT,
                .indirect = PREFTREE_INIT,
                .indirect_missing_keys = PREFTREE_INIT
        };

        key.objectid = bytenr;
        key.offset = (u64)-1;
        if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        if (!trans) {
                path->search_commit_root = 1;
                path->skip_locking = 1;
        }

        if (time_seq == BTRFS_SEQ_LAST)
                path->skip_locking = 1;

        /*
         * grab both a lock on the path and a lock on the delayed ref head.
         * We need both to get a consistent picture of how the refs look
         * at a specified point in time
         */
again:
        head = NULL;

        ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret == 0) {
                /* This shouldn't happen, indicates a bug or fs corruption. */
                ASSERT(ret != 0);
                ret = -EUCLEAN;
                goto out;
        }

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
        if (trans && likely(trans->type != __TRANS_DUMMY) &&
            time_seq != BTRFS_SEQ_LAST) {
#else
        if (trans && time_seq != BTRFS_SEQ_LAST) {
#endif
                /*
                 * look if there are updates for this ref queued and lock the
                 * head
                 */
                delayed_refs = &trans->transaction->delayed_refs;
                spin_lock(&delayed_refs->lock);
                head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
                if (head) {
                        if (!mutex_trylock(&head->mutex)) {
                                refcount_inc(&head->refs);
                                spin_unlock(&delayed_refs->lock);

                                btrfs_release_path(path);

                                /*
                                 * Mutex was contended, block until it's
                                 * released and try again
                                 */
                                mutex_lock(&head->mutex);
                                mutex_unlock(&head->mutex);
                                btrfs_put_delayed_ref_head(head);
                                goto again;
                        }
                        spin_unlock(&delayed_refs->lock);
                        ret = add_delayed_refs(fs_info, head, time_seq,
                                               &preftrees, sc);
                        mutex_unlock(&head->mutex);
                        if (ret)
                                goto out;
                } else {
                        spin_unlock(&delayed_refs->lock);
                }
        }

        if (path->slots[0]) {
                struct extent_buffer *leaf;
                int slot;

                path->slots[0]--;
                leaf = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid == bytenr &&
                    (key.type == BTRFS_EXTENT_ITEM_KEY ||
                     key.type == BTRFS_METADATA_ITEM_KEY)) {
                        ret = add_inline_refs(fs_info, path, bytenr,
                                              &info_level, &preftrees, sc);
                        if (ret)
                                goto out;
                        ret = add_keyed_refs(fs_info, path, bytenr, info_level,
                                             &preftrees, sc);
                        if (ret)
                                goto out;
                }
        }

        btrfs_release_path(path);

        ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
        if (ret)
                goto out;

        WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

        ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
                                    extent_item_pos, sc, ignore_offset);
        if (ret)
                goto out;

        WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

        /*
         * This walks the tree of merged and resolved refs. Tree blocks are
         * read in as needed. Unique entries are added to the ulist, and
         * the list of found roots is updated.
         *
         * We release the entire tree in one go before returning.
         */
        node = rb_first_cached(&preftrees.direct.root);
        while (node) {
                ref = rb_entry(node, struct prelim_ref, rbnode);
                node = rb_next(&ref->rbnode);
                /*
                 * ref->count < 0 can happen here if there are delayed
                 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
                 * prelim_ref_insert() relies on this when merging
                 * identical refs to keep the overall count correct.
                 * prelim_ref_insert() will merge only those refs
                 * which compare identically.  Any refs having
                 * e.g. different offsets would not be merged,
                 * and would retain their original ref->count < 0.
                 */
                if (roots && ref->count && ref->root_id && ref->parent == 0) {
                        if (sc && sc->root_objectid &&
                            ref->root_id != sc->root_objectid) {
                                ret = BACKREF_FOUND_SHARED;
                                goto out;
                        }

                        /* no parent == root of tree */
                        ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
                        if (ret < 0)
                                goto out;
                }
                if (ref->count && ref->parent) {
                        if (extent_item_pos && !ref->inode_list &&
                            ref->level == 0) {
                                struct extent_buffer *eb;

                                eb = read_tree_block(fs_info, ref->parent, 0,
                                                     0, ref->level, NULL);
                                if (IS_ERR(eb)) {
                                        ret = PTR_ERR(eb);
                                        goto out;
                                } else if (!extent_buffer_uptodate(eb)) {
                                        free_extent_buffer(eb);
                                        ret = -EIO;
                                        goto out;
                                }

                                if (!path->skip_locking)
                                        btrfs_tree_read_lock(eb);
                                ret = find_extent_in_eb(eb, bytenr,
                                                        *extent_item_pos, &eie, ignore_offset);
                                if (!path->skip_locking)
                                        btrfs_tree_read_unlock(eb);
                                free_extent_buffer(eb);
                                if (ret < 0)
                                        goto out;
                                ref->inode_list = eie;
                                /*
                                 * We transferred the list ownership to the ref,
                                 * so set to NULL to avoid a double free in case
                                 * an error happens after this.
                                 */
                                eie = NULL;
                        }
1403                         ret = ulist_add_merge_ptr(refs, ref->parent,
1404                                                   ref->inode_list,
1405                                                   (void **)&eie, GFP_NOFS);
1406                         if (ret < 0)
1407                                 goto out;
1408                         if (!ret && extent_item_pos) {
1409                                 /*
1410                                  * We've recorded that parent, so we must extend
1411                                  * its inode list here.
1412                                  *
1413                                  * However if there was corruption we may not
1414                                  * have found an eie, return an error in this
1415                                  * case.
1416                                  */
1417                                 ASSERT(eie);
1418                                 if (!eie) {
1419                                         ret = -EUCLEAN;
1420                                         goto out;
1421                                 }
1422                                 while (eie->next)
1423                                         eie = eie->next;
1424                                 eie->next = ref->inode_list;
1425                         }
1426                         eie = NULL;
1427                         /*
1428                          * We have transferred the inode list ownership from
1429                          * this ref to the ref we added to the 'refs' ulist.
1430                          * So set this ref's inode list to NULL to avoid
1431                          * use-after-free when our caller uses it or double
1432                          * frees in case an error happens before we return.
1433                          */
1434                         ref->inode_list = NULL;
1435                 }
1436                 cond_resched();
1437         }
1438
1439 out:
1440         btrfs_free_path(path);
1441
1442         prelim_release(&preftrees.direct);
1443         prelim_release(&preftrees.indirect);
1444         prelim_release(&preftrees.indirect_missing_keys);
1445
1446         if (ret < 0)
1447                 free_inode_elem_list(eie);
1448         return ret;
1449 }
1450
1451 /*
1452  * Finds all leafs with a reference to the specified combination of bytenr and
1453  * offset. The leafs will be stored in the leafs ulist, which must be freed
1454  * with free_leaf_list() - each ulist node's aux field may point to a list of
1455  * extent_inode_elem describing the file extent items that reference it.
1456  *
1457  * returns 0 on success, <0 on error
1458  */
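/*
 * Illustrative use of btrfs_find_all_leafs() below (a sketch, not taken from
 * a real caller; error handling trimmed, names hypothetical):
 *
 *	struct ulist *leafs = NULL;
 *
 *	ret = btrfs_find_all_leafs(trans, fs_info, bytenr, seq, &leafs,
 *				   &extent_item_pos, false);
 *	if (ret)
 *		return ret;
 *	... walk 'leafs'; each node's aux is an extent_inode_elem list ...
 *	free_leaf_list(leafs);
 */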
1459 int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1460                          struct btrfs_fs_info *fs_info, u64 bytenr,
1461                          u64 time_seq, struct ulist **leafs,
1462                          const u64 *extent_item_pos, bool ignore_offset)
1463 {
1464         int ret;
1465
1466         *leafs = ulist_alloc(GFP_NOFS);
1467         if (!*leafs)
1468                 return -ENOMEM;
1469
1470         ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1471                                 *leafs, NULL, extent_item_pos, NULL, ignore_offset);
1472         if (ret < 0 && ret != -ENOENT) {
1473                 free_leaf_list(*leafs);
1474                 return ret;
1475         }
1476
1477         return 0;
1478 }
1479
1480 /*
1481  * walk all backrefs for a given extent to find all roots that reference this
1482  * extent. Walking a backref means finding all extents that reference this
1483  * extent and in turn walk the backrefs of those, too. Naturally this is a
1484  * recursive process, but here it is implemented in an iterative fashion: We
1485  * find all referencing extents for the extent in question and put them on a
1486  * list. In turn, we find all referencing extents for those, further appending
1487  * to the list. The way we iterate the list allows adding more elements after
1488  * the current while iterating. The process stops when we reach the end of the
1489  * list. Found roots are added to the roots list.
1490  *
1491  * returns 0 on success, < 0 on error.
1492  */
1493 static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1494                                      struct btrfs_fs_info *fs_info, u64 bytenr,
1495                                      u64 time_seq, struct ulist **roots,
1496                                      bool ignore_offset)
1497 {
1498         struct ulist *tmp;
1499         struct ulist_node *node = NULL;
1500         struct ulist_iterator uiter;
1501         int ret;
1502
1503         tmp = ulist_alloc(GFP_NOFS);
1504         if (!tmp)
1505                 return -ENOMEM;
1506         *roots = ulist_alloc(GFP_NOFS);
1507         if (!*roots) {
1508                 ulist_free(tmp);
1509                 return -ENOMEM;
1510         }
1511
1512         ULIST_ITER_INIT(&uiter);
1513         while (1) {
1514                 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1515                                         tmp, *roots, NULL, NULL, ignore_offset);
1516                 if (ret < 0 && ret != -ENOENT) {
1517                         ulist_free(tmp);
1518                         ulist_free(*roots);
1519                         *roots = NULL;
1520                         return ret;
1521                 }
1522                 node = ulist_next(tmp, &uiter);
1523                 if (!node)
1524                         break;
1525                 bytenr = node->val;
1526                 cond_resched();
1527         }
1528
1529         ulist_free(tmp);
1530         return 0;
1531 }
1532
1533 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1534                          struct btrfs_fs_info *fs_info, u64 bytenr,
1535                          u64 time_seq, struct ulist **roots,
1536                          bool skip_commit_root_sem)
1537 {
1538         int ret;
1539
1540         if (!trans && !skip_commit_root_sem)
1541                 down_read(&fs_info->commit_root_sem);
1542         ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1543                                         time_seq, roots, false);
1544         if (!trans && !skip_commit_root_sem)
1545                 up_read(&fs_info->commit_root_sem);
1546         return ret;
1547 }
1548
1549 /**
1550  * btrfs_check_shared - check if an extent is shared or not
1551  *
1552  * @root:   root inode belongs to
1553  * @inum:   inode number of the inode whose extent we are checking
1554  * @bytenr: logical bytenr of the extent we are checking
1555  * @roots:  list of roots this extent is shared among
1556  * @tmp:    temporary list used for iteration
1557  *
1558  * btrfs_check_shared uses the backref walking code but will short
1559  * circuit as soon as it finds a root or inode that doesn't match the
1560  * one passed in. This provides a significant performance benefit for
1561  * callers (such as fiemap) which want to know whether the extent is
1562  * shared but do not need a ref count.
1563  *
1564  * This attempts to attach to the running transaction in order to account for
1565  * delayed refs, but continues on even when no running transaction exists.
1566  *
1567  * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1568  */
1569 int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1570                 struct ulist *roots, struct ulist *tmp)
1571 {
1572         struct btrfs_fs_info *fs_info = root->fs_info;
1573         struct btrfs_trans_handle *trans;
1574         struct ulist_iterator uiter;
1575         struct ulist_node *node;
1576         struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1577         int ret = 0;
1578         struct share_check shared = {
1579                 .root_objectid = root->root_key.objectid,
1580                 .inum = inum,
1581                 .share_count = 0,
1582                 .have_delayed_delete_refs = false,
1583         };
1584
1585         ulist_init(roots);
1586         ulist_init(tmp);
1587
1588         trans = btrfs_join_transaction_nostart(root);
1589         if (IS_ERR(trans)) {
1590                 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1591                         ret = PTR_ERR(trans);
1592                         goto out;
1593                 }
1594                 trans = NULL;
1595                 down_read(&fs_info->commit_root_sem);
1596         } else {
1597                 btrfs_get_tree_mod_seq(fs_info, &elem);
1598         }
1599
1600         ULIST_ITER_INIT(&uiter);
1601         while (1) {
1602                 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1603                                         roots, NULL, &shared, false);
1604                 if (ret == BACKREF_FOUND_SHARED) {
1605                         /* this is the only condition under which we return 1 */
1606                         ret = 1;
1607                         break;
1608                 }
1609                 if (ret < 0 && ret != -ENOENT)
1610                         break;
1611                 ret = 0;
1612                 node = ulist_next(tmp, &uiter);
1613                 if (!node)
1614                         break;
1615                 bytenr = node->val;
1616                 shared.share_count = 0;
1617                 shared.have_delayed_delete_refs = false;
1618                 cond_resched();
1619         }
1620
1621         if (trans) {
1622                 btrfs_put_tree_mod_seq(fs_info, &elem);
1623                 btrfs_end_transaction(trans);
1624         } else {
1625                 up_read(&fs_info->commit_root_sem);
1626         }
1627 out:
1628         ulist_release(roots);
1629         ulist_release(tmp);
1630         return ret;
1631 }
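
/*
 * Illustrative fiemap-style use of btrfs_check_shared() above (a sketch
 * only, not the actual fiemap code; error handling omitted):
 *
 *	ret = btrfs_check_shared(root, btrfs_ino(BTRFS_I(inode)),
 *				 disk_bytenr, roots, tmp_ulist);
 *	if (ret < 0)
 *		return ret;
 *	if (ret)
 *		flags |= FIEMAP_EXTENT_SHARED;
 */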
1632
1633 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1634                           u64 start_off, struct btrfs_path *path,
1635                           struct btrfs_inode_extref **ret_extref,
1636                           u64 *found_off)
1637 {
1638         int ret, slot;
1639         struct btrfs_key key;
1640         struct btrfs_key found_key;
1641         struct btrfs_inode_extref *extref;
1642         const struct extent_buffer *leaf;
1643         unsigned long ptr;
1644
1645         key.objectid = inode_objectid;
1646         key.type = BTRFS_INODE_EXTREF_KEY;
1647         key.offset = start_off;
1648
1649         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1650         if (ret < 0)
1651                 return ret;
1652
1653         while (1) {
1654                 leaf = path->nodes[0];
1655                 slot = path->slots[0];
1656                 if (slot >= btrfs_header_nritems(leaf)) {
1657                         /*
1658                          * If the item at offset is not found,
1659                          * btrfs_search_slot will point us to the slot
1660                          * where it should be inserted. In our case
1661                          * that will be the slot directly before the
1662                          * next BTRFS_INODE_EXTREF_KEY item. In the case
1663                          * that we're pointing to the last slot in a
1664                          * leaf, we must move one leaf over.
1665                          */
1666                         ret = btrfs_next_leaf(root, path);
1667                         if (ret) {
1668                                 if (ret >= 1)
1669                                         ret = -ENOENT;
1670                                 break;
1671                         }
1672                         continue;
1673                 }
1674
1675                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1676
1677                 /*
1678                  * Check that we're still looking at an extended ref key for
1679                  * this particular objectid. If we have a different
1680                  * objectid or type then there are no more to be found
1681                  * in the tree and we can exit.
1682                  */
1683                 ret = -ENOENT;
1684                 if (found_key.objectid != inode_objectid)
1685                         break;
1686                 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1687                         break;
1688
1689                 ret = 0;
1690                 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1691                 extref = (struct btrfs_inode_extref *)ptr;
1692                 *ret_extref = extref;
1693                 if (found_off)
1694                         *found_off = found_key.offset;
1695                 break;
1696         }
1697
1698         return ret;
1699 }
1700
1701 /*
1702  * this iterates to turn a name (from iref/extref) into a full filesystem path.
1703  * elements of the path are separated by '/' and the path is guaranteed to be
1704  * 0-terminated. the path is only given within the current file system.
1705  * therefore, it never starts with a '/'. the caller is responsible for
1706  * providing "size" bytes in "dest". the dest buffer is filled backwards and,
1707  * finally, the start of the resulting string is returned. normally this
1708  * pointer lies within dest.
1709  * in case the path buffer would overflow, the pointer is decremented further
1710  * as if output was written to the buffer, though no more output is actually
1711  * generated. that way, the caller can determine how much space would be
1712  * required for the path to fit into the buffer. in that case, the returned
1713  * value will be smaller than dest. callers must check this!
1714  */
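/*
 * Sketch of the overflow check described above for btrfs_ref_to_path()
 * (hypothetical caller):
 *
 *	char *p = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				    eb, parent, dest, size);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	if (p < dest)
 *		... truncated: dest + size - p bytes would have been needed ...
 */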
1715 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1716                         u32 name_len, unsigned long name_off,
1717                         struct extent_buffer *eb_in, u64 parent,
1718                         char *dest, u32 size)
1719 {
1720         int slot;
1721         u64 next_inum;
1722         int ret;
1723         s64 bytes_left = ((s64)size) - 1;
1724         struct extent_buffer *eb = eb_in;
1725         struct btrfs_key found_key;
1726         struct btrfs_inode_ref *iref;
1727
1728         if (bytes_left >= 0)
1729                 dest[bytes_left] = '\0';
1730
1731         while (1) {
1732                 bytes_left -= name_len;
1733                 if (bytes_left >= 0)
1734                         read_extent_buffer(eb, dest + bytes_left,
1735                                            name_off, name_len);
1736                 if (eb != eb_in) {
1737                         if (!path->skip_locking)
1738                                 btrfs_tree_read_unlock(eb);
1739                         free_extent_buffer(eb);
1740                 }
1741                 ret = btrfs_find_item(fs_root, path, parent, 0,
1742                                 BTRFS_INODE_REF_KEY, &found_key);
1743                 if (ret > 0)
1744                         ret = -ENOENT;
1745                 if (ret)
1746                         break;
1747
1748                 next_inum = found_key.offset;
1749
1750                 /* regular exit ahead */
1751                 if (parent == next_inum)
1752                         break;
1753
1754                 slot = path->slots[0];
1755                 eb = path->nodes[0];
1756                 /* make sure we can use eb after releasing the path */
1757                 if (eb != eb_in) {
1758                         path->nodes[0] = NULL;
1759                         path->locks[0] = 0;
1760                 }
1761                 btrfs_release_path(path);
1762                 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1763
1764                 name_len = btrfs_inode_ref_name_len(eb, iref);
1765                 name_off = (unsigned long)(iref + 1);
1766
1767                 parent = next_inum;
1768                 --bytes_left;
1769                 if (bytes_left >= 0)
1770                         dest[bytes_left] = '/';
1771         }
1772
1773         btrfs_release_path(path);
1774
1775         if (ret)
1776                 return ERR_PTR(ret);
1777
1778         return dest + bytes_left;
1779 }
1780
1781 /*
1782  * this makes the path point to (logical EXTENT_ITEM *). returns 0 and
1783  * stores BTRFS_EXTENT_FLAG_DATA (data) or BTRFS_EXTENT_FLAG_TREE_BLOCK
1784  * (tree blocks) in *flags_ret, or returns <0 on error.
1785  */
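/*
 * Illustrative call of extent_from_logical() (a sketch along the lines of
 * iterate_inodes_from_logical() further below; error handling trimmed):
 *
 *	u64 flags = 0;
 *	struct btrfs_key found_key;
 *
 *	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
 *	if (ret < 0)
 *		return ret;
 *	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 *		return -EINVAL;
 */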
1786 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1787                         struct btrfs_path *path, struct btrfs_key *found_key,
1788                         u64 *flags_ret)
1789 {
1790         int ret;
1791         u64 flags;
1792         u64 size = 0;
1793         u32 item_size;
1794         const struct extent_buffer *eb;
1795         struct btrfs_extent_item *ei;
1796         struct btrfs_key key;
1797
1798         if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1799                 key.type = BTRFS_METADATA_ITEM_KEY;
1800         else
1801                 key.type = BTRFS_EXTENT_ITEM_KEY;
1802         key.objectid = logical;
1803         key.offset = (u64)-1;
1804
1805         ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1806         if (ret < 0)
1807                 return ret;
1808
1809         ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1810         if (ret) {
1811                 if (ret > 0)
1812                         ret = -ENOENT;
1813                 return ret;
1814         }
1815         btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1816         if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1817                 size = fs_info->nodesize;
1818         else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1819                 size = found_key->offset;
1820
1821         if (found_key->objectid > logical ||
1822             found_key->objectid + size <= logical) {
1823                 btrfs_debug(fs_info,
1824                         "logical %llu is not within any extent", logical);
1825                 return -ENOENT;
1826         }
1827
1828         eb = path->nodes[0];
1829         item_size = btrfs_item_size_nr(eb, path->slots[0]);
1830         BUG_ON(item_size < sizeof(*ei));
1831
1832         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1833         flags = btrfs_extent_flags(eb, ei);
1834
1835         btrfs_debug(fs_info,
1836                 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1837                  logical, logical - found_key->objectid, found_key->objectid,
1838                  found_key->offset, flags, item_size);
1839
1840         WARN_ON(!flags_ret);
1841         if (flags_ret) {
1842                 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1843                         *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1844                 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1845                         *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1846                 else
1847                         BUG();
1848                 return 0;
1849         }
1850
1851         return -EIO;
1852 }
1853
1854 /*
1855  * helper function to iterate extent inline refs. ptr must point to a 0 value
1856  * for the first call and may be modified. it is used to track state.
1857  * if more refs exist, 0 is returned and the next call to
1858  * get_extent_inline_ref must pass the modified ptr parameter to get the
1859  * next ref. after the last ref was processed, 1 is returned.
1860  * returns <0 on error
1861  */
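/*
 * Typical iteration pattern for get_extent_inline_ref() (a sketch; it
 * mirrors how tree_backref_for_extent() below drives it):
 *
 *	unsigned long ptr = 0;
 *	int ret = 0;
 *
 *	while (ret == 0) {
 *		ret = get_extent_inline_ref(&ptr, eb, key, ei, item_size,
 *					    &eiref, &type);
 *		if (ret < 0)
 *			return ret;
 *		... eiref/type are valid here, also when ret == 1 (last ref) ...
 *	}
 */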
1862 static int get_extent_inline_ref(unsigned long *ptr,
1863                                  const struct extent_buffer *eb,
1864                                  const struct btrfs_key *key,
1865                                  const struct btrfs_extent_item *ei,
1866                                  u32 item_size,
1867                                  struct btrfs_extent_inline_ref **out_eiref,
1868                                  int *out_type)
1869 {
1870         unsigned long end;
1871         u64 flags;
1872         struct btrfs_tree_block_info *info;
1873
1874         if (!*ptr) {
1875                 /* first call */
1876                 flags = btrfs_extent_flags(eb, ei);
1877                 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1878                         if (key->type == BTRFS_METADATA_ITEM_KEY) {
1879                                 /* a skinny metadata extent */
1880                                 *out_eiref =
1881                                      (struct btrfs_extent_inline_ref *)(ei + 1);
1882                         } else {
1883                                 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1884                                 info = (struct btrfs_tree_block_info *)(ei + 1);
1885                                 *out_eiref =
1886                                    (struct btrfs_extent_inline_ref *)(info + 1);
1887                         }
1888                 } else {
1889                         *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1890                 }
1891                 *ptr = (unsigned long)*out_eiref;
1892                 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1893                         return -ENOENT;
1894         }
1895
1896         end = (unsigned long)ei + item_size;
1897         *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1898         *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1899                                                      BTRFS_REF_TYPE_ANY);
1900         if (*out_type == BTRFS_REF_TYPE_INVALID)
1901                 return -EUCLEAN;
1902
1903         *ptr += btrfs_extent_inline_ref_size(*out_type);
1904         WARN_ON(*ptr > end);
1905         if (*ptr == end)
1906                 return 1; /* last */
1907
1908         return 0;
1909 }
1910
1911 /*
1912  * reads the tree block backref for an extent. tree level and root are returned
1913  * through out_level and out_root. ptr must point to a 0 value for the first
1914  * call and may be modified (see get_extent_inline_ref comment).
1915  * returns 0 if data was provided, 1 if there was no more data to provide or
1916  * <0 on error.
1917  */
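/*
 * Sketch of the calling convention for tree_backref_for_extent() (modelled
 * on the scrub error reporting path; names are illustrative):
 *
 *	unsigned long ptr = 0;
 *	u64 ref_root;
 *	u8 ref_level;
 *
 *	while (1) {
 *		ret = tree_backref_for_extent(&ptr, eb, &key, ei, item_size,
 *					      &ref_root, &ref_level);
 *		if (ret)
 *			break;
 *		... one tree backref: root ref_root at level ref_level ...
 *	}
 */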
1918 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1919                             struct btrfs_key *key, struct btrfs_extent_item *ei,
1920                             u32 item_size, u64 *out_root, u8 *out_level)
1921 {
1922         int ret;
1923         int type;
1924         struct btrfs_extent_inline_ref *eiref;
1925
1926         if (*ptr == (unsigned long)-1)
1927                 return 1;
1928
1929         while (1) {
1930                 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1931                                               &eiref, &type);
1932                 if (ret < 0)
1933                         return ret;
1934
1935                 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1936                     type == BTRFS_SHARED_BLOCK_REF_KEY)
1937                         break;
1938
1939                 if (ret == 1)
1940                         return 1;
1941         }
1942
1943         /* we can treat both ref types equally here */
1944         *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1945
1946         if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1947                 struct btrfs_tree_block_info *info;
1948
1949                 info = (struct btrfs_tree_block_info *)(ei + 1);
1950                 *out_level = btrfs_tree_block_level(eb, info);
1951         } else {
1952                 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1953                 *out_level = (u8)key->offset;
1954         }
1955
1956         if (ret == 1)
1957                 *ptr = (unsigned long)-1;
1958
1959         return 0;
1960 }
1961
1962 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1963                              struct extent_inode_elem *inode_list,
1964                              u64 root, u64 extent_item_objectid,
1965                              iterate_extent_inodes_t *iterate, void *ctx)
1966 {
1967         struct extent_inode_elem *eie;
1968         int ret = 0;
1969
1970         for (eie = inode_list; eie; eie = eie->next) {
1971                 btrfs_debug(fs_info,
1972                             "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
1973                             extent_item_objectid, eie->inum,
1974                             eie->offset, root);
1975                 ret = iterate(eie->inum, eie->offset, root, ctx);
1976                 if (ret) {
1977                         btrfs_debug(fs_info,
1978                                     "stopping iteration for %llu due to ret=%d",
1979                                     extent_item_objectid, ret);
1980                         break;
1981                 }
1982         }
1983
1984         return ret;
1985 }
1986
1987 /*
1988  * calls iterate() for every inode that references the extent identified by
1989  * the given parameters.
1990  * when the iterator function returns a non-zero value, iteration stops.
1991  */
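/*
 * For an in-tree example of an iterate_extent_inodes_t callback, see
 * build_ino_list() below, which packs (inum, offset, root) triples into a
 * btrfs_data_container.
 */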
1992 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1993                                 u64 extent_item_objectid, u64 extent_item_pos,
1994                                 int search_commit_root,
1995                                 iterate_extent_inodes_t *iterate, void *ctx,
1996                                 bool ignore_offset)
1997 {
1998         int ret;
1999         struct btrfs_trans_handle *trans = NULL;
2000         struct ulist *refs = NULL;
2001         struct ulist *roots = NULL;
2002         struct ulist_node *ref_node = NULL;
2003         struct ulist_node *root_node = NULL;
2004         struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2005         struct ulist_iterator ref_uiter;
2006         struct ulist_iterator root_uiter;
2007
2008         btrfs_debug(fs_info, "resolving all inodes for extent %llu",
2009                         extent_item_objectid);
2010
2011         if (!search_commit_root) {
2012                 trans = btrfs_attach_transaction(fs_info->extent_root);
2013                 if (IS_ERR(trans)) {
2014                         if (PTR_ERR(trans) != -ENOENT &&
2015                             PTR_ERR(trans) != -EROFS)
2016                                 return PTR_ERR(trans);
2017                         trans = NULL;
2018                 }
2019         }
2020
2021         if (trans)
2022                 btrfs_get_tree_mod_seq(fs_info, &seq_elem);
2023         else
2024                 down_read(&fs_info->commit_root_sem);
2025
2026         ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
2027                                    seq_elem.seq, &refs,
2028                                    &extent_item_pos, ignore_offset);
2029         if (ret)
2030                 goto out;
2031
2032         ULIST_ITER_INIT(&ref_uiter);
2033         while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2034                 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
2035                                                 seq_elem.seq, &roots,
2036                                                 ignore_offset);
2037                 if (ret)
2038                         break;
2039                 ULIST_ITER_INIT(&root_uiter);
2040                 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
2041                         btrfs_debug(fs_info,
2042                                     "root %llu references leaf %llu, data list %#llx",
2043                                     root_node->val, ref_node->val,
2044                                     ref_node->aux);
2045                         ret = iterate_leaf_refs(fs_info,
2046                                                 (struct extent_inode_elem *)
2047                                                 (uintptr_t)ref_node->aux,
2048                                                 root_node->val,
2049                                                 extent_item_objectid,
2050                                                 iterate, ctx);
2051                 }
2052                 ulist_free(roots);
2053         }
2054
2055         free_leaf_list(refs);
2056 out:
2057         if (trans) {
2058                 btrfs_put_tree_mod_seq(fs_info, &seq_elem);
2059                 btrfs_end_transaction(trans);
2060         } else {
2061                 up_read(&fs_info->commit_root_sem);
2062         }
2063
2064         return ret;
2065 }
2066
2067 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
2068 {
2069         struct btrfs_data_container *inodes = ctx;
2070         const size_t c = 3 * sizeof(u64);
2071
2072         if (inodes->bytes_left >= c) {
2073                 inodes->bytes_left -= c;
2074                 inodes->val[inodes->elem_cnt] = inum;
2075                 inodes->val[inodes->elem_cnt + 1] = offset;
2076                 inodes->val[inodes->elem_cnt + 2] = root;
2077                 inodes->elem_cnt += 3;
2078         } else {
2079                 inodes->bytes_missing += c - inodes->bytes_left;
2080                 inodes->bytes_left = 0;
2081                 inodes->elem_missed += 3;
2082         }
2083
2084         return 0;
2085 }
2086
2087 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2088                                 struct btrfs_path *path,
2089                                 void *ctx, bool ignore_offset)
2090 {
2091         int ret;
2092         u64 extent_item_pos;
2093         u64 flags = 0;
2094         struct btrfs_key found_key;
2095         int search_commit_root = path->search_commit_root;
2096
2097         ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2098         btrfs_release_path(path);
2099         if (ret < 0)
2100                 return ret;
2101         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2102                 return -EINVAL;
2103
2104         extent_item_pos = logical - found_key.objectid;
2105         ret = iterate_extent_inodes(fs_info, found_key.objectid,
2106                                         extent_item_pos, search_commit_root,
2107                                         build_ino_list, ctx, ignore_offset);
2108
2109         return ret;
2110 }
2111
2112 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
2113                               struct extent_buffer *eb, void *ctx);
2114
2115 static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
2116                               struct btrfs_path *path,
2117                               iterate_irefs_t *iterate, void *ctx)
2118 {
2119         int ret = 0;
2120         int slot;
2121         u32 cur;
2122         u32 len;
2123         u32 name_len;
2124         u64 parent = 0;
2125         int found = 0;
2126         struct extent_buffer *eb;
2127         struct btrfs_item *item;
2128         struct btrfs_inode_ref *iref;
2129         struct btrfs_key found_key;
2130
2131         while (!ret) {
2132                 ret = btrfs_find_item(fs_root, path, inum,
2133                                 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2134                                 &found_key);
2135
2136                 if (ret < 0)
2137                         break;
2138                 if (ret) {
2139                         ret = found ? 0 : -ENOENT;
2140                         break;
2141                 }
2142                 ++found;
2143
2144                 parent = found_key.offset;
2145                 slot = path->slots[0];
2146                 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2147                 if (!eb) {
2148                         ret = -ENOMEM;
2149                         break;
2150                 }
2151                 btrfs_release_path(path);
2152
2153                 item = btrfs_item_nr(slot);
2154                 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2155
2156                 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2157                         name_len = btrfs_inode_ref_name_len(eb, iref);
2158                         /* path must be released before calling iterate()! */
2159                         btrfs_debug(fs_root->fs_info,
2160                                 "following ref at offset %u for inode %llu in tree %llu",
2161                                 cur, found_key.objectid,
2162                                 fs_root->root_key.objectid);
2163                         ret = iterate(parent, name_len,
2164                                       (unsigned long)(iref + 1), eb, ctx);
2165                         if (ret)
2166                                 break;
2167                         len = sizeof(*iref) + name_len;
2168                         iref = (struct btrfs_inode_ref *)((char *)iref + len);
2169                 }
2170                 free_extent_buffer(eb);
2171         }
2172
2173         btrfs_release_path(path);
2174
2175         return ret;
2176 }
2177
2178 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2179                                  struct btrfs_path *path,
2180                                  iterate_irefs_t *iterate, void *ctx)
2181 {
2182         int ret;
2183         int slot;
2184         u64 offset = 0;
2185         u64 parent;
2186         int found = 0;
2187         struct extent_buffer *eb;
2188         struct btrfs_inode_extref *extref;
2189         u32 item_size;
2190         u32 cur_offset;
2191         unsigned long ptr;
2192
2193         while (1) {
2194                 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2195                                             &offset);
2196                 if (ret < 0)
2197                         break;
2198                 if (ret) {
2199                         ret = found ? 0 : -ENOENT;
2200                         break;
2201                 }
2202                 ++found;
2203
2204                 slot = path->slots[0];
2205                 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2206                 if (!eb) {
2207                         ret = -ENOMEM;
2208                         break;
2209                 }
2210                 btrfs_release_path(path);
2211
2212                 item_size = btrfs_item_size_nr(eb, slot);
2213                 ptr = btrfs_item_ptr_offset(eb, slot);
2214                 cur_offset = 0;
2215
2216                 while (cur_offset < item_size) {
2217                         u32 name_len;
2218
2219                         extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2220                         parent = btrfs_inode_extref_parent(eb, extref);
2221                         name_len = btrfs_inode_extref_name_len(eb, extref);
2222                         ret = iterate(parent, name_len,
2223                                       (unsigned long)&extref->name, eb, ctx);
2224                         if (ret)
2225                                 break;
2226
2227                         cur_offset += btrfs_inode_extref_name_len(eb, extref);
2228                         cur_offset += sizeof(*extref);
2229                 }
2230                 free_extent_buffer(eb);
2231
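                /*
                 * btrfs_find_one_extref() stored the offset of the extref it
                 * found back into 'offset'; bump it so the next search makes
                 * forward progress instead of finding the same item again.
                 */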
2232                 offset++;
2233         }
2234
2235         btrfs_release_path(path);
2236
2237         return ret;
2238 }
2239
2240 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2241                          struct btrfs_path *path, iterate_irefs_t *iterate,
2242                          void *ctx)
2243 {
2244         int ret;
2245         int found_refs = 0;
2246
2247         ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2248         if (!ret)
2249                 ++found_refs;
2250         else if (ret != -ENOENT)
2251                 return ret;
2252
2253         ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2254         if (ret == -ENOENT && found_refs)
2255                 return 0;
2256
2257         return ret;
2258 }
2259
2260 /*
2261  * returns 0 if the path could be dumped (possibly truncated)
2262  * returns <0 in case of an error
2263  */
2264 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2265                          struct extent_buffer *eb, void *ctx)
2266 {
2267         struct inode_fs_paths *ipath = ctx;
2268         char *fspath;
2269         char *fspath_min;
2270         int i = ipath->fspath->elem_cnt;
2271         const int s_ptr = sizeof(char *);
2272         u32 bytes_left;
2273
2274         bytes_left = ipath->fspath->bytes_left > s_ptr ?
2275                                         ipath->fspath->bytes_left - s_ptr : 0;
2276
2277         fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2278         fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2279                                    name_off, eb, inum, fspath_min, bytes_left);
2280         if (IS_ERR(fspath))
2281                 return PTR_ERR(fspath);
2282
2283         if (fspath > fspath_min) {
2284                 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2285                 ++ipath->fspath->elem_cnt;
2286                 ipath->fspath->bytes_left = fspath - fspath_min;
2287         } else {
2288                 ++ipath->fspath->elem_missed;
2289                 ipath->fspath->bytes_missing += fspath_min - fspath;
2290                 ipath->fspath->bytes_left = 0;
2291         }
2292
2293         return 0;
2294 }
2295
2296 /*
2297  * this dumps all file system paths to the inode into the ipath struct, provided
2298  * it has been created large enough. each path is zero-terminated and accessed
2299  * from ipath->fspath->val[i].
2300  * when it returns, ipath->fspath->elem_cnt paths are available in
2301  * ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2302  * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise
2303  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2304  * have been needed to return all paths.
2305  */
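/*
 * Typical lifecycle around paths_from_inode() (a sketch of what the
 * INO_PATHS ioctl does; error handling omitted):
 *
 *	ipath = init_ipath(size, fs_root, path);
 *	ret = paths_from_inode(inum, ipath);
 *	... consume ipath->fspath->val[0 .. elem_cnt - 1] ...
 *	free_ipath(ipath);
 */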
2306 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2307 {
2308         return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2309                              inode_to_path, ipath);
2310 }
2311
2312 struct btrfs_data_container *init_data_container(u32 total_bytes)
2313 {
2314         struct btrfs_data_container *data;
2315         size_t alloc_bytes;
2316
2317         alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2318         data = kvmalloc(alloc_bytes, GFP_KERNEL);
2319         if (!data)
2320                 return ERR_PTR(-ENOMEM);
2321
2322         if (total_bytes >= sizeof(*data)) {
2323                 data->bytes_left = total_bytes - sizeof(*data);
2324                 data->bytes_missing = 0;
2325         } else {
2326                 data->bytes_missing = sizeof(*data) - total_bytes;
2327                 data->bytes_left = 0;
2328         }
2329
2330         data->elem_cnt = 0;
2331         data->elem_missed = 0;
2332
2333         return data;
2334 }
2335
2336 /*
2337  * allocates space to return multiple file system paths for an inode.
2338  * total_bytes to allocate are passed, note that space usable for actual path
2339  * information will be total_bytes - sizeof(struct btrfs_data_container).
2340  * the returned pointer must be freed with free_ipath() in the end.
2341  */
2342 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2343                                         struct btrfs_path *path)
2344 {
2345         struct inode_fs_paths *ifp;
2346         struct btrfs_data_container *fspath;
2347
2348         fspath = init_data_container(total_bytes);
2349         if (IS_ERR(fspath))
2350                 return ERR_CAST(fspath);
2351
2352         ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2353         if (!ifp) {
2354                 kvfree(fspath);
2355                 return ERR_PTR(-ENOMEM);
2356         }
2357
2358         ifp->btrfs_path = path;
2359         ifp->fspath = fspath;
2360         ifp->fs_root = fs_root;
2361
2362         return ifp;
2363 }
2364
2365 void free_ipath(struct inode_fs_paths *ipath)
2366 {
2367         if (!ipath)
2368                 return;
2369         kvfree(ipath->fspath);
2370         kfree(ipath);
2371 }
2372
2373 struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2374                 struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2375 {
2376         struct btrfs_backref_iter *ret;
2377
2378         ret = kzalloc(sizeof(*ret), gfp_flag);
2379         if (!ret)
2380                 return NULL;
2381
2382         ret->path = btrfs_alloc_path();
2383         if (!ret->path) {
2384                 kfree(ret);
2385                 return NULL;
2386         }
2387
2388         /* Current backref iterator only supports iteration in commit root */
2389         ret->path->search_commit_root = 1;
2390         ret->path->skip_locking = 1;
2391         ret->fs_info = fs_info;
2392
2393         return ret;
2394 }
2395
2396 int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2397 {
2398         struct btrfs_fs_info *fs_info = iter->fs_info;
2399         struct btrfs_path *path = iter->path;
2400         struct btrfs_extent_item *ei;
2401         struct btrfs_key key;
2402         int ret;
2403
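        /*
         * Search with the largest possible offset, so btrfs_search_slot()
         * lands right after the last item of @bytenr; stepping back one
         * slot then yields its EXTENT_ITEM/METADATA_ITEM.
         */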
2404         key.objectid = bytenr;
2405         key.type = BTRFS_METADATA_ITEM_KEY;
2406         key.offset = (u64)-1;
2407         iter->bytenr = bytenr;
2408
2409         ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2410         if (ret < 0)
2411                 return ret;
2412         if (ret == 0) {
2413                 ret = -EUCLEAN;
2414                 goto release;
2415         }
2416         if (path->slots[0] == 0) {
2417                 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2418                 ret = -EUCLEAN;
2419                 goto release;
2420         }
2421         path->slots[0]--;
2422
2423         btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2424         if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2425              key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2426                 ret = -ENOENT;
2427                 goto release;
2428         }
2429         memcpy(&iter->cur_key, &key, sizeof(key));
2430         iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2431                                                     path->slots[0]);
2432         iter->end_ptr = (u32)(iter->item_ptr +
2433                         btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2434         ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2435                             struct btrfs_extent_item);
2436
2437         /*
2438          * Only iteration over tree backrefs is supported for now.
2439          *
2440          * This is an extra precaution for non skinny-metadata, where
2441          * EXTENT_ITEM is also used for tree blocks, so the extent flags
2442          * are the only way to determine if it's a tree block.
2443          */
2444         if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2445                 ret = -ENOTSUPP;
2446                 goto release;
2447         }
2448         iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2449
2450         /* If there is no inline backref, go search for keyed backref */
2451         if (iter->cur_ptr >= iter->end_ptr) {
2452                 ret = btrfs_next_item(fs_info->extent_root, path);
2453
2454                 /* Neither inline nor keyed ref */
2455                 if (ret > 0) {
2456                         ret = -ENOENT;
2457                         goto release;
2458                 }
2459                 if (ret < 0)
2460                         goto release;
2461
2462                 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2463                                 path->slots[0]);
2464                 if (iter->cur_key.objectid != bytenr ||
2465                     (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2466                      iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2467                         ret = -ENOENT;
2468                         goto release;
2469                 }
2470                 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2471                                                            path->slots[0]);
2472                 iter->item_ptr = iter->cur_ptr;
2473                 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2474                                       path->nodes[0], path->slots[0]));
2475         }
2476
2477         return 0;
2478 release:
2479         btrfs_backref_iter_release(iter);
2480         return ret;
2481 }
2482
2483 /*
2484  * Go to the next backref item of the current bytenr, which can be either
2485  * inlined or keyed.
2486  *
2487  * Caller needs to check whether it's an inline ref or not via iter->cur_key.
2488  *
2489  * Return 0 if we get the next backref without problem.
2490  * Return >0 if there is no extra backref for this bytenr.
2491  * Return <0 if something went wrong.
2492  */
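/*
 * Sketch of a full iteration with btrfs_backref_iter_next() (roughly what
 * the backref cache building code does; error handling condensed):
 *
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		... inspect iter->cur_key / iter->cur_ptr ...
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_release(iter);
 *	if (ret < 0)
 *		return ret;
 */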
2493 int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2494 {
2495         struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2496         struct btrfs_path *path = iter->path;
2497         struct btrfs_extent_inline_ref *iref;
2498         int ret;
2499         u32 size;
2500
2501         if (btrfs_backref_iter_is_inline_ref(iter)) {
2502                 /* We're still inside the inline refs */
2503                 ASSERT(iter->cur_ptr < iter->end_ptr);
2504
2505                 if (btrfs_backref_has_tree_block_info(iter)) {
2506                         /* The btrfs_tree_block_info comes first, skip it */
2507                         size = sizeof(struct btrfs_tree_block_info);
2508                 } else {
2509                         /* Use inline ref type to determine the size */
2510                         int type;
2511
2512                         iref = (struct btrfs_extent_inline_ref *)
2513                                 ((unsigned long)iter->cur_ptr);
2514                         type = btrfs_extent_inline_ref_type(eb, iref);
2515
2516                         size = btrfs_extent_inline_ref_size(type);
2517                 }
2518                 iter->cur_ptr += size;
2519                 if (iter->cur_ptr < iter->end_ptr)
2520                         return 0;
2521
2522                 /* All inline items iterated, fall through */
2523         }
2524
2525         /* We're at keyed items, there is no inline item, go to the next one */
2526         ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2527         if (ret)
2528                 return ret;
2529
2530         btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2531         if (iter->cur_key.objectid != iter->bytenr ||
2532             (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2533              iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2534                 return 1;
2535         iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2536                                         path->slots[0]);
2537         iter->cur_ptr = iter->item_ptr;
2538         iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2539                                                 path->slots[0]);
2540         return 0;
2541 }
2542
2543 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2544                               struct btrfs_backref_cache *cache, int is_reloc)
2545 {
2546         int i;
2547
2548         cache->rb_root = RB_ROOT;
2549         for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2550                 INIT_LIST_HEAD(&cache->pending[i]);
2551         INIT_LIST_HEAD(&cache->changed);
2552         INIT_LIST_HEAD(&cache->detached);
2553         INIT_LIST_HEAD(&cache->leaves);
2554         INIT_LIST_HEAD(&cache->pending_edge);
2555         INIT_LIST_HEAD(&cache->useless_node);
2556         cache->fs_info = fs_info;
2557         cache->is_reloc = is_reloc;
2558 }
2559
2560 struct btrfs_backref_node *btrfs_backref_alloc_node(
2561                 struct btrfs_backref_cache *cache, u64 bytenr, int level)
2562 {
2563         struct btrfs_backref_node *node;
2564
2565         ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2566         node = kzalloc(sizeof(*node), GFP_NOFS);
2567         if (!node)
2568                 return node;
2569
2570         INIT_LIST_HEAD(&node->list);
2571         INIT_LIST_HEAD(&node->upper);
2572         INIT_LIST_HEAD(&node->lower);
2573         RB_CLEAR_NODE(&node->rb_node);
2574         cache->nr_nodes++;
2575         node->level = level;
2576         node->bytenr = bytenr;
2577
2578         return node;
2579 }
2580
2581 struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2582                 struct btrfs_backref_cache *cache)
2583 {
2584         struct btrfs_backref_edge *edge;
2585
2586         edge = kzalloc(sizeof(*edge), GFP_NOFS);
2587         if (edge)
2588                 cache->nr_edges++;
2589         return edge;
2590 }
2591
2592 /*
2593  * Drop the backref node from cache, also cleaning up all its
2594  * upper edges and any uncached nodes in the path.
2595  *
2596  * This cleanup happens bottom up, thus the node should either
2597  * be the lowest node in the cache or a detached node.
2598  */
2599 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2600                                 struct btrfs_backref_node *node)
2601 {
2602         struct btrfs_backref_node *upper;
2603         struct btrfs_backref_edge *edge;
2604
2605         if (!node)
2606                 return;
2607
2608         BUG_ON(!node->lowest && !node->detached);
2609         while (!list_empty(&node->upper)) {
2610                 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2611                                   list[LOWER]);
2612                 upper = edge->node[UPPER];
2613                 list_del(&edge->list[LOWER]);
2614                 list_del(&edge->list[UPPER]);
2615                 btrfs_backref_free_edge(cache, edge);
2616
2617                 /*
2618                  * Add the node to the leaf node list if no other child
2619                  * block is cached.
2620                  */
2621                 if (list_empty(&upper->lower)) {
2622                         list_add_tail(&upper->lower, &cache->leaves);
2623                         upper->lowest = 1;
2624                 }
2625         }
2626
2627         btrfs_backref_drop_node(cache, node);
2628 }
2629
2630 /*
2631  * Release all nodes/edges from current cache
2632  */
2633 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2634 {
2635         struct btrfs_backref_node *node;
2636         int i;
2637
2638         while (!list_empty(&cache->detached)) {
2639                 node = list_entry(cache->detached.next,
2640                                   struct btrfs_backref_node, list);
2641                 btrfs_backref_cleanup_node(cache, node);
2642         }
2643
2644         while (!list_empty(&cache->leaves)) {
2645                 node = list_entry(cache->leaves.next,
2646                                   struct btrfs_backref_node, lower);
2647                 btrfs_backref_cleanup_node(cache, node);
2648         }
2649
2650         cache->last_trans = 0;
2651
2652         for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2653                 ASSERT(list_empty(&cache->pending[i]));
2654         ASSERT(list_empty(&cache->pending_edge));
2655         ASSERT(list_empty(&cache->useless_node));
2656         ASSERT(list_empty(&cache->changed));
2657         ASSERT(list_empty(&cache->detached));
2658         ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2659         ASSERT(!cache->nr_nodes);
2660         ASSERT(!cache->nr_edges);
2661 }
2662
2663 /*
2664  * Handle direct tree backref
2665  *
2666  * Direct tree backref means the backref item records its parent bytenr
2667  * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
2668  *
2669  * @ref_key:    The converted backref key.
2670  *              For keyed backref, it's the item key.
2671  *              For inlined backref, objectid is the bytenr,
2672  *              type is btrfs_inline_ref_type, offset is
2673  *              btrfs_inline_ref_offset.
2674  */
2675 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2676                                       struct btrfs_key *ref_key,
2677                                       struct btrfs_backref_node *cur)
2678 {
2679         struct btrfs_backref_edge *edge;
2680         struct btrfs_backref_node *upper;
2681         struct rb_node *rb_node;
2682
2683         ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2684
2685         /* Only reloc root uses backref pointing to itself */
2686         if (ref_key->objectid == ref_key->offset) {
2687                 struct btrfs_root *root;
2688
2689                 cur->is_reloc_root = 1;
2690                 /* Only reloc backref cache cares about a specific root */
2691                 if (cache->is_reloc) {
2692                         root = find_reloc_root(cache->fs_info, cur->bytenr);
2693                         if (!root)
2694                                 return -ENOENT;
2695                         cur->root = root;
2696                 } else {
2697                         /*
2698                          * For the generic purpose backref cache, a reloc
2699                          * root node is useless.
2700                          */
2701                         list_add(&cur->list, &cache->useless_node);
2702                 }
2703                 return 0;
2704         }
2705
2706         edge = btrfs_backref_alloc_edge(cache);
2707         if (!edge)
2708                 return -ENOMEM;
2709
2710         rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2711         if (!rb_node) {
2712                 /* Parent node not yet cached */
2713                 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2714                                            cur->level + 1);
2715                 if (!upper) {
2716                         btrfs_backref_free_edge(cache, edge);
2717                         return -ENOMEM;
2718                 }
2719
2720                 /*
2721                  * The backrefs for the upper level block aren't cached yet;
2722                  * add the block to the pending list.
2723                  */
2724                 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2725         } else {
2726                 /* Parent node already cached */
2727                 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2728                 ASSERT(upper->checked);
2729                 INIT_LIST_HEAD(&edge->list[UPPER]);
2730         }
2731         btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2732         return 0;
2733 }
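/*
 * Illustrative sketch (an addition, with hypothetical @bytenr and @parent
 * values): the shape of the converted @ref_key that
 * handle_direct_tree_backref() expects for an inline SHARED_BLOCK_REF.
 */
static inline void example_direct_ref_key(struct btrfs_key *key,
					  u64 bytenr, u64 parent)
{
	key->objectid = bytenr;				/* extent being resolved */
	key->type = BTRFS_SHARED_BLOCK_REF_KEY;		/* direct backref */
	key->offset = parent;				/* parent tree block bytenr */
	/* A reloc root points back at itself, i.e. objectid == offset. */
}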
2734
2735 /*
2736  * Handle indirect tree backref
2737  *
2738  * An indirect tree backref means we only know which tree the node belongs
2739  * to. We still need to do a tree search to find out the parents. This is
2740  * the case for TREE_BLOCK_REF backrefs (keyed or inlined).
2741  *
2742  * @ref_key:    The same as @ref_key in handle_direct_tree_backref()
2743  * @tree_key:   The first key of this tree block.
2744  * @path:       A clean (released) path, to avoid allocating a path every
2745  *              time the function gets called.
2746  */
2747 static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2748                                         struct btrfs_path *path,
2749                                         struct btrfs_key *ref_key,
2750                                         struct btrfs_key *tree_key,
2751                                         struct btrfs_backref_node *cur)
2752 {
2753         struct btrfs_fs_info *fs_info = cache->fs_info;
2754         struct btrfs_backref_node *upper;
2755         struct btrfs_backref_node *lower;
2756         struct btrfs_backref_edge *edge;
2757         struct extent_buffer *eb;
2758         struct btrfs_root *root;
2759         struct rb_node *rb_node;
2760         int level;
2761         bool need_check = true;
2762         int ret;
2763
2764         root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2765         if (IS_ERR(root))
2766                 return PTR_ERR(root);
2767         if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2768                 cur->cowonly = 1;
2769
2770         if (btrfs_root_level(&root->root_item) == cur->level) {
2771                 /* Tree root */
2772                 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2773                 /*
2774                  * For the reloc backref cache, we may ignore reloc roots. But
2775                  * for the general purpose backref cache, we can't rely on
2776                  * btrfs_should_ignore_reloc_root() as it may conflict with a
2777                  * currently running relocation and lead to a missing root.
2778                  *
2779                  * For the general purpose cache, reloc root detection relies
2780                  * completely on direct backrefs (key->offset is the parent
2781                  * bytenr), thus we only do this check for the reloc cache.
2782                  */
2783                 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2784                         btrfs_put_root(root);
2785                         list_add(&cur->list, &cache->useless_node);
2786                 } else {
2787                         cur->root = root;
2788                 }
2789                 return 0;
2790         }
2791
2792         level = cur->level + 1;
2793
2794         /* Search the tree to find parent blocks referring to the block */
2795         path->search_commit_root = 1;
2796         path->skip_locking = 1;
2797         path->lowest_level = level;
2798         ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2799         path->lowest_level = 0;
2800         if (ret < 0) {
2801                 btrfs_put_root(root);
2802                 return ret;
2803         }
2804         if (ret > 0 && path->slots[level] > 0)
2805                 path->slots[level]--;
2806
2807         eb = path->nodes[level];
2808         if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2809                 btrfs_err(fs_info,
2810 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2811                           cur->bytenr, level - 1, root->root_key.objectid,
2812                           tree_key->objectid, tree_key->type, tree_key->offset);
2813                 btrfs_put_root(root);
2814                 ret = -ENOENT;
2815                 goto out;
2816         }
2817         lower = cur;
2818
2819         /* Add all nodes and edges in the path */
2820         for (; level < BTRFS_MAX_LEVEL; level++) {
2821                 if (!path->nodes[level]) {
2822                         ASSERT(btrfs_root_bytenr(&root->root_item) ==
2823                                lower->bytenr);
2824                         /* Same as the previous btrfs_should_ignore_reloc_root() call */
2825                         if (btrfs_should_ignore_reloc_root(root) &&
2826                             cache->is_reloc) {
2827                                 btrfs_put_root(root);
2828                                 list_add(&lower->list, &cache->useless_node);
2829                         } else {
2830                                 lower->root = root;
2831                         }
2832                         break;
2833                 }
2834
2835                 edge = btrfs_backref_alloc_edge(cache);
2836                 if (!edge) {
2837                         btrfs_put_root(root);
2838                         ret = -ENOMEM;
2839                         goto out;
2840                 }
2841
2842                 eb = path->nodes[level];
2843                 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2844                 if (!rb_node) {
2845                         upper = btrfs_backref_alloc_node(cache, eb->start,
2846                                                          lower->level + 1);
2847                         if (!upper) {
2848                                 btrfs_put_root(root);
2849                                 btrfs_backref_free_edge(cache, edge);
2850                                 ret = -ENOMEM;
2851                                 goto out;
2852                         }
2853                         upper->owner = btrfs_header_owner(eb);
2854                         if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2855                                 upper->cowonly = 1;
2856
2857                         /*
2858                          * If we know the block isn't shared we can avoid
2859                          * checking its backrefs.
2860                          */
2861                         if (btrfs_block_can_be_shared(root, eb))
2862                                 upper->checked = 0;
2863                         else
2864                                 upper->checked = 1;
2865
2866                         /*
2867                          * Add the block to the pending list if we need to check
2868                          * its backrefs. We only do this once while walking up a
2869                          * tree, as we will catch anything else later on.
2870                          */
2871                         if (!upper->checked && need_check) {
2872                                 need_check = false;
2873                                 list_add_tail(&edge->list[UPPER],
2874                                               &cache->pending_edge);
2875                         } else {
2876                                 if (upper->checked)
2877                                         need_check = true;
2878                                 INIT_LIST_HEAD(&edge->list[UPPER]);
2879                         }
2880                 } else {
2881                         upper = rb_entry(rb_node, struct btrfs_backref_node,
2882                                          rb_node);
2883                         ASSERT(upper->checked);
2884                         INIT_LIST_HEAD(&edge->list[UPPER]);
2885                         if (!upper->owner)
2886                                 upper->owner = btrfs_header_owner(eb);
2887                 }
2888                 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2889
2890                 if (rb_node) {
2891                         btrfs_put_root(root);
2892                         break;
2893                 }
2894                 lower = upper;
2895                 upper = NULL;
2896         }
2897 out:
2898         btrfs_release_path(path);
2899         return ret;
2900 }
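/*
 * Illustrative sketch (an addition, with hypothetical @bytenr and
 * @root_objectid values): the shape of the converted @ref_key for an
 * indirect backref, contrasted with the direct case above.
 */
static inline void example_indirect_ref_key(struct btrfs_key *key,
					    u64 bytenr, u64 root_objectid)
{
	key->objectid = bytenr;				/* extent being resolved */
	key->type = BTRFS_TREE_BLOCK_REF_KEY;		/* indirect backref */
	key->offset = root_objectid;			/* owning tree, not a bytenr */
	/*
	 * The parent bytenr is unknown here; handle_indirect_tree_backref()
	 * finds it by searching the owning tree down to cur->level + 1 with
	 * the block's first key.
	 */
}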
2901
2902 /*
2903  * Add backref node @cur into @cache.
2904  *
2905  * NOTE: Even if the function returns 0, @cur is not yet cached, as its
2906  *       upper links aren't yet bi-directional. Use
2907  *       btrfs_backref_finish_upper_links() to finish the linkage.
2908  *
2909  * @path:       Released path for indirect tree backref lookup
2910  * @iter:       Released backref iter for extent tree search
2911  * @node_key:   The first key of the tree block
2912  */
2913 int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2914                                 struct btrfs_path *path,
2915                                 struct btrfs_backref_iter *iter,
2916                                 struct btrfs_key *node_key,
2917                                 struct btrfs_backref_node *cur)
2918 {
2919         struct btrfs_fs_info *fs_info = cache->fs_info;
2920         struct btrfs_backref_edge *edge;
2921         struct btrfs_backref_node *exist;
2922         int ret;
2923
2924         ret = btrfs_backref_iter_start(iter, cur->bytenr);
2925         if (ret < 0)
2926                 return ret;
2927         /*
2928          * We skip the first btrfs_tree_block_info, as we don't use the key
2929          * stored in it, but fetch the key from the tree block instead.
2930          */
2931         if (btrfs_backref_has_tree_block_info(iter)) {
2932                 ret = btrfs_backref_iter_next(iter);
2933                 if (ret < 0)
2934                         goto out;
2935                 /* No extra backref? This means the tree block is corrupted */
2936                 if (ret > 0) {
2937                         ret = -EUCLEAN;
2938                         goto out;
2939                 }
2940         }
2941         WARN_ON(cur->checked);
2942         if (!list_empty(&cur->upper)) {
2943                 /*
2944                  * The backref was added previously when processing a
2945                  * backref of type BTRFS_TREE_BLOCK_REF_KEY.
2946                  */
2947                 ASSERT(list_is_singular(&cur->upper));
2948                 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2949                                   list[LOWER]);
2950                 ASSERT(list_empty(&edge->list[UPPER]));
2951                 exist = edge->node[UPPER];
2952                 /*
2953                  * Add the upper level block to the pending list if we need
2954                  * to check its backrefs.
2955                  */
2956                 if (!exist->checked)
2957                         list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2958         } else {
2959                 exist = NULL;
2960         }
2961
2962         for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2963                 struct extent_buffer *eb;
2964                 struct btrfs_key key;
2965                 int type;
2966
2967                 cond_resched();
2968                 eb = btrfs_backref_get_eb(iter);
2969
2970                 key.objectid = iter->bytenr;
2971                 if (btrfs_backref_iter_is_inline_ref(iter)) {
2972                         struct btrfs_extent_inline_ref *iref;
2973
2974                         /* Update key for inline backref */
2975                         iref = (struct btrfs_extent_inline_ref *)
2976                                 ((unsigned long)iter->cur_ptr);
2977                         type = btrfs_get_extent_inline_ref_type(eb, iref,
2978                                                         BTRFS_REF_TYPE_BLOCK);
2979                         if (type == BTRFS_REF_TYPE_INVALID) {
2980                                 ret = -EUCLEAN;
2981                                 goto out;
2982                         }
2983                         key.type = type;
2984                         key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2985                 } else {
2986                         key.type = iter->cur_key.type;
2987                         key.offset = iter->cur_key.offset;
2988                 }
2989
2990                 /*
2991                  * Parent node found and matches the current inline ref; no
2992                  * need to rebuild this node for this inline ref.
2993                  */
2994                 if (exist &&
2995                     ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2996                       exist->owner == key.offset) ||
2997                      (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2998                       exist->bytenr == key.offset))) {
2999                         exist = NULL;
3000                         continue;
3001                 }
3002
3003                 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3004                 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3005                         ret = handle_direct_tree_backref(cache, &key, cur);
3006                         if (ret < 0)
3007                                 goto out;
3008                         continue;
3009                 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
3010                         ret = -EINVAL;
3011                         btrfs_print_v0_err(fs_info);
3012                         btrfs_handle_fs_error(fs_info, ret, NULL);
3013                         goto out;
3014                 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
3015                         continue;
3016                 }
3017
3018                 /*
3019                  * key.type == BTRFS_TREE_BLOCK_REF_KEY, so the (inline) ref
3020                  * offset is the root objectid. We need to search that tree to
3021                  * find the block's parent bytenr.
3022                  */
3023                 ret = handle_indirect_tree_backref(cache, path, &key, node_key,
3024                                                    cur);
3025                 if (ret < 0)
3026                         goto out;
3027         }
3028         ret = 0;
3029         cur->checked = 1;
3030         WARN_ON(exist);
3031 out:
3032         btrfs_backref_iter_release(iter);
3033         return ret;
3034 }
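/*
 * Hypothetical caller sketch (an illustration, not a verbatim copy of any
 * in-tree caller): the two-phase contract from the NOTE above, including
 * the error path.  The helper name and the ERR_PTR-style return are
 * assumptions made for this example only.
 */
static struct btrfs_backref_node *example_build_one_node(
		struct btrfs_backref_cache *cache, struct btrfs_path *path,
		struct btrfs_backref_iter *iter, struct btrfs_key *node_key,
		struct btrfs_backref_node *node)
{
	int ret;

	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
	if (ret < 0)
		goto error;

	/* Only now do @node's upper links become bi-directional. */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto error;

	return node;
error:
	/* Undo everything that the failed build-up queued. */
	btrfs_backref_error_cleanup(cache, node);
	return ERR_PTR(ret);
}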
3035
3036 /*
3037  * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3038  */
3039 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3040                                      struct btrfs_backref_node *start)
3041 {
3042         struct list_head *useless_node = &cache->useless_node;
3043         struct btrfs_backref_edge *edge;
3044         struct rb_node *rb_node;
3045         LIST_HEAD(pending_edge);
3046
3047         ASSERT(start->checked);
3048
3049         /* Insert this node into the cache if it's not COW-only */
3050         if (!start->cowonly) {
3051                 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3052                                            &start->rb_node);
3053                 if (rb_node)
3054                         btrfs_backref_panic(cache->fs_info, start->bytenr,
3055                                             -EEXIST);
3056                 list_add_tail(&start->lower, &cache->leaves);
3057         }
3058
3059         /*
3060          * Use breadth-first search to iterate all related edges.
3061          *
3062          * The starting points are all the edges of this node.
3063          */
3064         list_for_each_entry(edge, &start->upper, list[LOWER])
3065                 list_add_tail(&edge->list[UPPER], &pending_edge);
3066
3067         while (!list_empty(&pending_edge)) {
3068                 struct btrfs_backref_node *upper;
3069                 struct btrfs_backref_node *lower;
3070
3071                 edge = list_first_entry(&pending_edge,
3072                                 struct btrfs_backref_edge, list[UPPER]);
3073                 list_del_init(&edge->list[UPPER]);
3074                 upper = edge->node[UPPER];
3075                 lower = edge->node[LOWER];
3076
3077                 /* Parent is detached, no need to keep any edges */
3078                 if (upper->detached) {
3079                         list_del(&edge->list[LOWER]);
3080                         btrfs_backref_free_edge(cache, edge);
3081
3082                         /* Lower node is an orphan, queue it for cleanup */
3083                         if (list_empty(&lower->upper))
3084                                 list_add(&lower->list, useless_node);
3085                         continue;
3086                 }
3087
3088                 /*
3089                  * All new nodes added by the current build_backref_tree()
3090                  * call haven't been linked to the cache rb tree yet.
3091                  * So if we have upper->rb_node populated, this means a cache
3092                  * hit. We only need to link the edge, as @upper and all its
3093                  * parents have already been linked.
3094                  */
3095                 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3096                         if (upper->lowest) {
3097                                 list_del_init(&upper->lower);
3098                                 upper->lowest = 0;
3099                         }
3100
3101                         list_add_tail(&edge->list[UPPER], &upper->lower);
3102                         continue;
3103                 }
3104
3105                 /* Sanity check: we shouldn't have any unchecked nodes */
3106                 if (!upper->checked) {
3107                         ASSERT(0);
3108                         return -EUCLEAN;
3109                 }
3110
3111                 /* Sanity check: a COW-only node must not have a non-COW-only parent */
3112                 if (start->cowonly != upper->cowonly) {
3113                         ASSERT(0);
3114                         return -EUCLEAN;
3115                 }
3116
3117                 /* Only cache non-COW-only tree blocks (subvolume trees) */
3118                 if (!upper->cowonly) {
3119                         rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3120                                                    &upper->rb_node);
3121                         if (rb_node) {
3122                                 btrfs_backref_panic(cache->fs_info,
3123                                                 upper->bytenr, -EEXIST);
3124                                 return -EUCLEAN;
3125                         }
3126                 }
3127
3128                 list_add_tail(&edge->list[UPPER], &upper->lower);
3129
3130                 /*
3131                  * Also queue all the parent edges of this uncached node
3132                  * to finish the upper linkage
3133                  */
3134                 list_for_each_entry(edge, &upper->upper, list[LOWER])
3135                         list_add_tail(&edge->list[UPPER], &pending_edge);
3136         }
3137         return 0;
3138 }
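/*
 * Illustrative note (an addition, not part of the original source): the
 * BFS above reuses edge->list[UPPER] as its queue hook.  That hook is free
 * until the edge is fully linked, at which point the final
 * list_add_tail(&edge->list[UPPER], &upper->lower) turns it into the
 * permanent link on the upper node's @lower list.
 */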
3139
3140 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3141                                  struct btrfs_backref_node *node)
3142 {
3143         struct btrfs_backref_node *lower;
3144         struct btrfs_backref_node *upper;
3145         struct btrfs_backref_edge *edge;
3146
3147         while (!list_empty(&cache->useless_node)) {
3148                 lower = list_first_entry(&cache->useless_node,
3149                                    struct btrfs_backref_node, list);
3150                 list_del_init(&lower->list);
3151         }
3152         while (!list_empty(&cache->pending_edge)) {
3153                 edge = list_first_entry(&cache->pending_edge,
3154                                 struct btrfs_backref_edge, list[UPPER]);
3155                 list_del(&edge->list[UPPER]);
3156                 list_del(&edge->list[LOWER]);
3157                 lower = edge->node[LOWER];
3158                 upper = edge->node[UPPER];
3159                 btrfs_backref_free_edge(cache, edge);
3160
3161                 /*
3162                  * Lower is no longer linked to any upper backref nodes and
3163                  * isn't in the cache, so we can free it ourselves.
3164                  */
3165                 if (list_empty(&lower->upper) &&
3166                     RB_EMPTY_NODE(&lower->rb_node))
3167                         list_add(&lower->list, &cache->useless_node);
3168
3169                 if (!RB_EMPTY_NODE(&upper->rb_node))
3170                         continue;
3171
3172                 /* Add this node's upper edges to the list to process */
3173                 list_for_each_entry(edge, &upper->upper, list[LOWER])
3174                         list_add_tail(&edge->list[UPPER],
3175                                       &cache->pending_edge);
3176                 if (list_empty(&upper->upper))
3177                         list_add(&upper->list, &cache->useless_node);
3178         }
3179
3180         while (!list_empty(&cache->useless_node)) {
3181                 lower = list_first_entry(&cache->useless_node,
3182                                    struct btrfs_backref_node, list);
3183                 list_del_init(&lower->list);
3184                 if (lower == node)
3185                         node = NULL;
3186                 btrfs_backref_drop_node(cache, lower);
3187         }
3188
3189         btrfs_backref_cleanup_node(cache, node);
3190         ASSERT(list_empty(&cache->useless_node) &&
3191                list_empty(&cache->pending_edge));
3192 }