/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include "kerncompat.h"
#include "radix-tree.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"

static int finish_current_insert(struct btrfs_trans_handle *trans, struct
                                 btrfs_root *extent_root);
static int run_pending(struct btrfs_trans_handle *trans, struct btrfs_root
                       *extent_root);

/*
 * pending extents are blocks that we're trying to allocate in the extent
 * tree while trying to grow the tree because of other allocations.  To
 * avoid recursing, they are recorded in the pending tree and cleaned up
 * after the other allocations are done.  The del_pending tree is used in
 * the same manner for deletes.
 */

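/*
 * increment the reference count on the extent record for 'blocknr' in the
 * extent tree, then flush any inserts or deletes that were queued up while
 * the extent tree itself was being modified.
 */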
static int inc_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root
                         *root, u64 blocknr)
{
        struct btrfs_path path;
        int ret;
        struct btrfs_key key;
        struct btrfs_leaf *l;
        struct btrfs_extent_item *item;
        u32 refs;

        btrfs_init_path(&path);
        key.objectid = blocknr;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        key.offset = 1;
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
                                0, 1);
        BUG_ON(ret != 0);
        l = &path.nodes[0]->leaf;
        item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(item);
        btrfs_set_extent_refs(item, refs + 1);

        BUG_ON(list_empty(&path.nodes[0]->dirty));
        btrfs_release_path(root->fs_info->extent_root, &path);
        finish_current_insert(trans, root->fs_info->extent_root);
        run_pending(trans, root->fs_info->extent_root);
        return 0;
}

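/*
 * read the current reference count for 'blocknr' out of the extent tree
 * and return it through 'refs'.  Nothing is modified.
 */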
static int lookup_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root
                            *root, u64 blocknr, u32 *refs)
{
        struct btrfs_path path;
        int ret;
        struct btrfs_key key;
        struct btrfs_leaf *l;
        struct btrfs_extent_item *item;

        btrfs_init_path(&path);
        key.objectid = blocknr;
        key.offset = 1;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
                                0, 0);
        if (ret != 0)
                BUG();
        l = &path.nodes[0]->leaf;
        item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
        *refs = btrfs_extent_refs(item);
        btrfs_release_path(root->fs_info->extent_root, &path);
        return 0;
}

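/*
 * increment the reference count on every block pointed to by the node in
 * 'buf'.  Nothing is done for leaves or for roots that are not reference
 * counted (ref_cows == 0).
 */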
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct btrfs_buffer *buf)
{
        u64 blocknr;
        int i;

        if (!root->ref_cows)
                return 0;
        if (btrfs_is_leaf(&buf->node))
                return 0;

        for (i = 0; i < btrfs_header_nritems(&buf->node.header); i++) {
                blocknr = btrfs_node_blockptr(&buf->node, i);
                inc_block_ref(trans, root, blocknr);
        }
        return 0;
}

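/*
 * copy the in-memory block group item in 'cache' back into its slot in
 * the extent tree, then process any pending extent tree operations.
 */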
static int write_one_cache_group(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_block_group_cache *cache)
{
        int ret;
        int pending_ret;
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct btrfs_block_group_item *bi;

        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
        BUG_ON(ret);
        bi = btrfs_item_ptr(&path->nodes[0]->leaf, path->slots[0],
                            struct btrfs_block_group_item);
        memcpy(bi, &cache->item, sizeof(*bi));
        dirty_tree_block(trans, extent_root, path->nodes[0]);
        btrfs_release_path(extent_root, path);
        finish_current_insert(trans, extent_root);
        pending_ret = run_pending(trans, extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;
}

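/*
 * find every block group tagged dirty in the radix tree, clear the tag
 * and write the cached block group item back to the extent tree.  The
 * last error seen, if any, is returned.
 */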
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        struct btrfs_block_group_cache *cache[8];
        int ret;
        int err = 0;
        int werr = 0;
        struct radix_tree_root *radix = &root->fs_info->block_group_radix;
        int i;
        struct btrfs_path path;

        btrfs_init_path(&path);
        while(1) {
                ret = radix_tree_gang_lookup_tag(radix, (void *)cache,
                                                 0, ARRAY_SIZE(cache),
                                                 BTRFS_BLOCK_GROUP_DIRTY);
                if (!ret)
                        break;
                for (i = 0; i < ret; i++) {
                        radix_tree_tag_clear(radix, cache[i]->key.objectid +
                                             cache[i]->key.offset - 1,
                                             BTRFS_BLOCK_GROUP_DIRTY);
                        err = write_one_cache_group(trans, root,
                                                    &path, cache[i]);
                        if (err)
                                werr = err;
                }
        }
        return werr;
}

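/*
 * adjust the used-block count for the block group(s) covering 'num' blocks
 * starting at 'blocknr'.  'alloc' selects whether the blocks are being
 * allocated or freed.  The affected groups are tagged dirty so they get
 * written back by btrfs_write_dirty_block_groups.
 */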
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 blocknr, u64 num, int alloc)
{
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        u64 total = num;
        u64 old_val;
        u64 block_in_group;
        int ret;

        while(total) {
                ret = radix_tree_gang_lookup(&info->block_group_radix,
                                             (void *)&cache, blocknr, 1);
                if (!ret)
                        return -1;
                radix_tree_tag_set(&info->block_group_radix,
                                   cache->key.objectid + cache->key.offset - 1,
                                   BTRFS_BLOCK_GROUP_DIRTY);

                block_in_group = blocknr - cache->key.objectid;
                old_val = btrfs_block_group_used(&cache->item);
                if (total > cache->key.offset - block_in_group)
                        num = cache->key.offset - block_in_group;
                else
                        num = total;
                total -= num;
                blocknr += num;
                if (alloc)
                        old_val += num;
                else
                        old_val -= num;
                btrfs_set_block_group_used(&cache->item, old_val);
        }
        return 0;
}

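/*
 * empty the tree of pinned extents at the end of a commit and reset the
 * allocation hint to the first block that was pinned, so those blocks can
 * be considered again by later allocations.
 */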
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct
                               btrfs_root *root)
{
        u64 first = 0;
        struct pending_extent *pe;
        struct pending_extent *next;

        pe = find_first_pending_extent(&root->fs_info->pinned_tree, 0);
        if (pe)
                first = pe->start;
        while(pe) {
                next = next_pending_extent(pe);
                remove_pending_extent(&root->fs_info->pinned_tree, pe);
                free_pending_extent(pe);
                pe = next;
        }
        root->fs_info->last_insert.objectid = first;
        root->fs_info->last_insert.offset = 0;
        return 0;
}

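/*
 * insert extent items for any blocks that were handed out while the
 * extent tree itself was being modified.  Those blocks sit in the pending
 * tree so the allocator does not recurse.
 */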
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
                                 btrfs_root *extent_root)
{
        struct btrfs_key ins;
        struct btrfs_extent_item extent_item;
        int ret;
        u64 super_blocks_used, root_blocks_used;
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct pending_extent *pe;
        struct pending_extent *next;
        struct pending_tree *pending_tree = &info->pending_tree;

        btrfs_set_extent_refs(&extent_item, 1);
        btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);
        ins.offset = 1;
        btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
        pe = find_first_pending_extent(pending_tree, 0);
        while(pe) {
                ins.offset = pe->size;
                ins.objectid = pe->start;

                remove_pending_extent(pending_tree, pe);
                next = next_pending_extent(pe);
                if (!next)
                        next = find_first_pending_extent(pending_tree, 0);

                free_pending_extent(pe);
                pe = next;

                super_blocks_used = btrfs_super_blocks_used(info->disk_super);
                btrfs_set_super_blocks_used(info->disk_super,
                                            super_blocks_used + 1);
                root_blocks_used = btrfs_root_blocks_used(&extent_root->root_item);
                btrfs_set_root_blocks_used(&extent_root->root_item,
                                           root_blocks_used + 1);
                ret = btrfs_insert_item(trans, extent_root, &ins, &extent_item,
                                        sizeof(extent_item));
                if (ret) {
                        btrfs_print_tree(extent_root, extent_root->node);
                }
                BUG_ON(ret);
        }
        return 0;
}

/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                         *root, u64 blocknr, u64 num_blocks, int pin)
{
        struct btrfs_path path;
        struct btrfs_key key;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_root *extent_root = info->extent_root;
        int ret;
        struct btrfs_extent_item *ei;
        u32 refs;

        BUG_ON(pin && num_blocks != 1);
        key.objectid = blocknr;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        key.offset = num_blocks;

        btrfs_init_path(&path);
        ret = btrfs_search_slot(trans, extent_root, &key, &path, -1, 1);
        if (ret) {
                btrfs_print_tree(extent_root, extent_root->node);
                printf("failed to find %llu\n",
                       (unsigned long long)key.objectid);
                BUG();
        }
        ei = btrfs_item_ptr(&path.nodes[0]->leaf, path.slots[0],
                            struct btrfs_extent_item);
        BUG_ON(btrfs_extent_refs(ei) == 0);
        refs = btrfs_extent_refs(ei) - 1;
        btrfs_set_extent_refs(ei, refs);
        if (refs == 0) {
                u64 super_blocks_used, root_blocks_used;

                if (pin) {
                        int err;
                        err = insert_pending_extent(&info->pinned_tree,
                                                    blocknr, 1);
                        BUG_ON(err);
                }
                super_blocks_used = btrfs_super_blocks_used(info->disk_super);
                btrfs_set_super_blocks_used(info->disk_super,
                                            super_blocks_used - num_blocks);
                root_blocks_used = btrfs_root_blocks_used(&root->root_item);
                btrfs_set_root_blocks_used(&root->root_item,
                                           root_blocks_used - num_blocks);

                ret = btrfs_del_item(trans, extent_root, &path);
                if (!pin && extent_root->fs_info->last_insert.objectid >
                    blocknr)
                        extent_root->fs_info->last_insert.objectid = blocknr;
                if (ret)
                        BUG();
                ret = update_block_group(trans, root, blocknr, num_blocks, 0);
                BUG_ON(ret);
        }
        btrfs_release_path(extent_root, &path);
        finish_current_insert(trans, extent_root);
        return ret;
}

/*
 * find all the blocks queued for pending deletion and remove
 * them from the extent tree
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
                               btrfs_root *extent_root)
{
        int ret;
        struct pending_extent *pe;
        struct pending_extent *next;
        struct pending_tree *del_pending = &extent_root->fs_info->del_pending;

        pe = find_first_pending_extent(del_pending, 0);
        while(pe) {
                remove_pending_extent(del_pending, pe);
                ret = __free_extent(trans, extent_root,
                                    pe->start, 1, 1);
                BUG_ON(ret);
                next = next_pending_extent(pe);
                if (!next)
                        next = find_first_pending_extent(del_pending, 0);
                free_pending_extent(pe);
                pe = next;
        }
        return 0;
}

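/*
 * process deferred extent tree operations; currently this only means
 * deleting the extents queued up in the del_pending tree.
 */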
static int run_pending(struct btrfs_trans_handle *trans, struct btrfs_root
                       *extent_root)
{
        del_pending_extents(trans, extent_root);
        return 0;
}

/*
 * remove an extent from the root, returns 0 on success
 */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, u64 blocknr, u64 num_blocks, int pin)
{
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        int pending_ret;
        int ret;

        if (root == extent_root) {
                ret = insert_pending_extent(&root->fs_info->del_pending,
                                            blocknr, num_blocks);
                BUG_ON(ret);
                return 0;
        }
        ret = __free_extent(trans, root, blocknr, num_blocks, pin);
        pending_ret = run_pending(trans, root->fs_info->extent_root);
        return ret ? ret : pending_ret;
}

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags == BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                            *orig_root, u64 num_blocks, u64 search_start,
                            u64 search_end, struct btrfs_key *ins)
{
        struct btrfs_path path;
        struct btrfs_key key;
        int ret;
        u64 hole_size = 0;
        int slot = 0;
        u64 last_block = 0;
        int start_found;
        struct btrfs_leaf *l;
        struct btrfs_root *root = orig_root->fs_info->extent_root;
        int total_needed = num_blocks;

        if (root->fs_info->last_insert.objectid > search_start)
                search_start = root->fs_info->last_insert.objectid;

        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

check_failed:
        btrfs_init_path(&path);
        ins->objectid = search_start;
        ins->offset = 0;
        start_found = 0;
        ret = btrfs_search_slot(trans, root, ins, &path, 0, 0);
        if (ret < 0)
                goto error;

        if (path.slots[0] > 0)
                path.slots[0]--;

        while (1) {
                l = &path.nodes[0]->leaf;
                slot = path.slots[0];
                if (slot >= btrfs_header_nritems(&l->header)) {
                        ret = btrfs_next_leaf(root, &path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        if (!start_found) {
                                ins->objectid = search_start;
                                ins->offset = (u64)-1 - search_start;
                                start_found = 1;
                                goto check_pending;
                        }
                        ins->objectid = last_block > search_start ?
                                        last_block : search_start;
                        ins->offset = (u64)-1 - ins->objectid;
                        goto check_pending;
                }
                btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
                if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
                        goto next;
                if (key.objectid >= search_start) {
                        if (start_found) {
                                if (last_block < search_start)
                                        last_block = search_start;
                                hole_size = key.objectid - last_block;
                                if (hole_size > total_needed) {
                                        ins->objectid = last_block;
                                        ins->offset = hole_size;
                                        goto check_pending;
                                }
                        }
                }
                start_found = 1;
                last_block = key.objectid + key.offset;
next:
                path.slots[0]++;
        }
        /* FIXME -ENOSPC */
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        btrfs_release_path(root, &path);
        BUG_ON(ins->objectid < search_start);
        if (find_pending_extent(&root->fs_info->pinned_tree,
                                ins->objectid, total_needed)) {
                search_start = ins->objectid + total_needed;
                goto check_failed;
        }
        if (find_pending_extent(&root->fs_info->pending_tree,
                                ins->objectid, total_needed)) {
                search_start = ins->objectid + total_needed;
                goto check_failed;
        }
        root->fs_info->last_insert.objectid = ins->objectid;
        ins->offset = num_blocks;
        return 0;
error:
        btrfs_release_path(root, &path);
        return ret;
}

/*
 * finds a free extent and does all the dirty work required for allocation.
 * The key for the new extent is returned through ins.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
static int alloc_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                        *root, u64 owner, u64 num_blocks,
                        u64 search_start, u64 search_end,
                        struct btrfs_key *ins)
{
        int ret;
        int pending_ret;
        u64 super_blocks_used, root_blocks_used;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_root *extent_root = info->extent_root;
        struct btrfs_extent_item extent_item;

        btrfs_set_extent_refs(&extent_item, 1);
        btrfs_set_extent_owner(&extent_item, owner);

        ret = find_free_extent(trans, root, num_blocks, search_start,
                               search_end, ins);
        if (ret)
                return ret;

        if (root == extent_root) {
                ret = insert_pending_extent(&root->fs_info->pending_tree,
                                            ins->objectid, ins->offset);
                BUG_ON(ret);
                return 0;
        }
        super_blocks_used = btrfs_super_blocks_used(info->disk_super);
        btrfs_set_super_blocks_used(info->disk_super, super_blocks_used +
                                    num_blocks);
        root_blocks_used = btrfs_root_blocks_used(&root->root_item);
        btrfs_set_root_blocks_used(&root->root_item, root_blocks_used +
                                   num_blocks);

        ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
                                sizeof(extent_item));

        finish_current_insert(trans, extent_root);
        pending_ret = run_pending(trans, extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;
}

/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct btrfs_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root)
{
        struct btrfs_key ins;
        int ret;
        struct btrfs_buffer *buf;

        ret = alloc_extent(trans, root, root->root_key.objectid,
                           1, 0, (u64)-1, &ins);
        if (ret) {
                BUG();
                return NULL;
        }
        ret = update_block_group(trans, root, ins.objectid, ins.offset, 1);
        buf = find_tree_block(root, ins.objectid);
        btrfs_set_header_generation(&buf->node.header,
                                    root->root_key.offset + 1);
        btrfs_set_header_blocknr(&buf->node.header, buf->blocknr);
        memcpy(buf->node.header.fsid, root->fs_info->disk_super->fsid,
               sizeof(buf->node.header.fsid));
        dirty_tree_block(trans, root, buf);
        return buf;
}

/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
                          *root, struct btrfs_path *path, int *level)
{
        struct btrfs_buffer *next;
        struct btrfs_buffer *cur;
        u64 blocknr;
        int ret;
        u32 refs;

        ret = lookup_block_ref(trans, root, path->nodes[*level]->blocknr,
                               &refs);
        BUG_ON(ret);
        if (refs > 1)
                goto out;
        /*
         * walk down to the last node level and free all the leaves
         */
        while(*level > 0) {
                cur = path->nodes[*level];
                if (path->slots[*level] >=
                    btrfs_header_nritems(&cur->node.header))
                        break;
                blocknr = btrfs_node_blockptr(&cur->node, path->slots[*level]);
                ret = lookup_block_ref(trans, root, blocknr, &refs);
                if (refs != 1 || *level == 1) {
                        path->slots[*level]++;
                        ret = btrfs_free_extent(trans, root, blocknr, 1, 1);
                        BUG_ON(ret);
                        continue;
                }
                BUG_ON(ret);
                next = read_tree_block(root, blocknr);
                if (path->nodes[*level-1])
                        btrfs_block_release(root, path->nodes[*level-1]);
                path->nodes[*level-1] = next;
                *level = btrfs_header_level(&next->node.header);
                path->slots[*level] = 0;
        }
out:
        ret = btrfs_free_extent(trans, root, path->nodes[*level]->blocknr, 1,
                                1);
        btrfs_block_release(root, path->nodes[*level]);
        path->nodes[*level] = NULL;
        *level += 1;
        BUG_ON(ret);
        return 0;
}

/*
 * helper for dropping snapshots.  This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
                        *root, struct btrfs_path *path, int *level)
{
        int i;
        int slot;
        int ret;

        for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
                slot = path->slots[i];
                if (slot <
                    btrfs_header_nritems(&path->nodes[i]->node.header) - 1) {
                        path->slots[i]++;
                        *level = i;
                        return 0;
                } else {
                        ret = btrfs_free_extent(trans, root,
                                                path->nodes[*level]->blocknr,
                                                1, 1);
                        btrfs_block_release(root, path->nodes[*level]);
                        path->nodes[*level] = NULL;
                        *level = i + 1;
                        BUG_ON(ret);
                }
        }
        return 1;
}

/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
                        *root, struct btrfs_buffer *snap)
{
        int ret = 0;
        int wret;
        int level;
        struct btrfs_path path;
        int i;
        int orig_level;

        btrfs_init_path(&path);

        level = btrfs_header_level(&snap->node.header);
        orig_level = level;
        path.nodes[level] = snap;
        path.slots[level] = 0;
        while(1) {
                wret = walk_down_tree(trans, root, &path, &level);
                if (wret > 0)
                        break;
                if (wret < 0)
                        ret = wret;

                wret = walk_up_tree(trans, root, &path, &level);
                if (wret > 0)
                        break;
                if (wret < 0)
                        ret = wret;
        }
        for (i = 0; i <= orig_level; i++) {
                if (path.nodes[i]) {
                        btrfs_block_release(root, path.nodes[i]);
                }
        }
        return ret;
}

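/*
 * drop all of the cached block group records from the radix tree and
 * free them.
 */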
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
        int ret;
        struct btrfs_block_group_cache *cache[8];
        int i;

        while(1) {
                ret = radix_tree_gang_lookup(&info->block_group_radix,
                                             (void *)cache, 0,
                                             ARRAY_SIZE(cache));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++) {
                        radix_tree_delete(&info->block_group_radix,
                                          cache[i]->key.objectid +
                                          cache[i]->key.offset - 1);
                        free(cache[i]);
                }
        }
        return 0;
}

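/*
 * scan the extent tree for block group items and build the in-memory
 * block group cache, indexed in the radix tree by the last block of
 * each group.
 */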
int btrfs_read_block_groups(struct btrfs_root *root)
{
        struct btrfs_path path;
        int ret;
        int err = 0;
        struct btrfs_block_group_item *bi;
        struct btrfs_block_group_cache *cache;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_leaf *leaf;
        u64 group_size_blocks = BTRFS_BLOCK_GROUP_SIZE / root->sectorsize;

        root = root->fs_info->extent_root;
        key.objectid = 0;
        key.offset = group_size_blocks;
        btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
        btrfs_init_path(&path);

        while(1) {
                ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
                                        &key, &path, 0, 0);
                if (ret != 0) {
                        err = ret;
                        break;
                }
                leaf = &path.nodes[0]->leaf;
                btrfs_disk_key_to_cpu(&found_key,
                                      &leaf->items[path.slots[0]].key);
                cache = malloc(sizeof(*cache));
                if (!cache) {
                        err = -1;
                        break;
                }
                bi = btrfs_item_ptr(leaf, path.slots[0],
                                    struct btrfs_block_group_item);
                memcpy(&cache->item, bi, sizeof(*bi));
                memcpy(&cache->key, &found_key, sizeof(found_key));
                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(root, &path);
                ret = radix_tree_insert(&root->fs_info->block_group_radix,
                                        found_key.objectid +
                                        found_key.offset - 1, (void *)cache);
                BUG_ON(ret);
                if (key.objectid >=
                    btrfs_super_total_blocks(root->fs_info->disk_super))
                        break;
        }
        btrfs_release_path(root, &path);
        return 0;
}

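/*
 * insert a new block group item into the extent tree and run any pending
 * extent tree operations that were queued up while doing so.
 */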
int btrfs_insert_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_key *key,
                             struct btrfs_block_group_item *bi)
{
        int ret;
        int pending_ret;

        root = root->fs_info->extent_root;
        ret = btrfs_insert_item(trans, root, key, bi, sizeof(*bi));
        finish_current_insert(trans, root);
        pending_ret = run_pending(trans, root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return ret;
}