Update btrfs-progs to better match the kernel
[platform/upstream/btrfs-progs.git] / extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include "kerncompat.h"
#include "radix-tree.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"

static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root);
static int run_pending(struct btrfs_trans_handle *trans,
                       struct btrfs_root *extent_root);

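/*
 * lookup the extent item for a single tree block and bump its reference
 * count in place.  The search cows the extent tree leaf, so it is already
 * dirty by the time the count is updated.
 */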
static int inc_block_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, u64 bytenr, u32 blocksize)
{
        struct btrfs_path path;
        int ret;
        struct btrfs_key key;
        struct btrfs_leaf *l;
        struct btrfs_extent_item *item;
        u32 refs;

        btrfs_init_path(&path);
        key.objectid = bytenr;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        key.offset = blocksize;
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
                                0, 1);
        BUG_ON(ret != 0);
        l = &path.nodes[0]->leaf;
        item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(item);
        btrfs_set_extent_refs(item, refs + 1);

        BUG_ON(list_empty(&path.nodes[0]->dirty));
        btrfs_release_path(root->fs_info->extent_root, &path);
        finish_current_insert(trans, root->fs_info->extent_root);
        run_pending(trans, root->fs_info->extent_root);
        return 0;
}

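/*
 * read-only lookup of the reference count stored in the extent item for
 * the block at bytenr
 */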
static int lookup_block_ref(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, u64 bytenr,
                            u32 blocksize, u32 *refs)
{
        struct btrfs_path path;
        int ret;
        struct btrfs_key key;
        struct btrfs_leaf *l;
        struct btrfs_extent_item *item;

        btrfs_init_path(&path);
        key.objectid = bytenr;
        key.offset = blocksize;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
                                0, 0);
        if (ret != 0)
                BUG();
        l = &path.nodes[0]->leaf;
        item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
        *refs = btrfs_extent_refs(item);
        btrfs_release_path(root->fs_info->extent_root, &path);
        return 0;
}

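/*
 * increment the reference count on every tree block pointed to by buf.
 * Only roots with ref_cows set need this, and leaves are skipped because
 * they carry no block pointers.
 */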
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct btrfs_buffer *buf)
{
        u64 bytenr;
        u32 blocksize;
        int i;
        int level;

        if (!root->ref_cows)
                return 0;

        if (btrfs_is_leaf(&buf->node))
                return 0;

        level = btrfs_header_level(&buf->node.header) - 1;
        blocksize = btrfs_level_size(root, level);

        for (i = 0; i < btrfs_header_nritems(&buf->node.header); i++) {
                bytenr = btrfs_node_blockptr(&buf->node, i);
                inc_block_ref(trans, root, bytenr, blocksize);
        }

        return 0;
}

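/*
 * add one reference to the block that is currently the root of 'root'
 */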
int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        return inc_block_ref(trans, root, root->node->bytenr,
                             root->node->size);
}

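/*
 * write the in-memory copy of a single block group item back into the
 * extent tree
 */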
static int write_one_cache_group(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_block_group_cache *cache)
{
        int ret;
        int pending_ret;
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct btrfs_block_group_item *bi;

        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
        BUG_ON(ret);
        bi = btrfs_item_ptr(&path->nodes[0]->leaf, path->slots[0],
                            struct btrfs_block_group_item);
        memcpy(bi, &cache->item, sizeof(*bi));
        dirty_tree_block(trans, extent_root, path->nodes[0]);
        btrfs_release_path(extent_root, path);
        finish_current_insert(trans, extent_root);
        pending_ret = run_pending(trans, extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;
}

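/*
 * walk the in-memory block group cache and write every group that has
 * been marked dirty back to disk.  Returns the last error seen, or zero.
 */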
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        struct btrfs_block_group_cache *bg;
        struct cache_extent *cache;
        int err = 0;
        int werr = 0;
        struct cache_tree *bg_cache = &root->fs_info->block_group_cache;
        struct btrfs_path path;
        u64 start = 0;

        btrfs_init_path(&path);
        while(1) {
                cache = find_first_cache_extent(bg_cache, start);
                if (!cache)
                        break;
                bg = container_of(cache, struct btrfs_block_group_cache,
                                  cache);
                start = cache->start + cache->size;
                if (bg->dirty) {
                        err = write_one_cache_group(trans, root,
                                                    &path, bg);
                        if (err)
                                werr = err;
                }
                bg->dirty = 0;
        }
        return werr;
}

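/*
 * adjust the 'used' counter of every block group that overlaps the range
 * [bytenr, bytenr + num), adding when alloc is set and subtracting
 * otherwise.  The touched groups are marked dirty.
 */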
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num, int alloc)
{
        struct btrfs_block_group_cache *bg;
        struct cache_extent *cache;
        struct btrfs_fs_info *info = root->fs_info;
        u64 total = num;
        u64 old_val;
        u64 byte_in_group;

        while(total) {
                cache = find_first_cache_extent(&info->block_group_cache,
                                                bytenr);
                if (!cache)
                        return -1;
                bg = container_of(cache, struct btrfs_block_group_cache,
                                  cache);
                bg->dirty = 1;
                byte_in_group = bytenr - bg->key.objectid;
                old_val = btrfs_block_group_used(&bg->item);
                if (total > bg->key.offset - byte_in_group)
                        num = bg->key.offset - byte_in_group;
                else
                        num = total;
                total -= num;
                bytenr += num;
                if (alloc)
                        old_val += num;
                else
                        old_val -= num;
                btrfs_set_block_group_used(&bg->item, old_val);
        }
        return 0;
}

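/*
 * called at transaction commit time: empty the pinned tree and use the
 * lowest pinned offset as the starting hint for the next allocation search
 */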
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        u64 first = 0;
        struct cache_extent *pe;
        struct cache_extent *next;

        pe = find_first_cache_extent(&root->fs_info->pinned_tree, 0);
        if (pe)
                first = pe->start;
        while(pe) {
                next = next_cache_extent(pe);
                remove_cache_extent(&root->fs_info->pinned_tree, pe);
                free_cache_extent(pe);
                pe = next;
        }
        root->fs_info->last_insert.objectid = first;
        root->fs_info->last_insert.offset = 0;
        return 0;
}

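/*
 * flush the pending tree: insert an extent item (one reference, owned by
 * the extent root) for every extent that was reserved while the extent
 * tree itself was being modified
 */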
static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root)
{
        struct btrfs_key ins;
        struct btrfs_extent_item extent_item;
        int ret;
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct cache_extent *pe;
        struct cache_extent *next;
        struct cache_tree *pending_tree = &info->pending_tree;

        btrfs_set_extent_refs(&extent_item, 1);
        btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);
        ins.offset = 1;
        btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
        pe = find_first_cache_extent(pending_tree, 0);
        while(pe) {
                ins.offset = pe->size;
                ins.objectid = pe->start;

                remove_cache_extent(pending_tree, pe);
                next = next_cache_extent(pe);
                if (!next)
                        next = find_first_cache_extent(pending_tree, 0);

                free_cache_extent(pe);
                pe = next;

                ret = btrfs_insert_item(trans, extent_root, &ins, &extent_item,
                                        sizeof(extent_item));
                if (ret) {
                        btrfs_print_tree(extent_root, extent_root->node);
                }
                BUG_ON(ret);
        }
        return 0;
}

/*
 * drop one reference on an extent.  When the count reaches zero the extent
 * item is deleted, the space accounting is updated, and the range is
 * optionally pinned until the transaction commits.  Returns 0 on success.
 */
static int __free_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, u64 bytenr, u64 num_bytes,
                         int pin)
{
        struct btrfs_path path;
        struct btrfs_key key;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_root *extent_root = info->extent_root;
        int ret;
        struct btrfs_extent_item *ei;
        u32 refs;

        key.objectid = bytenr;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        key.offset = num_bytes;

        btrfs_init_path(&path);
        ret = btrfs_search_slot(trans, extent_root, &key, &path, -1, 1);
        if (ret) {
                btrfs_print_tree(extent_root, extent_root->node);
                printf("failed to find %llu\n",
                       (unsigned long long)key.objectid);
                BUG();
        }
        ei = btrfs_item_ptr(&path.nodes[0]->leaf, path.slots[0],
                            struct btrfs_extent_item);
        BUG_ON(btrfs_extent_refs(ei) == 0);
        refs = btrfs_extent_refs(ei) - 1;
        btrfs_set_extent_refs(ei, refs);
        if (refs == 0) {
                u64 super_bytes_used, root_bytes_used;

                if (pin) {
                        int err;
                        err = insert_cache_extent(&info->pinned_tree,
                                                  bytenr, num_bytes);
                        BUG_ON(err);
                }
                super_bytes_used = btrfs_super_bytes_used(info->disk_super);
                btrfs_set_super_bytes_used(info->disk_super,
                                           super_bytes_used - num_bytes);
                root_bytes_used = btrfs_root_bytes_used(&root->root_item);
                btrfs_set_root_bytes_used(&root->root_item,
                                          root_bytes_used - num_bytes);

                ret = btrfs_del_item(trans, extent_root, &path);
                if (!pin && extent_root->fs_info->last_insert.objectid >
                    bytenr)
                        extent_root->fs_info->last_insert.objectid = bytenr;
                if (ret)
                        BUG();
                ret = update_block_group(trans, root, bytenr, num_bytes, 0);
                BUG_ON(ret);
        }
        btrfs_release_path(extent_root, &path);
        finish_current_insert(trans, extent_root);
        return ret;
}

/*
 * walk the del_pending tree and drop a reference on every extent queued
 * there, removing each from the extent tree once its count hits zero
 */
static int del_pending_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root)
{
        int ret;
        struct cache_extent *pe;
        struct cache_extent *next;
        struct cache_tree *del_pending = &extent_root->fs_info->del_pending;

        pe = find_first_cache_extent(del_pending, 0);
        while(pe) {
                remove_cache_extent(del_pending, pe);
                ret = __free_extent(trans, extent_root,
                                    pe->start, pe->size, 1);
                BUG_ON(ret);
                next = next_cache_extent(pe);
                if (!next)
                        next = find_first_cache_extent(del_pending, 0);
                free_cache_extent(pe);
                pe = next;
        }
        return 0;
}

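/*
 * process any extent deletions that were queued up while the extent tree
 * was being modified
 */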
static int run_pending(struct btrfs_trans_handle *trans,
                       struct btrfs_root *extent_root)
{
        del_pending_extents(trans, extent_root);
        return 0;
}

/*
 * drop a reference on an extent.  Frees against the extent root itself are
 * queued in del_pending and processed later to avoid recursion.
 * Returns 0 on success.
 */
int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, u64 bytenr, u64 num_bytes,
                      int pin)
{
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        int pending_ret;
        int ret;

        if (root == extent_root) {
                ret = insert_cache_extent(&root->fs_info->del_pending,
                                          bytenr, num_bytes);
                BUG_ON(ret);
                return 0;
        }
        ret = __free_extent(trans, root, bytenr, num_bytes, pin);
        pending_ret = run_pending(trans, root->fs_info->extent_root);
        return ret ? ret : pending_ret;
}

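/*
 * round val up to the next multiple of the stripe size.  With a 4096 byte
 * stripe, for example, 6000 becomes (6000 + 4095) & ~4095 = 8192.
 */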
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
        u64 mask = ((u64)root->stripesize - 1);
        u64 ret = (val + mask) & ~mask;
        return ret;
}

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start of the hole
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of bytes in the hole
 * Any available bytes before search_start are skipped.
 */
static int find_free_extent(struct btrfs_trans_handle *trans,
                            struct btrfs_root *orig_root, u64 total_needed,
                            u64 search_start, u64 search_end,
                            struct btrfs_key *ins)
{
        struct btrfs_path path;
        struct btrfs_key key;
        int ret;
        u64 hole_size = 0;
        int slot = 0;
        u64 last_byte = 0;
        u64 aligned;
        int start_found;
        struct btrfs_leaf *l;
        struct btrfs_root *root = orig_root->fs_info->extent_root;

        if (root->fs_info->last_insert.objectid > search_start)
                search_start = root->fs_info->last_insert.objectid;

        search_start = stripe_align(root, search_start);
        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

check_failed:
        btrfs_init_path(&path);
        ins->objectid = search_start;
        ins->offset = 0;
        start_found = 0;
        ret = btrfs_search_slot(trans, root, ins, &path, 0, 0);
        if (ret < 0)
                goto error;

        if (path.slots[0] > 0)
                path.slots[0]--;

        while (1) {
                l = &path.nodes[0]->leaf;
                slot = path.slots[0];
                if (slot >= btrfs_header_nritems(&l->header)) {
                        ret = btrfs_next_leaf(root, &path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        if (!start_found) {
                                aligned = stripe_align(root, search_start);
                                ins->objectid = aligned;
                                ins->offset = (u64)-1 - aligned;
                                start_found = 1;
                                goto check_pending;
                        }
                        ins->objectid = stripe_align(root,
                                                     last_byte > search_start ?
                                                     last_byte : search_start);
                        ins->offset = (u64)-1 - ins->objectid;
                        goto check_pending;
                }
                btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
                if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
                        goto next;
                if (key.objectid >= search_start) {
                        if (start_found) {
                                if (last_byte < search_start)
                                        last_byte = search_start;
                                aligned = stripe_align(root, last_byte);
                                hole_size = key.objectid - aligned;
                                if (key.objectid > aligned &&
                                    hole_size > total_needed) {
                                        ins->objectid = aligned;
                                        ins->offset = hole_size;
                                        goto check_pending;
                                }
                        }
                }
                start_found = 1;
                last_byte = key.objectid + key.offset;
next:
                path.slots[0]++;
        }
/* FIXME -ENOSPC */
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been pinned or is still queued in the pending tree
         */
        btrfs_release_path(root, &path);
        BUG_ON(ins->objectid < search_start);
        if (find_cache_extent(&root->fs_info->pinned_tree,
                              ins->objectid, total_needed)) {
                search_start = ins->objectid + total_needed;
                goto check_failed;
        }
        if (find_cache_extent(&root->fs_info->pending_tree,
                              ins->objectid, total_needed)) {
                search_start = ins->objectid + total_needed;
                goto check_failed;
        }
        root->fs_info->last_insert.objectid = ins->objectid;
        ins->offset = total_needed;
        return 0;
error:
        btrfs_release_path(root, &path);
        return ret;
}

/*
 * finds a free extent and does all the dirty work required for allocation.
 * The key for the new extent is returned through ins.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
static int alloc_extent(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root, u64 owner,
                        u64 num_bytes, u64 search_start,
                        u64 search_end, struct btrfs_key *ins)
{
        int ret;
        int pending_ret;
        u64 super_bytes_used, root_bytes_used;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_root *extent_root = info->extent_root;
        struct btrfs_extent_item extent_item;

        btrfs_set_extent_refs(&extent_item, 1);
        btrfs_set_extent_owner(&extent_item, owner);

        ret = find_free_extent(trans, root, num_bytes, search_start,
                               search_end, ins);
        if (ret)
                return ret;

        super_bytes_used = btrfs_super_bytes_used(info->disk_super);
        btrfs_set_super_bytes_used(info->disk_super, super_bytes_used +
                                   num_bytes);
        root_bytes_used = btrfs_root_bytes_used(&root->root_item);
        btrfs_set_root_bytes_used(&root->root_item, root_bytes_used +
                                  num_bytes);
        if (root == extent_root) {
                ret = insert_cache_extent(&root->fs_info->pending_tree,
                                          ins->objectid, ins->offset);
                BUG_ON(ret);
                goto update_block;
        }
        ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
                                sizeof(extent_item));

        finish_current_insert(trans, extent_root);
        pending_ret = run_pending(trans, extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
update_block:
        ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
        BUG_ON(ret);
        return 0;
}

/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct btrfs_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            u32 blocksize)
{
        struct btrfs_key ins;
        int ret;
        struct btrfs_buffer *buf;

        ret = alloc_extent(trans, root, root->root_key.objectid,
                           blocksize, 0, (u64)-1, &ins);
        if (ret) {
                BUG();
                return NULL;
        }
        buf = find_tree_block(root, ins.objectid, blocksize);
        btrfs_set_header_generation(&buf->node.header,
                                    root->root_key.offset + 1);
        btrfs_set_header_bytenr(&buf->node.header, buf->bytenr);
        memcpy(buf->node.header.fsid, root->fs_info->disk_super->fsid,
               sizeof(buf->node.header.fsid));
        dirty_tree_block(trans, root, buf);
        return buf;
}

/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static int walk_down_tree(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct btrfs_path *path,
                          int *level)
{
        struct btrfs_buffer *next;
        struct btrfs_buffer *cur;
        u64 bytenr;
        int ret;
        u32 refs;

        ret = lookup_block_ref(trans, root, path->nodes[*level]->bytenr,
                               btrfs_level_size(root, *level), &refs);
        BUG_ON(ret);
        if (refs > 1)
                goto out;
        /*
         * walk down to the last node level and free all the leaves
         */
        while(*level > 0) {
                u32 size = btrfs_level_size(root, *level - 1);

                cur = path->nodes[*level];
                if (path->slots[*level] >=
                    btrfs_header_nritems(&cur->node.header))
                        break;
                bytenr = btrfs_node_blockptr(&cur->node, path->slots[*level]);
                ret = lookup_block_ref(trans, root, bytenr, size, &refs);
                BUG_ON(ret);
                if (refs != 1 || *level == 1) {
                        path->slots[*level]++;
                        ret = btrfs_free_extent(trans, root, bytenr, size, 1);
                        BUG_ON(ret);
                        continue;
                }
                next = read_tree_block(root, bytenr, size);
                if (path->nodes[*level-1])
                        btrfs_block_release(root, path->nodes[*level-1]);
                path->nodes[*level-1] = next;
                *level = btrfs_header_level(&next->node.header);
                path->slots[*level] = 0;
        }
out:
        ret = btrfs_free_extent(trans, root, path->nodes[*level]->bytenr,
                                btrfs_level_size(root, *level), 1);
        btrfs_block_release(root, path->nodes[*level]);
        path->nodes[*level] = NULL;
        *level += 1;
        BUG_ON(ret);
        return 0;
}

/*
 * helper for dropping snapshots.  This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
static int walk_up_tree(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root, struct btrfs_path *path,
                        int *level)
{
        int i;
        int slot;
        int ret;

        for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
                slot = path->slots[i];
                if (slot <
                    btrfs_header_nritems(&path->nodes[i]->node.header) - 1) {
                        path->slots[i]++;
                        *level = i;
                        return 0;
                } else {
                        ret = btrfs_free_extent(trans, root,
                                        path->nodes[*level]->bytenr,
                                        btrfs_level_size(root, *level), 1);
                        btrfs_block_release(root, path->nodes[*level]);
                        path->nodes[*level] = NULL;
                        *level = i + 1;
                        BUG_ON(ret);
                }
        }
        return 1;
}

/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root, struct btrfs_buffer *snap)
{
        int ret = 0;
        int wret;
        int level;
        struct btrfs_path path;
        int i;
        int orig_level;

        btrfs_init_path(&path);

        level = btrfs_header_level(&snap->node.header);
        orig_level = level;
        path.nodes[level] = snap;
        path.slots[level] = 0;
        while(1) {
                wret = walk_down_tree(trans, root, &path, &level);
                if (wret > 0)
                        break;
                if (wret < 0)
                        ret = wret;

                wret = walk_up_tree(trans, root, &path, &level);
                if (wret > 0)
                        break;
                if (wret < 0)
                        ret = wret;
        }
        for (i = 0; i <= orig_level; i++) {
                if (path.nodes[i]) {
                        btrfs_block_release(root, path.nodes[i]);
                }
        }
        return ret;
}

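/*
 * tear down the in-memory block group cache, freeing every cached group
 */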
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
        struct btrfs_block_group_cache *bg;
        struct cache_extent *cache;

        while(1) {
                cache = find_first_cache_extent(&info->block_group_cache, 0);
                if (!cache)
                        break;
                bg = container_of(cache, struct btrfs_block_group_cache,
                                  cache);
                remove_cache_extent(&info->block_group_cache, cache);
                free(bg);
        }
        return 0;
}

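/*
 * read every block group item out of the extent tree and build the
 * in-memory block group cache used by the allocator
 */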
int btrfs_read_block_groups(struct btrfs_root *root)
{
        struct btrfs_path path;
        int ret;
        int err = 0;
        struct btrfs_block_group_item *bi;
        struct btrfs_block_group_cache *bg;
        struct cache_tree *bg_cache;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_leaf *leaf;
        u64 group_size = BTRFS_BLOCK_GROUP_SIZE;

        root = root->fs_info->extent_root;
        bg_cache = &root->fs_info->block_group_cache;
        key.objectid = 0;
        key.offset = group_size;
        btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
        btrfs_init_path(&path);

        while(1) {
                ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
                                        &key, &path, 0, 0);
                if (ret != 0) {
                        err = ret;
                        break;
                }
                leaf = &path.nodes[0]->leaf;
                btrfs_disk_key_to_cpu(&found_key,
                                      &leaf->items[path.slots[0]].key);
                bg = malloc(sizeof(*bg));
                if (!bg) {
                        err = -1;
                        break;
                }
                bi = btrfs_item_ptr(leaf, path.slots[0],
                                    struct btrfs_block_group_item);
                memcpy(&bg->item, bi, sizeof(*bi));
                memcpy(&bg->key, &found_key, sizeof(found_key));
                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(root, &path);
                bg->cache.start = found_key.objectid;
                bg->cache.size = found_key.offset;
                bg->dirty = 0;
                ret = insert_existing_cache_extent(bg_cache, &bg->cache);
                BUG_ON(ret);
                if (key.objectid >=
                    btrfs_super_total_bytes(root->fs_info->disk_super))
                        break;
        }
        btrfs_release_path(root, &path);
        return 0;
}

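/*
 * insert a new block group item into the extent tree and flush any pending
 * extent tree work the insertion generated
 */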
int btrfs_insert_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_key *key,
                             struct btrfs_block_group_item *bi)
{
        int ret;
        int pending_ret;

        root = root->fs_info->extent_root;
        ret = btrfs_insert_item(trans, root, key, bi, sizeof(*bi));
        finish_current_insert(trans, root);
        pending_ret = run_pending(trans, root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return ret;
}
800 }