Add generation numbers to block pointers
[platform/upstream/btrfs-progs.git] / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include "kerncompat.h"
22 #include "radix-tree.h"
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "print-tree.h"
26 #include "transaction.h"
27
28 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
29                                  btrfs_root *extent_root);
30 static int run_pending(struct btrfs_trans_handle *trans, struct btrfs_root
31                        *extent_root);
32
33 static int inc_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root
34                          *root, u64 bytenr, u32 blocksize)
35 {
36         struct btrfs_path path;
37         int ret;
38         struct btrfs_key key;
39         struct btrfs_leaf *l;
40         struct btrfs_extent_item *item;
41         u32 refs;
42
43         btrfs_init_path(&path);
44         key.objectid = bytenr;
45         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
46         key.offset = blocksize;
47         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
48                                 0, 1);
49         if (ret != 0)
50                 BUG();
51         BUG_ON(ret != 0);
52         l = &path.nodes[0]->leaf;
53         item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
54         refs = btrfs_extent_refs(item);
55         btrfs_set_extent_refs(item, refs + 1);
56
57         BUG_ON(list_empty(&path.nodes[0]->dirty));
58         btrfs_release_path(root->fs_info->extent_root, &path);
59         finish_current_insert(trans, root->fs_info->extent_root);
60         run_pending(trans, root->fs_info->extent_root);
61         return 0;
62 }
63
64 static int lookup_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root
65                             *root, u64 bytenr, u32 blocksize, u32 *refs)
66 {
67         struct btrfs_path path;
68         int ret;
69         struct btrfs_key key;
70         struct btrfs_leaf *l;
71         struct btrfs_extent_item *item;
72         btrfs_init_path(&path);
73         key.objectid = bytenr;
74         key.offset = blocksize;
75         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
76         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
77                                 0, 0);
78         if (ret != 0)
79                 BUG();
80         l = &path.nodes[0]->leaf;
81         item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
82         *refs = btrfs_extent_refs(item);
83         btrfs_release_path(root->fs_info->extent_root, &path);
84         return 0;
85 }
86
87 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
88                   struct btrfs_buffer *buf)
89 {
90         u64 bytenr;
91         u32 blocksize;
92         int i;
93         int level;
94
95         if (!root->ref_cows)
96                 return 0;
97
98         level = btrfs_header_level(&buf->node.header) - 1;
99         blocksize = btrfs_level_size(root, level);
100
101         if (btrfs_is_leaf(&buf->node))
102                 return 0;
103
104         for (i = 0; i < btrfs_header_nritems(&buf->node.header); i++) {
105                 bytenr = btrfs_node_blockptr(&buf->node, i);
106                 inc_block_ref(trans, root, bytenr, blocksize);
107         }
108
109         return 0;
110 }
111
/*
 * Add one reference to the extent backing the root node of 'root'.
 * Used when a new root starts sharing an existing tree (snapshots).
 * Returns the result of inc_block_ref (currently always 0).
 */
int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        return inc_block_ref(trans, root, root->node->bytenr,
                             root->node->size);
}
118
119 static int write_one_cache_group(struct btrfs_trans_handle *trans,
120                                  struct btrfs_root *root,
121                                  struct btrfs_path *path,
122                                  struct btrfs_block_group_cache *cache)
123 {
124         int ret;
125         int pending_ret;
126         struct btrfs_root *extent_root = root->fs_info->extent_root;
127         struct btrfs_block_group_item *bi;
128
129         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
130                                 &cache->key, path, 0, 1);
131         BUG_ON(ret);
132         bi = btrfs_item_ptr(&path->nodes[0]->leaf, path->slots[0],
133                             struct btrfs_block_group_item);
134         memcpy(bi, &cache->item, sizeof(*bi));
135         dirty_tree_block(trans, extent_root, path->nodes[0]);
136         btrfs_release_path(extent_root, path);
137         finish_current_insert(trans, root);
138         pending_ret = run_pending(trans, root);
139         if (ret)
140                 return ret;
141         if (pending_ret)
142                 return pending_ret;
143         return 0;
144
145 }
146
147 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
148                                     struct btrfs_root *root)
149 {
150         struct btrfs_block_group_cache *bg;
151         struct cache_extent *cache;
152         int err = 0;
153         int werr = 0;
154         struct cache_tree *bg_cache = &root->fs_info->block_group_cache;
155         struct btrfs_path path;
156         btrfs_init_path(&path);
157         u64 start = 0;
158
159         while(1) {
160                 cache = find_first_cache_extent(bg_cache, start);
161                 if (!cache)
162                         break;
163                 bg = container_of(cache, struct btrfs_block_group_cache,
164                                         cache);
165                 start = cache->start + cache->size;
166                 if (bg->dirty) {
167                         err = write_one_cache_group(trans, root,
168                                                     &path, bg);
169                         if (err)
170                                 werr = err;
171                 }
172                 bg->dirty = 0;
173         }
174         return werr;
175 }
176
177 static int update_block_group(struct btrfs_trans_handle *trans,
178                               struct btrfs_root *root,
179                               u64 bytenr, u64 num, int alloc)
180 {
181         struct btrfs_block_group_cache *bg;
182         struct cache_extent *cache;
183         struct btrfs_fs_info *info = root->fs_info;
184         u64 total = num;
185         u64 old_val;
186         u64 byte_in_group;
187
188         while(total) {
189                 cache = find_first_cache_extent(&info->block_group_cache,
190                                                 bytenr);
191                 if (!cache)
192                         return -1;
193                 bg = container_of(cache, struct btrfs_block_group_cache,
194                                         cache);
195                 bg->dirty = 1;
196                 byte_in_group = bytenr - bg->key.objectid;
197                 old_val = btrfs_block_group_used(&bg->item);
198                 if (total > bg->key.offset - byte_in_group)
199                         num = bg->key.offset - byte_in_group;
200                 else
201                         num = total;
202                 total -= num;
203                 bytenr += num;
204                 if (alloc)
205                         old_val += num;
206                 else
207                         old_val -= num;
208                 btrfs_set_block_group_used(&bg->item, old_val);
209         }
210         return 0;
211 }
212
213 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct
214                                btrfs_root *root)
215 {
216         u64 first = 0;
217         struct cache_extent *pe;
218         struct cache_extent *next;
219
220         pe = find_first_cache_extent(&root->fs_info->pinned_tree, 0);
221         if (pe)
222                 first = pe->start;
223         while(pe) {
224                 next = next_cache_extent(pe);
225                 remove_cache_extent(&root->fs_info->pinned_tree, pe);
226                 free_cache_extent(pe);
227                 pe = next;
228         }
229         root->fs_info->last_insert.objectid = first;
230         root->fs_info->last_insert.offset = 0;
231         return 0;
232 }
233
/*
 * Insert an extent item (refs == 1, owned by the extent root) for every
 * range queued in the pending tree.  Ranges get queued when blocks are
 * allocated for the extent tree itself, since inserting immediately
 * would recurse.  Always returns 0; an insert failure is fatal.
 */
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
                                 btrfs_root *extent_root)
{
        struct btrfs_key ins;
        struct btrfs_extent_item extent_item;
        int ret;
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct cache_extent *pe;
        struct cache_extent *next;
        struct cache_tree *pending_tree = &info->pending_tree;

        btrfs_set_extent_refs(&extent_item, 1);
        btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);
        ins.offset = 1;
        btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
        pe = find_first_cache_extent(pending_tree, 0);
        while(pe) {
                ins.offset = pe->size;
                ins.objectid = pe->start;

                /*
                 * detach the entry and pick a successor before inserting:
                 * btrfs_insert_item below can queue new pending extents,
                 * so if the cached successor vanished we restart the scan
                 * from the beginning of the tree.
                 *
                 * NOTE(review): next_cache_extent() runs on a node already
                 * removed from the tree -- assumes removal keeps its links
                 * usable until free_cache_extent(); confirm against the
                 * cache-tree implementation.
                 */
                remove_cache_extent(pending_tree, pe);
                next = next_cache_extent(pe);
                if (!next)
                        next = find_first_cache_extent(pending_tree, 0);

                free_cache_extent(pe);
                pe = next;

                ret = btrfs_insert_item(trans, extent_root, &ins, &extent_item,
                                        sizeof(extent_item));
                if (ret) {
                        btrfs_print_tree(extent_root, extent_root->node);
                }
                BUG_ON(ret);
        }
        return 0;
}
271
272 /*
273  * remove an extent from the root, returns 0 on success
274  */
275 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
276                          *root, u64 bytenr, u64 num_bytes, int pin)
277 {
278         struct btrfs_path path;
279         struct btrfs_key key;
280         struct btrfs_fs_info *info = root->fs_info;
281         struct btrfs_root *extent_root = info->extent_root;
282         int ret;
283         struct btrfs_extent_item *ei;
284         u32 refs;
285
286         key.objectid = bytenr;
287         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
288         key.offset = num_bytes;
289
290         btrfs_init_path(&path);
291         ret = btrfs_search_slot(trans, extent_root, &key, &path, -1, 1);
292         if (ret) {
293                 btrfs_print_tree(extent_root, extent_root->node);
294                 printf("failed to find %llu\n",
295                        (unsigned long long)key.objectid);
296                 BUG();
297         }
298         ei = btrfs_item_ptr(&path.nodes[0]->leaf, path.slots[0],
299                             struct btrfs_extent_item);
300         BUG_ON(ei->refs == 0);
301         refs = btrfs_extent_refs(ei) - 1;
302         btrfs_set_extent_refs(ei, refs);
303         if (refs == 0) {
304                 u64 super_bytes_used, root_bytes_used;
305                 if (pin) {
306                         int err;
307                         err = insert_cache_extent(&info->pinned_tree,
308                                                     bytenr, num_bytes);
309                         BUG_ON(err);
310                 }
311                 super_bytes_used = btrfs_super_bytes_used(info->disk_super);
312                 btrfs_set_super_bytes_used(info->disk_super,
313                                             super_bytes_used - num_bytes);
314                 root_bytes_used = btrfs_root_bytes_used(&root->root_item);
315                 btrfs_set_root_bytes_used(&root->root_item,
316                                           root_bytes_used - num_bytes);
317
318                 ret = btrfs_del_item(trans, extent_root, &path);
319                 if (!pin && extent_root->fs_info->last_insert.objectid >
320                     bytenr)
321                         extent_root->fs_info->last_insert.objectid = bytenr;
322                 if (ret)
323                         BUG();
324                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
325                 BUG_ON(ret);
326         }
327         btrfs_release_path(extent_root, &path);
328         finish_current_insert(trans, extent_root);
329         return ret;
330 }
331
/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 *
 * Frees (pinned) every range queued in the del_pending tree.  Ranges
 * get queued by btrfs_free_extent when the free targets the extent
 * tree itself, to avoid recursion.  Always returns 0.
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
                               btrfs_root *extent_root)
{
        int ret;
        struct cache_extent *pe;
        struct cache_extent *next;
        struct cache_tree *del_pending = &extent_root->fs_info->del_pending;

        pe = find_first_cache_extent(del_pending, 0);
        while(pe) {
                /* detach first: __free_extent below may queue more entries */
                remove_cache_extent(del_pending, pe);
                ret = __free_extent(trans, extent_root,
                                    pe->start, pe->size, 1);
                BUG_ON(ret);
                /*
                 * NOTE(review): next_cache_extent() is called on a node
                 * already removed from the tree -- assumes removal keeps
                 * the node's links usable until free_cache_extent();
                 * confirm against the cache-tree implementation.  If the
                 * successor is gone, rescan from the start of the tree.
                 */
                next = next_cache_extent(pe);
                if (!next)
                        next = find_first_cache_extent(del_pending, 0);
                free_cache_extent(pe);
                pe = next;
        }
        return 0;
}
358
/*
 * Drain all deferred extent-tree work.  Currently that is only the
 * queued extent deletions; always returns 0.
 */
static int run_pending(struct btrfs_trans_handle *trans, struct btrfs_root
                       *extent_root)
{
        del_pending_extents(trans, extent_root);
        return 0;
}
365
366
367 /*
368  * remove an extent from the root, returns 0 on success
369  */
370 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
371                       *root, u64 bytenr, u64 num_bytes, int pin)
372 {
373         struct btrfs_root *extent_root = root->fs_info->extent_root;
374         int pending_ret;
375         int ret;
376
377         if (root == extent_root) {
378                 ret = insert_cache_extent(&root->fs_info->del_pending,
379                                             bytenr, num_bytes);
380                 BUG_ON(ret);
381                 return 0;
382         }
383         ret = __free_extent(trans, root, bytenr, num_bytes, pin);
384         pending_ret = run_pending(trans, root->fs_info->extent_root);
385         return ret ? ret : pending_ret;
386 }
387
388 static u64 stripe_align(struct btrfs_root *root, u64 val)
389 {
390         u64 mask = ((u64)root->stripesize - 1);
391         u64 ret = (val + mask) & ~mask;
392         return ret;
393 }
394
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 *
 * The whole search restarts (check_failed) whenever a candidate hole
 * turns out to overlap a pinned or pending extent.
 */
static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                            *orig_root, u64 total_needed, u64 search_start,
                            u64 search_end, struct btrfs_key *ins)
{
        struct btrfs_path path;
        struct btrfs_key key;
        int ret;
        u64 hole_size = 0;
        int slot = 0;
        u64 last_byte = 0;
        u64 aligned;
        int start_found;
        struct btrfs_leaf *l;
        struct btrfs_root * root = orig_root->fs_info->extent_root;

        /* start no lower than the last successful allocation */
        if (root->fs_info->last_insert.objectid > search_start)
                search_start = root->fs_info->last_insert.objectid;

        search_start = stripe_align(root, search_start);
        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

check_failed:
        btrfs_init_path(&path);
        ins->objectid = search_start;
        ins->offset = 0;
        start_found = 0;
        ret = btrfs_search_slot(trans, root, ins, &path, 0, 0);
        if (ret < 0)
                goto error;

        /* step back one slot so we see the extent just before search_start */
        if (path.slots[0] > 0)
                path.slots[0]--;

        while (1) {
                l = &path.nodes[0]->leaf;
                slot = path.slots[0];
                if (slot >= btrfs_header_nritems(&l->header)) {
                        ret = btrfs_next_leaf(root, &path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        /* ran off the end of the tree: everything past the
                         * last allocated byte (or search_start) is free */
                        if (!start_found) {
                                aligned = stripe_align(root, search_start);
                                ins->objectid = aligned;
                                ins->offset = (u64)-1 - aligned;
                                start_found = 1;
                                goto check_pending;
                        }
                        ins->objectid = stripe_align(root,
                                                     last_byte > search_start ?
                                                     last_byte : search_start);
                        ins->offset = (u64)-1 - ins->objectid;
                        goto check_pending;
                }
                btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
                if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
                        goto next;
                if (key.objectid >= search_start) {
                        if (start_found) {
                                if (last_byte < search_start)
                                        last_byte = search_start;
                                /* gap between the previous extent's end and
                                 * this extent's start */
                                aligned = stripe_align(root, last_byte);
                                hole_size = key.objectid - aligned;
                                if (key.objectid > aligned &&
                                    hole_size > total_needed) {
                                        ins->objectid = aligned;
                                        ins->offset = hole_size;
                                        goto check_pending;
                                }
                        }
                }
                start_found = 1;
                last_byte = key.objectid + key.offset;
next:
                path.slots[0]++;
        }
        // FIXME -ENOSPC
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        btrfs_release_path(root, &path);
        BUG_ON(ins->objectid < search_start);
        if (find_cache_extent(&root->fs_info->pinned_tree,
                                ins->objectid, total_needed)) {
                search_start = ins->objectid + total_needed;
                goto check_failed;
        }
        if (find_cache_extent(&root->fs_info->pending_tree,
                                ins->objectid, total_needed)) {
                search_start = ins->objectid + total_needed;
                goto check_failed;
        }
        root->fs_info->last_insert.objectid = ins->objectid;
        ins->offset = total_needed;
        return 0;
error:
        btrfs_release_path(root, &path);
        return ret;
}
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
static int alloc_extent(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root, u64 owner,
                        u64 num_bytes, u64 search_start,
                        u64 search_end, struct btrfs_key *ins)
{
        int ret;
        int pending_ret;
        u64 super_bytes_used, root_bytes_used;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_root *extent_root = info->extent_root;
        struct btrfs_extent_item extent_item;

        /* a new extent starts with one reference, held by 'owner' */
        btrfs_set_extent_refs(&extent_item, 1);
        btrfs_set_extent_owner(&extent_item, owner);

        ret = find_free_extent(trans, root, num_bytes, search_start,
                               search_end, ins);
        if (ret)
                return ret;

        /* account the new bytes in the super block and the root item */
        super_bytes_used = btrfs_super_bytes_used(info->disk_super);
        btrfs_set_super_bytes_used(info->disk_super, super_bytes_used +
                                    num_bytes);
        root_bytes_used = btrfs_root_bytes_used(&root->root_item);
        btrfs_set_root_bytes_used(&root->root_item, root_bytes_used +
                                   num_bytes);
        if (root == extent_root) {
                /*
                 * inserting into the extent tree while allocating for it
                 * would recurse: queue the insert for finish_current_insert
                 */
                ret = insert_cache_extent(&root->fs_info->pending_tree,
                                            ins->objectid, ins->offset);
                BUG_ON(ret);
                goto update_block;
        }
        ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
                                sizeof(extent_item));

        finish_current_insert(trans, extent_root);
        pending_ret = run_pending(trans, extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
update_block:
        ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
        BUG_ON(ret);
        return 0;
}
557 /*
558  * helper function to allocate a block for a given tree
559  * returns the tree buffer or NULL.
560  */
561 struct btrfs_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
562                                             struct btrfs_root *root,
563                                             u32 blocksize)
564 {
565         struct btrfs_key ins;
566         int ret;
567         struct btrfs_buffer *buf;
568         ret = alloc_extent(trans, root, root->root_key.objectid,
569                            blocksize, 0, (u64)-1, &ins);
570         if (ret) {
571                 BUG();
572                 return NULL;
573         }
574         buf = find_tree_block(root, ins.objectid, blocksize);
575         btrfs_set_header_generation(&buf->node.header, trans->transid);
576         btrfs_set_header_bytenr(&buf->node.header, buf->bytenr);
577         memcpy(buf->node.header.fsid, root->fs_info->disk_super->fsid,
578                sizeof(buf->node.header.fsid));
579         dirty_tree_block(trans, root, buf);
580         return buf;
581
582 }
583
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 *
 * On return, *level points at the level whose node was just freed and
 * popped off the path (caller resumes with walk_up_tree).
 */
static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
                          *root, struct btrfs_path *path, int *level)
{
        struct btrfs_buffer *next;
        struct btrfs_buffer *cur;
        u64 bytenr;
        int ret;
        u32 refs;

        /* shared subtree (refs > 1): just drop our own reference below */
        ret = lookup_block_ref(trans, root, path->nodes[*level]->bytenr,
                               btrfs_level_size(root, *level), &refs);
        BUG_ON(ret);
        if (refs > 1)
                goto out;
        /*
         * walk down to the last node level and free all the leaves
         */
        while(*level > 0) {
                u32 size = btrfs_level_size(root, *level - 1);

                cur = path->nodes[*level];
                if (path->slots[*level] >=
                    btrfs_header_nritems(&cur->node.header))
                        break;
                bytenr = btrfs_node_blockptr(&cur->node, path->slots[*level]);
                ret = lookup_block_ref(trans, root, bytenr, size, &refs);
                /*
                 * shared children, and leaves (children of level 1), are
                 * not descended into -- they only lose one reference
                 */
                if (refs != 1 || *level == 1) {
                        path->slots[*level]++;
                        ret = btrfs_free_extent(trans, root, bytenr, size, 1);
                        BUG_ON(ret);
                        continue;
                }
                BUG_ON(ret);
                next = read_tree_block(root, bytenr, size);
                if (path->nodes[*level-1])
                        btrfs_block_release(root, path->nodes[*level-1]);
                path->nodes[*level-1] = next;
                *level = btrfs_header_level(&next->node.header);
                path->slots[*level] = 0;
        }
out:
        /* free the block at *level itself and pop it off the path */
        ret = btrfs_free_extent(trans, root, path->nodes[*level]->bytenr,
                                btrfs_level_size(root, *level), 1);
        btrfs_block_release(root, path->nodes[*level]);
        path->nodes[*level] = NULL;
        *level += 1;
        BUG_ON(ret);
        return 0;
}
637
638 /*
639  * helper for dropping snapshots.  This walks back up the tree in the path
640  * to find the first node higher up where we haven't yet gone through
641  * all the slots
642  */
643 static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
644                         *root, struct btrfs_path *path, int *level)
645 {
646         int i;
647         int slot;
648         int ret;
649         for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
650                 slot = path->slots[i];
651                 if (slot <
652                     btrfs_header_nritems(&path->nodes[i]->node.header)- 1) {
653                         path->slots[i]++;
654                         *level = i;
655                         return 0;
656                 } else {
657                         ret = btrfs_free_extent(trans, root,
658                                         path->nodes[*level]->bytenr,
659                                         btrfs_level_size(root, *level), 1);
660                         btrfs_block_release(root, path->nodes[*level]);
661                         path->nodes[*level] = NULL;
662                         *level = i + 1;
663                         BUG_ON(ret);
664                 }
665         }
666         return 1;
667 }
668
669 /*
670  * drop the reference count on the tree rooted at 'snap'.  This traverses
671  * the tree freeing any blocks that have a ref count of zero after being
672  * decremented.
673  */
674 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
675                         *root, struct btrfs_buffer *snap)
676 {
677         int ret = 0;
678         int wret;
679         int level;
680         struct btrfs_path path;
681         int i;
682         int orig_level;
683
684         btrfs_init_path(&path);
685
686         level = btrfs_header_level(&snap->node.header);
687         orig_level = level;
688         path.nodes[level] = snap;
689         path.slots[level] = 0;
690         while(1) {
691                 wret = walk_down_tree(trans, root, &path, &level);
692                 if (wret > 0)
693                         break;
694                 if (wret < 0)
695                         ret = wret;
696
697                 wret = walk_up_tree(trans, root, &path, &level);
698                 if (wret > 0)
699                         break;
700                 if (wret < 0)
701                         ret = wret;
702         }
703         for (i = 0; i <= orig_level; i++) {
704                 if (path.nodes[i]) {
705                         btrfs_block_release(root, path.nodes[i]);
706                 }
707         }
708         return ret;
709 }
710
711 int btrfs_free_block_groups(struct btrfs_fs_info *info)
712 {
713         struct btrfs_block_group_cache *bg;
714         struct cache_extent *cache;
715
716         while(1) {
717                 cache = find_first_cache_extent(&info->block_group_cache, 0);
718                 if (!cache)
719                         break;
720                 bg = container_of(cache, struct btrfs_block_group_cache,
721                                         cache);
722                 remove_cache_extent(&info->block_group_cache, cache);
723                 free(bg);
724         }
725         return 0;
726 }
727
/*
 * Scan the extent tree for BTRFS_BLOCK_GROUP_ITEMs and build the
 * in-memory block group cache, stopping once the groups cover the
 * whole device (total_bytes).
 *
 * NOTE(review): errors are recorded in 'err' (search failure, failed
 * malloc) but the function always returns 0 -- a failed search is also
 * the normal loop-exit path once the last group has been read, so the
 * intended error contract is unclear; confirm before changing.
 */
int btrfs_read_block_groups(struct btrfs_root *root)
{
        struct btrfs_path path;
        int ret;
        int err = 0;
        struct btrfs_block_group_item *bi;
        struct btrfs_block_group_cache *bg;
        struct cache_tree *bg_cache;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_leaf *leaf;
        u64 group_size = BTRFS_BLOCK_GROUP_SIZE;

        root = root->fs_info->extent_root;
        bg_cache = &root->fs_info->block_group_cache;
        key.objectid = 0;
        key.offset = group_size;
        btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
        btrfs_init_path(&path);

        while(1) {
                ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
                                        &key, &path, 0, 0);
                if (ret != 0) {
                        err = ret;
                        break;
                }
                leaf = &path.nodes[0]->leaf;
                btrfs_disk_key_to_cpu(&found_key,
                                      &leaf->items[path.slots[0]].key);
                bg = malloc(sizeof(*bg));
                if (!bg) {
                        err = -1;
                        break;
                }
                /* copy the on-disk item before releasing the path */
                bi = btrfs_item_ptr(leaf, path.slots[0],
                                    struct btrfs_block_group_item);
                memcpy(&bg->item, bi, sizeof(*bi));
                memcpy(&bg->key, &found_key, sizeof(found_key));
                /* advance the search key to just past this group */
                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(root, &path);
                bg->cache.start = found_key.objectid;
                bg->cache.size = found_key.offset;
                bg->dirty = 0;
                ret = insert_existing_cache_extent(bg_cache, &bg->cache);
                BUG_ON(ret);
                if (key.objectid >=
                    btrfs_super_total_bytes(root->fs_info->disk_super))
                        break;
        }
        btrfs_release_path(root, &path);
        return 0;
}
781
782 int btrfs_insert_block_group(struct btrfs_trans_handle *trans,
783                              struct btrfs_root *root,
784                              struct btrfs_key *key,
785                              struct btrfs_block_group_item *bi)
786 {
787         int ret;
788         int pending_ret;
789
790         root = root->fs_info->extent_root;
791         ret = btrfs_insert_item(trans, root, key, bi, sizeof(*bi));
792         finish_current_insert(trans, root);
793         pending_ret = run_pending(trans, root);
794         if (ret)
795                 return ret;
796         if (pending_ret)
797                 return pending_ret;
798         return ret;
799 }