Add simple stripe size parameter
[platform/upstream/btrfs-progs.git] / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include "kerncompat.h"
22 #include "radix-tree.h"
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "print-tree.h"
26 #include "transaction.h"
27
28 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
29                                  btrfs_root *extent_root);
30 static int run_pending(struct btrfs_trans_handle *trans, struct btrfs_root
31                        *extent_root);
32
33 static int inc_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root
34                          *root, u64 bytenr, u32 blocksize)
35 {
36         struct btrfs_path path;
37         int ret;
38         struct btrfs_key key;
39         struct btrfs_leaf *l;
40         struct btrfs_extent_item *item;
41         u32 refs;
42
43         btrfs_init_path(&path);
44         key.objectid = bytenr;
45         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
46         key.offset = blocksize;
47         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
48                                 0, 1);
49         if (ret != 0)
50                 BUG();
51         BUG_ON(ret != 0);
52         l = &path.nodes[0]->leaf;
53         item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
54         refs = btrfs_extent_refs(item);
55         btrfs_set_extent_refs(item, refs + 1);
56
57         BUG_ON(list_empty(&path.nodes[0]->dirty));
58         btrfs_release_path(root->fs_info->extent_root, &path);
59         finish_current_insert(trans, root->fs_info->extent_root);
60         run_pending(trans, root->fs_info->extent_root);
61         return 0;
62 }
63
64 static int lookup_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root
65                             *root, u64 bytenr, u32 blocksize, u32 *refs)
66 {
67         struct btrfs_path path;
68         int ret;
69         struct btrfs_key key;
70         struct btrfs_leaf *l;
71         struct btrfs_extent_item *item;
72         btrfs_init_path(&path);
73         key.objectid = bytenr;
74         key.offset = blocksize;
75         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
76         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
77                                 0, 0);
78         if (ret != 0)
79                 BUG();
80         l = &path.nodes[0]->leaf;
81         item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
82         *refs = btrfs_extent_refs(item);
83         btrfs_release_path(root->fs_info->extent_root, &path);
84         return 0;
85 }
86
87 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
88                   struct btrfs_buffer *buf)
89 {
90         u64 bytenr;
91         int i;
92
93         if (!root->ref_cows)
94                 return 0;
95
96         if (btrfs_is_leaf(&buf->node))
97                 return 0;
98
99         for (i = 0; i < btrfs_header_nritems(&buf->node.header); i++) {
100                 bytenr = btrfs_node_blockptr(&buf->node, i);
101                 inc_block_ref(trans, root, bytenr, root->nodesize);
102         }
103         return 0;
104 }
105
106 static int write_one_cache_group(struct btrfs_trans_handle *trans,
107                                  struct btrfs_root *root,
108                                  struct btrfs_path *path,
109                                  struct btrfs_block_group_cache *cache)
110 {
111         int ret;
112         int pending_ret;
113         struct btrfs_root *extent_root = root->fs_info->extent_root;
114         struct btrfs_block_group_item *bi;
115
116         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
117                                 &cache->key, path, 0, 1);
118         BUG_ON(ret);
119         bi = btrfs_item_ptr(&path->nodes[0]->leaf, path->slots[0],
120                             struct btrfs_block_group_item);
121         memcpy(bi, &cache->item, sizeof(*bi));
122         dirty_tree_block(trans, extent_root, path->nodes[0]);
123         btrfs_release_path(extent_root, path);
124         finish_current_insert(trans, root);
125         pending_ret = run_pending(trans, root);
126         if (ret)
127                 return ret;
128         if (pending_ret)
129                 return pending_ret;
130         return 0;
131
132 }
133
134 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
135                                     struct btrfs_root *root)
136 {
137         struct btrfs_block_group_cache *bg;
138         struct cache_extent *cache;
139         int err = 0;
140         int werr = 0;
141         struct cache_tree *bg_cache = &root->fs_info->block_group_cache;
142         struct btrfs_path path;
143         btrfs_init_path(&path);
144         u64 start = 0;
145
146         while(1) {
147                 cache = find_first_cache_extent(bg_cache, start);
148                 if (!cache)
149                         break;
150                 bg = container_of(cache, struct btrfs_block_group_cache,
151                                         cache);
152                 start = cache->start + cache->size;
153                 if (bg->dirty) {
154                         err = write_one_cache_group(trans, root,
155                                                     &path, bg);
156                         if (err)
157                                 werr = err;
158                 }
159                 bg->dirty = 0;
160         }
161         return werr;
162 }
163
164 static int update_block_group(struct btrfs_trans_handle *trans,
165                               struct btrfs_root *root,
166                               u64 bytenr, u64 num, int alloc)
167 {
168         struct btrfs_block_group_cache *bg;
169         struct cache_extent *cache;
170         struct btrfs_fs_info *info = root->fs_info;
171         u64 total = num;
172         u64 old_val;
173         u64 byte_in_group;
174
175         while(total) {
176                 cache = find_first_cache_extent(&info->block_group_cache,
177                                                 bytenr);
178                 if (!cache)
179                         return -1;
180                 bg = container_of(cache, struct btrfs_block_group_cache,
181                                         cache);
182                 bg->dirty = 1;
183                 byte_in_group = bytenr - bg->key.objectid;
184                 old_val = btrfs_block_group_used(&bg->item);
185                 if (total > bg->key.offset - byte_in_group)
186                         num = bg->key.offset - byte_in_group;
187                 else
188                         num = total;
189                 total -= num;
190                 bytenr += num;
191                 if (alloc)
192                         old_val += num;
193                 else
194                         old_val -= num;
195                 btrfs_set_block_group_used(&bg->item, old_val);
196         }
197         return 0;
198 }
199
200 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct
201                                btrfs_root *root)
202 {
203         u64 first = 0;
204         struct cache_extent *pe;
205         struct cache_extent *next;
206
207         pe = find_first_cache_extent(&root->fs_info->pinned_tree, 0);
208         if (pe)
209                 first = pe->start;
210         while(pe) {
211                 next = next_cache_extent(pe);
212                 remove_cache_extent(&root->fs_info->pinned_tree, pe);
213                 free_cache_extent(pe);
214                 pe = next;
215         }
216         root->fs_info->last_insert.objectid = first;
217         root->fs_info->last_insert.offset = 0;
218         return 0;
219 }
220
221 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
222                                  btrfs_root *extent_root)
223 {
224         struct btrfs_key ins;
225         struct btrfs_extent_item extent_item;
226         int ret;
227         struct btrfs_fs_info *info = extent_root->fs_info;
228         struct cache_extent *pe;
229         struct cache_extent *next;
230         struct cache_tree *pending_tree = &info->pending_tree;
231
232         btrfs_set_extent_refs(&extent_item, 1);
233         btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);
234         ins.offset = 1;
235         btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
236         pe = find_first_cache_extent(pending_tree, 0);
237         while(pe) {
238                 ins.offset = pe->size;
239                 ins.objectid = pe->start;
240
241                 remove_cache_extent(pending_tree, pe);
242                 next = next_cache_extent(pe);
243                 if (!next)
244                         next = find_first_cache_extent(pending_tree, 0);
245
246                 free_cache_extent(pe);
247                 pe = next;
248
249                 ret = btrfs_insert_item(trans, extent_root, &ins, &extent_item,
250                                         sizeof(extent_item));
251                 if (ret) {
252                         btrfs_print_tree(extent_root, extent_root->node);
253                 }
254                 BUG_ON(ret);
255         }
256         return 0;
257 }
258
259 /*
260  * remove an extent from the root, returns 0 on success
261  */
262 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
263                          *root, u64 bytenr, u64 num_bytes, int pin)
264 {
265         struct btrfs_path path;
266         struct btrfs_key key;
267         struct btrfs_fs_info *info = root->fs_info;
268         struct btrfs_root *extent_root = info->extent_root;
269         int ret;
270         struct btrfs_extent_item *ei;
271         u32 refs;
272
273         key.objectid = bytenr;
274         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
275         key.offset = num_bytes;
276
277         btrfs_init_path(&path);
278         ret = btrfs_search_slot(trans, extent_root, &key, &path, -1, 1);
279         if (ret) {
280                 btrfs_print_tree(extent_root, extent_root->node);
281                 printf("failed to find %llu\n",
282                        (unsigned long long)key.objectid);
283                 BUG();
284         }
285         ei = btrfs_item_ptr(&path.nodes[0]->leaf, path.slots[0],
286                             struct btrfs_extent_item);
287         BUG_ON(ei->refs == 0);
288         refs = btrfs_extent_refs(ei) - 1;
289         btrfs_set_extent_refs(ei, refs);
290         if (refs == 0) {
291                 u64 super_bytes_used, root_bytes_used;
292                 if (pin) {
293                         int err;
294                         err = insert_cache_extent(&info->pinned_tree,
295                                                     bytenr, num_bytes);
296                         BUG_ON(err);
297                 }
298                 super_bytes_used = btrfs_super_bytes_used(info->disk_super);
299                 btrfs_set_super_bytes_used(info->disk_super,
300                                             super_bytes_used - num_bytes);
301                 root_bytes_used = btrfs_root_bytes_used(&root->root_item);
302                 btrfs_set_root_bytes_used(&root->root_item,
303                                           root_bytes_used - num_bytes);
304
305                 ret = btrfs_del_item(trans, extent_root, &path);
306                 if (!pin && extent_root->fs_info->last_insert.objectid >
307                     bytenr)
308                         extent_root->fs_info->last_insert.objectid = bytenr;
309                 if (ret)
310                         BUG();
311                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
312                 BUG_ON(ret);
313         }
314         btrfs_release_path(extent_root, &path);
315         finish_current_insert(trans, extent_root);
316         return ret;
317 }
318
319 /*
320  * find all the blocks marked as pending in the radix tree and remove
321  * them from the extent map
322  */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
                               btrfs_root *extent_root)
{
        int ret;
        struct cache_extent *pe;
        struct cache_extent *next;
        struct cache_tree *del_pending = &extent_root->fs_info->del_pending;

        /* drain the deferred-free queue; each entry is freed with pin=1 so
         * the space stays unavailable until the transaction commits */
        pe = find_first_cache_extent(del_pending, 0);
        while(pe) {
                remove_cache_extent(del_pending, pe);
                ret = __free_extent(trans, extent_root,
                                    pe->start, pe->size, 1);
                BUG_ON(ret);
                /* NOTE(review): pe was already unlinked above; taking its
                 * successor assumes removal keeps the node's links usable.
                 * The find_first fallback restarts from the tree head when
                 * it does not — confirm against the cache tree impl. */
                next = next_cache_extent(pe);
                if (!next)
                        next = find_first_cache_extent(del_pending, 0);
                free_cache_extent(pe);
                pe = next;
        }
        return 0;
}
345
/*
 * Flush all deferred extent-tree work (currently just the queued frees).
 * Always returns 0; failures inside the flush are fatal.
 */
static int run_pending(struct btrfs_trans_handle *trans, struct btrfs_root
		       *extent_root)
{
	del_pending_extents(trans, extent_root);
	return 0;
}
352
353
354 /*
355  * remove an extent from the root, returns 0 on success
356  */
357 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
358                       *root, u64 bytenr, u64 num_bytes, int pin)
359 {
360         struct btrfs_root *extent_root = root->fs_info->extent_root;
361         int pending_ret;
362         int ret;
363
364         if (root == extent_root) {
365                 ret = insert_cache_extent(&root->fs_info->del_pending,
366                                             bytenr, num_bytes);
367                 BUG_ON(ret);
368                 return 0;
369         }
370         ret = __free_extent(trans, root, bytenr, num_bytes, pin);
371         pending_ret = run_pending(trans, root->fs_info->extent_root);
372         return ret ? ret : pending_ret;
373 }
374
375 static u64 stripe_align(struct btrfs_root *root, u64 val)
376 {
377         u64 mask = ((u64)root->stripesize - 1);
378         u64 ret = (val + mask) & ~mask;
379         return ret;
380 }
381
382 /*
383  * walks the btree of allocated extents and find a hole of a given size.
384  * The key ins is changed to record the hole:
385  * ins->objectid == block start
386  * ins->flags = BTRFS_EXTENT_ITEM_KEY
387  * ins->offset == number of blocks
388  * Any available blocks before search_start are skipped.
389  */
static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                            *orig_root, u64 total_needed, u64 search_start,
                            u64 search_end, struct btrfs_key *ins)
{
        /* NOTE(review): search_end is currently never consulted — the scan
         * runs to the end of the extent tree regardless. */
        struct btrfs_path path;
        struct btrfs_key key;
        int ret;
        u64 hole_size = 0;
        int slot = 0;
        u64 last_byte = 0;
        u64 aligned;
        int start_found;
        struct btrfs_leaf *l;
        struct btrfs_root * root = orig_root->fs_info->extent_root;

        /* resume from the last successful allocation when it lies beyond
         * the caller's requested start */
        if (root->fs_info->last_insert.objectid > search_start)
                search_start = root->fs_info->last_insert.objectid;

        search_start = stripe_align(root, search_start);
        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

check_failed:
        btrfs_init_path(&path);
        ins->objectid = search_start;
        ins->offset = 0;
        start_found = 0;
        ret = btrfs_search_slot(trans, root, ins, &path, 0, 0);
        if (ret < 0)
                goto error;

        /* step back one slot so the scan begins at or before search_start */
        if (path.slots[0] > 0)
                path.slots[0]--;

        while (1) {
                l = &path.nodes[0]->leaf;
                slot = path.slots[0];
                if (slot >= btrfs_header_nritems(&l->header)) {
                        ret = btrfs_next_leaf(root, &path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        /* ran off the end of the tree: everything past the
                         * highest allocated byte is free */
                        if (!start_found) {
                                aligned = stripe_align(root, search_start);
                                ins->objectid = aligned;
                                ins->offset = (u64)-1 - aligned;
                                start_found = 1;
                                goto check_pending;
                        }
                        ins->objectid = stripe_align(root,
                                                     last_byte > search_start ?
                                                     last_byte : search_start);
                        ins->offset = (u64)-1 - ins->objectid;
                        goto check_pending;
                }
                btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
                /* the extent tree holds other item types too; skip them */
                if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
                        goto next;
                if (key.objectid >= search_start) {
                        if (start_found) {
                                if (last_byte < search_start)
                                        last_byte = search_start;
                                aligned = stripe_align(root, last_byte);
                                /* hole between the previous extent's end and
                                 * this extent's start */
                                hole_size = key.objectid - aligned;
                                if (key.objectid > aligned &&
                                    hole_size > total_needed) {
                                        ins->objectid = aligned;
                                        ins->offset = hole_size;
                                        goto check_pending;
                                }
                        }
                }
                start_found = 1;
                last_byte = key.objectid + key.offset;
next:
                path.slots[0]++;
        }
        // FIXME -ENOSPC
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        btrfs_release_path(root, &path);
        BUG_ON(ins->objectid < search_start);
        /* pinned (freed this transaction) ranges are not reusable yet */
        if (find_cache_extent(&root->fs_info->pinned_tree,
                                ins->objectid, total_needed)) {
                search_start = ins->objectid + total_needed;
                goto check_failed;
        }
        /* pending allocations have no extent item yet but are taken */
        if (find_cache_extent(&root->fs_info->pending_tree,
                                ins->objectid, total_needed)) {
                search_start = ins->objectid + total_needed;
                goto check_failed;
        }
        root->fs_info->last_insert.objectid = ins->objectid;
        ins->offset = total_needed;
        return 0;
error:
        btrfs_release_path(root, &path);
        return ret;
}
491 /*
492  * finds a free extent and does all the dirty work required for allocation
493  * returns the key for the extent through ins, and a tree buffer for
494  * the first block of the extent through buf.
495  *
496  * returns 0 if everything worked, non-zero otherwise.
497  */
static int alloc_extent(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root, u64 owner,
                        u64 num_bytes, u64 search_start,
                        u64 search_end, struct btrfs_key *ins)
{
        int ret;
        int pending_ret;
        u64 super_bytes_used, root_bytes_used;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_root *extent_root = info->extent_root;
        struct btrfs_extent_item extent_item;

        /* new extents start with a single reference held by 'owner' */
        btrfs_set_extent_refs(&extent_item, 1);
        btrfs_set_extent_owner(&extent_item, owner);

        ret = find_free_extent(trans, root, num_bytes, search_start,
                               search_end, ins);
        if (ret)
                return ret;

        /* account the allocation in the super block and the root item */
        super_bytes_used = btrfs_super_bytes_used(info->disk_super);
        btrfs_set_super_bytes_used(info->disk_super, super_bytes_used +
                                    num_bytes);
        root_bytes_used = btrfs_root_bytes_used(&root->root_item);
        btrfs_set_root_bytes_used(&root->root_item, root_bytes_used +
                                   num_bytes);
        /* allocating for the extent tree itself: queue the insert instead
         * of recursing into btrfs_insert_item on the tree being modified */
        if (root == extent_root) {
                ret = insert_cache_extent(&root->fs_info->pending_tree,
                                            ins->objectid, ins->offset);
                BUG_ON(ret);
                return 0;
        }

        ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
                                sizeof(extent_item));

        finish_current_insert(trans, extent_root);
        pending_ret = run_pending(trans, extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;
}
542
543 /*
544  * helper function to allocate a block for a given tree
545  * returns the tree buffer or NULL.
546  */
547 struct btrfs_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
548                                             struct btrfs_root *root,
549                                             u32 blocksize)
550 {
551         struct btrfs_key ins;
552         int ret;
553         struct btrfs_buffer *buf;
554
555         ret = alloc_extent(trans, root, root->root_key.objectid,
556                            blocksize, 0, (unsigned long)-1, &ins);
557         if (ret) {
558                 BUG();
559                 return NULL;
560         }
561         ret = update_block_group(trans, root, ins.objectid, ins.offset, 1);
562         buf = find_tree_block(root, ins.objectid, blocksize);
563         btrfs_set_header_generation(&buf->node.header,
564                                     root->root_key.offset + 1);
565         btrfs_set_header_bytenr(&buf->node.header, buf->bytenr);
566         memcpy(buf->node.header.fsid, root->fs_info->disk_super->fsid,
567                sizeof(buf->node.header.fsid));
568         dirty_tree_block(trans, root, buf);
569         return buf;
570
571 }
572
573 /*
574  * helper function for drop_snapshot, this walks down the tree dropping ref
575  * counts as it goes.
576  */
static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
                          *root, struct btrfs_path *path, int *level)
{
        struct btrfs_buffer *next;
        struct btrfs_buffer *cur;
        u64 bytenr;
        int ret;
        u32 refs;

        /* if this whole subtree is shared, dropping our one ref (done at
         * 'out') is all that is needed — nothing below gets freed */
        ret = lookup_block_ref(trans, root, path->nodes[*level]->bytenr,
                               btrfs_level_size(root, *level), &refs);
        BUG_ON(ret);
        if (refs > 1)
                goto out;
        /*
         * walk down to the last node level and free all the leaves
         */
        while(*level > 0) {
                u32 size = btrfs_level_size(root, *level - 1);

                cur = path->nodes[*level];
                if (path->slots[*level] >=
                    btrfs_header_nritems(&cur->node.header))
                        break;
                bytenr = btrfs_node_blockptr(&cur->node, path->slots[*level]);
                ret = lookup_block_ref(trans, root, bytenr, size, &refs);
                /* NOTE(review): refs is consulted before ret is checked;
                 * lookup_block_ref BUGs internally on failure, which is
                 * presumably why this is tolerated */
                if (refs != 1 || *level == 1) {
                        /* shared child, or a leaf: drop the ref and skip */
                        path->slots[*level]++;
                        ret = btrfs_free_extent(trans, root, bytenr, size, 1);
                        BUG_ON(ret);
                        continue;
                }
                BUG_ON(ret);
                /* sole owner of an interior node: descend into it */
                next = read_tree_block(root, bytenr, size);
                if (path->nodes[*level-1])
                        btrfs_block_release(root, path->nodes[*level-1]);
                path->nodes[*level-1] = next;
                *level = btrfs_header_level(&next->node.header);
                path->slots[*level] = 0;
        }
out:
        /* free the block we are standing on and pop up one level */
        ret = btrfs_free_extent(trans, root, path->nodes[*level]->bytenr,
                                btrfs_level_size(root, *level), 1);
        btrfs_block_release(root, path->nodes[*level]);
        path->nodes[*level] = NULL;
        *level += 1;
        BUG_ON(ret);
        return 0;
}
626
627 /*
628  * helper for dropping snapshots.  This walks back up the tree in the path
629  * to find the first node higher up where we haven't yet gone through
630  * all the slots
631  */
static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
                        *root, struct btrfs_path *path, int *level)
{
        int i;
        int slot;
        int ret;
        /* climb from the current level until a node still has unvisited
         * slots; returns 0 with *level set there, or 1 when the root has
         * been exhausted (the walk is done) */
        for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
                slot = path->slots[i];
                if (slot <
                    btrfs_header_nritems(&path->nodes[i]->node.header)- 1) {
                        /* more children to visit here: resume at this level */
                        path->slots[i]++;
                        *level = i;
                        return 0;
                } else {
                        /* node fully visited: free it and keep climbing */
                        ret = btrfs_free_extent(trans, root,
                                        path->nodes[*level]->bytenr,
                                        btrfs_level_size(root, *level), 1);
                        btrfs_block_release(root, path->nodes[*level]);
                        path->nodes[*level] = NULL;
                        *level = i + 1;
                        BUG_ON(ret);
                }
        }
        return 1;
}
657
658 /*
659  * drop the reference count on the tree rooted at 'snap'.  This traverses
660  * the tree freeing any blocks that have a ref count of zero after being
661  * decremented.
662  */
663 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
664                         *root, struct btrfs_buffer *snap)
665 {
666         int ret = 0;
667         int wret;
668         int level;
669         struct btrfs_path path;
670         int i;
671         int orig_level;
672
673         btrfs_init_path(&path);
674
675         level = btrfs_header_level(&snap->node.header);
676         orig_level = level;
677         path.nodes[level] = snap;
678         path.slots[level] = 0;
679         while(1) {
680                 wret = walk_down_tree(trans, root, &path, &level);
681                 if (wret > 0)
682                         break;
683                 if (wret < 0)
684                         ret = wret;
685
686                 wret = walk_up_tree(trans, root, &path, &level);
687                 if (wret > 0)
688                         break;
689                 if (wret < 0)
690                         ret = wret;
691         }
692         for (i = 0; i <= orig_level; i++) {
693                 if (path.nodes[i]) {
694                         btrfs_block_release(root, path.nodes[i]);
695                 }
696         }
697         return ret;
698 }
699
700 int btrfs_free_block_groups(struct btrfs_fs_info *info)
701 {
702         struct btrfs_block_group_cache *bg;
703         struct cache_extent *cache;
704
705         while(1) {
706                 cache = find_first_cache_extent(&info->block_group_cache, 0);
707                 if (!cache)
708                         break;
709                 bg = container_of(cache, struct btrfs_block_group_cache,
710                                         cache);
711                 remove_cache_extent(&info->block_group_cache, cache);
712                 free(bg);
713         }
714         return 0;
715 }
716
int btrfs_read_block_groups(struct btrfs_root *root)
{
        struct btrfs_path path;
        int ret;
        int err = 0;
        struct btrfs_block_group_item *bi;
        struct btrfs_block_group_cache *bg;
        struct cache_tree *bg_cache;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_leaf *leaf;
        u64 group_size = BTRFS_BLOCK_GROUP_SIZE;

        /* block group items live in the extent tree */
        root = root->fs_info->extent_root;
        bg_cache = &root->fs_info->block_group_cache;
        key.objectid = 0;
        key.offset = group_size;
        btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
        btrfs_init_path(&path);

        while(1) {
                ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
                                        &key, &path, 0, 0);
                if (ret != 0) {
                        err = ret;
                        break;
                }
                leaf = &path.nodes[0]->leaf;
                btrfs_disk_key_to_cpu(&found_key,
                                      &leaf->items[path.slots[0]].key);
                bg = malloc(sizeof(*bg));
                if (!bg) {
                        err = -1;
                        break;
                }
                bi = btrfs_item_ptr(leaf, path.slots[0],
                                    struct btrfs_block_group_item);
                memcpy(&bg->item, bi, sizeof(*bi));
                memcpy(&bg->key, &found_key, sizeof(found_key));
                /* advance the search key past this group */
                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(root, &path);
                bg->cache.start = found_key.objectid;
                bg->cache.size = found_key.offset;
                bg->dirty = 0;
                ret = insert_existing_cache_extent(bg_cache, &bg->cache);
                BUG_ON(ret);
                /* stop once the key walks past the end of the filesystem */
                if (key.objectid >=
                    btrfs_super_total_bytes(root->fs_info->disk_super))
                        break;
        }
        btrfs_release_path(root, &path);
        /* NOTE(review): 'err' (search failure or OOM) is collected but
         * discarded — the function always returns 0.  Confirm whether
         * callers should be seeing these errors. */
        return 0;
}
770
771 int btrfs_insert_block_group(struct btrfs_trans_handle *trans,
772                              struct btrfs_root *root,
773                              struct btrfs_key *key,
774                              struct btrfs_block_group_item *bi)
775 {
776         int ret;
777         int pending_ret;
778
779         root = root->fs_info->extent_root;
780         ret = btrfs_insert_item(trans, root, key, bi, sizeof(*bi));
781         finish_current_insert(trans, root);
782         pending_ret = run_pending(trans, root);
783         if (ret)
784                 return ret;
785         if (pending_ret)
786                 return pending_ret;
787         return ret;
788 }