2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
21 #include "kerncompat.h"
22 #include "radix-tree.h"
25 #include "print-tree.h"
26 #include "transaction.h"
28 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
29 btrfs_root *extent_root);
30 static int run_pending(struct btrfs_trans_handle *trans, struct btrfs_root
34 * pending extents are blocks that we're trying to allocate in the extent
35 * map while trying to grow the map because of other allocations. To avoid
36 * recursing, they are tagged in the radix tree and cleaned up after
37 * other allocations are done. The pending tag is also used in the same
37 * manner for deletes.
41 static int inc_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root
44 struct btrfs_path path;
48 struct btrfs_extent_item *item;
51 btrfs_init_path(&path);
52 key.objectid = blocknr;
53 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
55 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
60 l = &path.nodes[0]->leaf;
61 item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
62 refs = btrfs_extent_refs(item);
63 btrfs_set_extent_refs(item, refs + 1);
65 BUG_ON(list_empty(&path.nodes[0]->dirty));
66 btrfs_release_path(root->fs_info->extent_root, &path);
67 finish_current_insert(trans, root->fs_info->extent_root);
68 run_pending(trans, root->fs_info->extent_root);
72 static int lookup_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root
73 *root, u64 blocknr, u32 *refs)
75 struct btrfs_path path;
79 struct btrfs_extent_item *item;
80 btrfs_init_path(&path);
81 key.objectid = blocknr;
83 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
84 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, &path,
88 l = &path.nodes[0]->leaf;
89 item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
90 *refs = btrfs_extent_refs(item);
91 btrfs_release_path(root->fs_info->extent_root, &path);
95 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
96 struct btrfs_buffer *buf)
103 if (btrfs_is_leaf(&buf->node))
106 for (i = 0; i < btrfs_header_nritems(&buf->node.header); i++) {
107 blocknr = btrfs_node_blockptr(&buf->node, i);
108 inc_block_ref(trans, root, blocknr);
113 static int write_one_cache_group(struct btrfs_trans_handle *trans,
114 struct btrfs_root *root,
115 struct btrfs_path *path,
116 struct btrfs_block_group_cache *cache)
120 struct btrfs_root *extent_root = root->fs_info->extent_root;
121 struct btrfs_block_group_item *bi;
123 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
124 &cache->key, path, 0, 1);
126 bi = btrfs_item_ptr(&path->nodes[0]->leaf, path->slots[0],
127 struct btrfs_block_group_item);
128 memcpy(bi, &cache->item, sizeof(*bi));
129 dirty_tree_block(trans, extent_root, path->nodes[0]);
130 btrfs_release_path(extent_root, path);
131 finish_current_insert(trans, root);
132 pending_ret = run_pending(trans, root);
141 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
142 struct btrfs_root *root)
144 struct btrfs_block_group_cache *cache[8];
148 struct radix_tree_root *radix = &root->fs_info->block_group_radix;
150 struct btrfs_path path;
151 btrfs_init_path(&path);
154 ret = radix_tree_gang_lookup_tag(radix, (void *)cache,
155 0, ARRAY_SIZE(cache),
156 BTRFS_BLOCK_GROUP_DIRTY);
159 for (i = 0; i < ret; i++) {
160 radix_tree_tag_clear(radix, cache[i]->key.objectid +
161 cache[i]->key.offset -1,
162 BTRFS_BLOCK_GROUP_DIRTY);
163 err = write_one_cache_group(trans, root,
172 static int update_block_group(struct btrfs_trans_handle *trans,
173 struct btrfs_root *root,
174 u64 blocknr, u64 num, int alloc)
176 struct btrfs_block_group_cache *cache;
177 struct btrfs_fs_info *info = root->fs_info;
184 ret = radix_tree_gang_lookup(&info->block_group_radix,
185 (void *)&cache, blocknr, 1);
188 radix_tree_tag_set(&info->block_group_radix,
189 cache->key.objectid + cache->key.offset - 1,
190 BTRFS_BLOCK_GROUP_DIRTY);
192 block_in_group = blocknr - cache->key.objectid;
193 old_val = btrfs_block_group_used(&cache->item);
194 if (total > cache->key.offset - block_in_group)
195 num = cache->key.offset - block_in_group;
204 btrfs_set_block_group_used(&cache->item, old_val);
209 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct
213 struct pending_extent *pe;
214 struct pending_extent *next;
216 pe = find_first_pending_extent(&root->fs_info->pinned_tree, 0);
220 next = next_pending_extent(pe);
221 remove_pending_extent(&root->fs_info->pinned_tree, pe);
222 free_pending_extent(pe);
225 root->fs_info->last_insert.objectid = first;
226 root->fs_info->last_insert.offset = 0;
230 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
231 btrfs_root *extent_root)
233 struct btrfs_key ins;
234 struct btrfs_extent_item extent_item;
236 u64 super_blocks_used, root_blocks_used;
237 struct btrfs_fs_info *info = extent_root->fs_info;
238 struct pending_extent *pe;
239 struct pending_extent *next;
240 struct pending_tree *pending_tree = &info->pending_tree;
242 btrfs_set_extent_refs(&extent_item, 1);
243 btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);
245 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
246 pe = find_first_pending_extent(pending_tree, 0);
248 ins.offset = pe->size;
249 ins.objectid = pe->start;
251 remove_pending_extent(pending_tree, pe);
252 next = next_pending_extent(pe);
254 next = find_first_pending_extent(pending_tree, 0);
256 free_pending_extent(pe);
260 super_blocks_used = btrfs_super_blocks_used(info->disk_super);
261 btrfs_set_super_blocks_used(info->disk_super,
262 super_blocks_used + 1);
263 root_blocks_used = btrfs_root_blocks_used(&extent_root->root_item);
264 btrfs_set_root_blocks_used(&extent_root->root_item,
265 root_blocks_used + 1);
266 ret = btrfs_insert_item(trans, extent_root, &ins, &extent_item,
267 sizeof(extent_item));
269 btrfs_print_tree(extent_root, extent_root->node);
277 * remove an extent from the root, returns 0 on success
279 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
280 *root, u64 blocknr, u64 num_blocks, int pin)
282 struct btrfs_path path;
283 struct btrfs_key key;
284 struct btrfs_fs_info *info = root->fs_info;
285 struct btrfs_root *extent_root = info->extent_root;
287 struct btrfs_extent_item *ei;
290 BUG_ON(pin && num_blocks != 1);
291 key.objectid = blocknr;
292 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
293 key.offset = num_blocks;
295 btrfs_init_path(&path);
296 ret = btrfs_search_slot(trans, extent_root, &key, &path, -1, 1);
298 btrfs_print_tree(extent_root, extent_root->node);
299 printf("failed to find %llu\n",
300 (unsigned long long)key.objectid);
303 ei = btrfs_item_ptr(&path.nodes[0]->leaf, path.slots[0],
304 struct btrfs_extent_item);
305 BUG_ON(ei->refs == 0);
306 refs = btrfs_extent_refs(ei) - 1;
307 btrfs_set_extent_refs(ei, refs);
309 u64 super_blocks_used, root_blocks_used;
312 err = insert_pending_extent(&info->pinned_tree,
316 super_blocks_used = btrfs_super_blocks_used(info->disk_super);
317 btrfs_set_super_blocks_used(info->disk_super,
318 super_blocks_used - num_blocks);
319 root_blocks_used = btrfs_root_blocks_used(&root->root_item);
320 btrfs_set_root_blocks_used(&root->root_item,
321 root_blocks_used - num_blocks);
323 ret = btrfs_del_item(trans, extent_root, &path);
324 if (!pin && extent_root->fs_info->last_insert.objectid >
326 extent_root->fs_info->last_insert.objectid = blocknr;
329 ret = update_block_group(trans, root, blocknr, num_blocks, 0);
332 btrfs_release_path(extent_root, &path);
333 finish_current_insert(trans, extent_root);
338 * find all the blocks marked as pending in the radix tree and remove
339 * them from the extent map
341 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
342 btrfs_root *extent_root)
345 struct pending_extent *pe;
346 struct pending_extent *next;
347 struct pending_tree *del_pending = &extent_root->fs_info->del_pending;
349 pe = find_first_pending_extent(del_pending, 0);
351 remove_pending_extent(del_pending, pe);
352 ret = __free_extent(trans, extent_root,
355 next = next_pending_extent(pe);
357 next = find_first_pending_extent(del_pending, 0);
358 free_pending_extent(pe);
364 static int run_pending(struct btrfs_trans_handle *trans, struct btrfs_root
367 del_pending_extents(trans, extent_root);
373 * remove an extent from the root, returns 0 on success
375 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
376 *root, u64 blocknr, u64 num_blocks, int pin)
378 struct btrfs_root *extent_root = root->fs_info->extent_root;
382 if (root == extent_root) {
383 ret = insert_pending_extent(&root->fs_info->del_pending,
384 blocknr, num_blocks);
388 ret = __free_extent(trans, root, blocknr, num_blocks, pin);
389 pending_ret = run_pending(trans, root->fs_info->extent_root);
390 return ret ? ret : pending_ret;
394 * walks the btree of allocated extents and find a hole of a given size.
395 * The key ins is changed to record the hole:
396 * ins->objectid == block start
397 * ins->flags = BTRFS_EXTENT_ITEM_KEY
398 * ins->offset == number of blocks
399 * Any available blocks before search_start are skipped.
401 static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
402 *orig_root, u64 num_blocks, u64 search_start, u64
403 search_end, struct btrfs_key *ins)
405 struct btrfs_path path;
406 struct btrfs_key key;
412 struct btrfs_leaf *l;
413 struct btrfs_root * root = orig_root->fs_info->extent_root;
414 int total_needed = num_blocks;
416 if (root->fs_info->last_insert.objectid > search_start)
417 search_start = root->fs_info->last_insert.objectid;
419 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
422 btrfs_init_path(&path);
423 ins->objectid = search_start;
426 ret = btrfs_search_slot(trans, root, ins, &path, 0, 0);
430 if (path.slots[0] > 0)
434 l = &path.nodes[0]->leaf;
435 slot = path.slots[0];
436 if (slot >= btrfs_header_nritems(&l->header)) {
437 ret = btrfs_next_leaf(root, &path);
443 ins->objectid = search_start;
444 ins->offset = (u64)-1 - search_start;
448 ins->objectid = last_block > search_start ?
449 last_block : search_start;
450 ins->offset = (u64)-1 - ins->objectid;
453 btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
454 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
456 if (key.objectid >= search_start) {
458 if (last_block < search_start)
459 last_block = search_start;
460 hole_size = key.objectid - last_block;
461 if (hole_size > total_needed) {
462 ins->objectid = last_block;
463 ins->offset = hole_size;
469 last_block = key.objectid + key.offset;
475 /* we have to make sure we didn't find an extent that has already
476 * been allocated by the map tree or the original allocation
478 btrfs_release_path(root, &path);
479 BUG_ON(ins->objectid < search_start);
480 if (find_pending_extent(&root->fs_info->pinned_tree,
481 ins->objectid, total_needed)) {
482 search_start = ins->objectid + total_needed;
485 if (find_pending_extent(&root->fs_info->pending_tree,
486 ins->objectid, total_needed)) {
487 search_start = ins->objectid + total_needed;
490 root->fs_info->last_insert.objectid = ins->objectid;
491 ins->offset = num_blocks;
494 btrfs_release_path(root, &path);
498 * finds a free extent and does all the dirty work required for allocation
499 * returns the key for the extent through ins, and a tree buffer for
500 * the first block of the extent through buf.
502 * returns 0 if everything worked, non-zero otherwise.
504 static int alloc_extent(struct btrfs_trans_handle *trans, struct btrfs_root
505 *root, u64 owner, u64 num_blocks,
506 u64 search_start, u64
507 search_end, struct btrfs_key *ins)
511 u64 super_blocks_used, root_blocks_used;
512 struct btrfs_fs_info *info = root->fs_info;
513 struct btrfs_root *extent_root = info->extent_root;
514 struct btrfs_extent_item extent_item;
516 btrfs_set_extent_refs(&extent_item, 1);
517 btrfs_set_extent_owner(&extent_item, owner);
519 ret = find_free_extent(trans, root, num_blocks, search_start,
524 if (root == extent_root) {
525 ret = insert_pending_extent(&root->fs_info->pending_tree,
526 ins->objectid, ins->offset);
530 super_blocks_used = btrfs_super_blocks_used(info->disk_super);
531 btrfs_set_super_blocks_used(info->disk_super, super_blocks_used +
533 root_blocks_used = btrfs_root_blocks_used(&root->root_item);
534 btrfs_set_root_blocks_used(&root->root_item, root_blocks_used +
537 ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
538 sizeof(extent_item));
540 finish_current_insert(trans, extent_root);
541 pending_ret = run_pending(trans, extent_root);
550 * helper function to allocate a block for a given tree
551 * returns the tree buffer or NULL.
553 struct btrfs_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
554 struct btrfs_root *root)
556 struct btrfs_key ins;
558 struct btrfs_buffer *buf;
560 ret = alloc_extent(trans, root, root->root_key.objectid,
561 1, 0, (unsigned long)-1, &ins);
566 ret = update_block_group(trans, root, ins.objectid, ins.offset, 1);
567 buf = find_tree_block(root, ins.objectid);
568 btrfs_set_header_generation(&buf->node.header,
569 root->root_key.offset + 1);
570 btrfs_set_header_blocknr(&buf->node.header, buf->blocknr);
571 memcpy(buf->node.header.fsid, root->fs_info->disk_super->fsid,
572 sizeof(buf->node.header.fsid));
573 dirty_tree_block(trans, root, buf);
579 * helper function for drop_snapshot, this walks down the tree dropping ref
582 static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
583 *root, struct btrfs_path *path, int *level)
585 struct btrfs_buffer *next;
586 struct btrfs_buffer *cur;
591 ret = lookup_block_ref(trans, root, path->nodes[*level]->blocknr,
597 * walk down to the last node level and free all the leaves
600 cur = path->nodes[*level];
601 if (path->slots[*level] >=
602 btrfs_header_nritems(&cur->node.header))
604 blocknr = btrfs_node_blockptr(&cur->node, path->slots[*level]);
605 ret = lookup_block_ref(trans, root, blocknr, &refs);
606 if (refs != 1 || *level == 1) {
607 path->slots[*level]++;
608 ret = btrfs_free_extent(trans, root, blocknr, 1, 1);
613 next = read_tree_block(root, blocknr);
614 if (path->nodes[*level-1])
615 btrfs_block_release(root, path->nodes[*level-1]);
616 path->nodes[*level-1] = next;
617 *level = btrfs_header_level(&next->node.header);
618 path->slots[*level] = 0;
621 ret = btrfs_free_extent(trans, root, path->nodes[*level]->blocknr, 1,
623 btrfs_block_release(root, path->nodes[*level]);
624 path->nodes[*level] = NULL;
631 * helper for dropping snapshots. This walks back up the tree in the path
632 * to find the first node higher up where we haven't yet gone through
635 static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
636 *root, struct btrfs_path *path, int *level)
641 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
642 slot = path->slots[i];
644 btrfs_header_nritems(&path->nodes[i]->node.header)- 1) {
649 ret = btrfs_free_extent(trans, root,
650 path->nodes[*level]->blocknr,
652 btrfs_block_release(root, path->nodes[*level]);
653 path->nodes[*level] = NULL;
662 * drop the reference count on the tree rooted at 'snap'. This traverses
663 * the tree freeing any blocks that have a ref count of zero after being
666 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
667 *root, struct btrfs_buffer *snap)
672 struct btrfs_path path;
676 btrfs_init_path(&path);
678 level = btrfs_header_level(&snap->node.header);
680 path.nodes[level] = snap;
681 path.slots[level] = 0;
683 wret = walk_down_tree(trans, root, &path, &level);
689 wret = walk_up_tree(trans, root, &path, &level);
695 for (i = 0; i <= orig_level; i++) {
697 btrfs_block_release(root, path.nodes[i]);
703 int btrfs_free_block_groups(struct btrfs_fs_info *info)
706 struct btrfs_block_group_cache *cache[8];
710 ret = radix_tree_gang_lookup(&info->block_group_radix,
715 for (i = 0; i < ret; i++) {
716 radix_tree_delete(&info->block_group_radix,
717 cache[i]->key.objectid +
718 cache[i]->key.offset - 1);
725 int btrfs_read_block_groups(struct btrfs_root *root)
727 struct btrfs_path path;
730 struct btrfs_block_group_item *bi;
731 struct btrfs_block_group_cache *cache;
732 struct btrfs_key key;
733 struct btrfs_key found_key;
734 struct btrfs_leaf *leaf;
735 u64 group_size_blocks = BTRFS_BLOCK_GROUP_SIZE / root->sectorsize;
737 root = root->fs_info->extent_root;
739 key.offset = group_size_blocks;
740 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
741 btrfs_init_path(&path);
744 ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
750 leaf = &path.nodes[0]->leaf;
751 btrfs_disk_key_to_cpu(&found_key,
752 &leaf->items[path.slots[0]].key);
753 cache = malloc(sizeof(*cache));
758 bi = btrfs_item_ptr(leaf, path.slots[0],
759 struct btrfs_block_group_item);
760 memcpy(&cache->item, bi, sizeof(*bi));
761 memcpy(&cache->key, &found_key, sizeof(found_key));
762 key.objectid = found_key.objectid + found_key.offset;
763 btrfs_release_path(root, &path);
764 ret = radix_tree_insert(&root->fs_info->block_group_radix,
766 found_key.offset - 1, (void *)cache);
769 btrfs_super_total_blocks(root->fs_info->disk_super))
772 btrfs_release_path(root, &path);
776 int btrfs_insert_block_group(struct btrfs_trans_handle *trans,
777 struct btrfs_root *root,
778 struct btrfs_key *key,
779 struct btrfs_block_group_item *bi)
784 root = root->fs_info->extent_root;
785 ret = btrfs_insert_item(trans, root, key, bi, sizeof(*bi));
786 finish_current_insert(trans, root);
787 pending_ret = run_pending(trans, root);