#include <stdio.h>
#include "kerncompat.h"
#include "radix-tree.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"

static int find_free_extent(struct btrfs_root *orig_root, u64 num_blocks,
			    u64 search_start, u64 search_end,
			    struct btrfs_key *ins);
static int finish_current_insert(struct btrfs_root *extent_root);
static int run_pending(struct btrfs_root *extent_root);

/*
 * pending extents are blocks that we're trying to allocate in the extent
 * map while trying to grow the map because of other allocations.  To avoid
 * recursing, they are tagged in the radix tree and cleaned up after
 * other allocations are done.  The pending tag is also used in the same
 * manner for deletes.
 */
#define CTREE_EXTENT_PENDING_DEL 0

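/*
 * add one reference to the extent item that describes blocknr.  The
 * item must already be present in the extent tree.
 */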
static int inc_block_ref(struct btrfs_root *root, u64 blocknr)
{
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_key ins;
	struct btrfs_leaf *l;
	struct btrfs_extent_item *item;
	u32 refs;
	int ret;

	find_free_extent(root->extent_root, 0, 0, (u64)-1, &ins);
	btrfs_init_path(&path);
	key.objectid = blocknr;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = 1;
	ret = btrfs_search_slot(root->extent_root, &key, &path, 0, 1);
	BUG_ON(ret != 0);
	l = &path.nodes[0]->leaf;
	item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(item);
	btrfs_set_extent_refs(item, refs + 1);

	BUG_ON(list_empty(&path.nodes[0]->dirty));
	btrfs_release_path(root->extent_root, &path);
	finish_current_insert(root->extent_root);
	run_pending(root->extent_root);
	return 0;
}

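/*
 * read the reference count stored in the extent item for blocknr
 */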
static int lookup_block_ref(struct btrfs_root *root, u64 blocknr, u32 *refs)
{
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_leaf *l;
	struct btrfs_extent_item *item;
	int ret;

	btrfs_init_path(&path);
	key.objectid = blocknr;
	key.flags = 0;
	key.offset = 1;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(root->extent_root, &key, &path, 0, 0);
	BUG_ON(ret != 0);
	l = &path.nodes[0]->leaf;
	item = btrfs_item_ptr(l, path.slots[0], struct btrfs_extent_item);
	*refs = btrfs_extent_refs(item);
	btrfs_release_path(root->extent_root, &path);
	return 0;
}

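/*
 * add one reference to every block pointed to by the node in buf.
 * Leaves carry no block pointers, so they are skipped.
 */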
int btrfs_inc_ref(struct btrfs_root *root, struct btrfs_buffer *buf)
{
	u64 blocknr;
	int i;

	if (btrfs_is_leaf(&buf->node))
		return 0;
	for (i = 0; i < btrfs_header_nritems(&buf->node.header); i++) {
		blocknr = btrfs_node_blockptr(&buf->node, i);
		inc_block_ref(root, blocknr);
	}
	return 0;
}

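/*
 * blocks pinned during the last set of changes are safe to reuse once
 * the commit is done.  Empty the pinned radix and let the allocator
 * restart its search at the first block that was pinned.
 */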
int btrfs_finish_extent_commit(struct btrfs_root *root)
{
	unsigned long gang[8];
	u64 first = 0;
	int ret;
	int i;

	while(1) {
		ret = radix_tree_gang_lookup(&root->pinned_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		if (!first)
			first = gang[0];
		for (i = 0; i < ret; i++) {
			radix_tree_delete(&root->pinned_radix, gang[i]);
		}
	}
	root->last_insert.objectid = first;
	root->last_insert.offset = 0;
	return 0;
}

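/*
 * insert the extent items for any blocks the allocator handed out while
 * the extent tree itself was being changed (recorded in current_insert)
 */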
static int finish_current_insert(struct btrfs_root *extent_root)
{
	struct btrfs_key ins;
	struct btrfs_extent_item extent_item;
	int i;
	int ret;

	btrfs_set_extent_refs(&extent_item, 1);
	btrfs_set_extent_owner(&extent_item,
		btrfs_header_parentid(&extent_root->node->node.header));
	ins.offset = 1;
	ins.flags = 0;
	btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);

	for (i = 0; i < extent_root->current_insert.flags; i++) {
		ins.objectid = extent_root->current_insert.objectid + i;
		ret = btrfs_insert_item(extent_root, &ins, &extent_item,
					sizeof(extent_item));
		BUG_ON(ret);
	}
	extent_root->current_insert.offset = 0;
	return 0;
}

/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_root *root, u64 blocknr, u64 num_blocks,
			 int pin)
{
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_root *extent_root = root->extent_root;
	struct btrfs_extent_item *ei;
	struct btrfs_key ins;
	u32 refs;
	int ret;

	BUG_ON(pin && num_blocks != 1);
	key.objectid = blocknr;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_blocks;

	find_free_extent(root, 0, 0, (u64)-1, &ins);
	btrfs_init_path(&path);
	ret = btrfs_search_slot(extent_root, &key, &path, -1, 1);
	if (ret) {
		printf("failed to find %Lu\n", key.objectid);
		btrfs_print_tree(extent_root, extent_root->node);
		printf("failed to find %Lu\n", key.objectid);
		BUG();
	}
	ei = btrfs_item_ptr(&path.nodes[0]->leaf, path.slots[0],
			    struct btrfs_extent_item);
	BUG_ON(btrfs_extent_refs(ei) == 0);
	refs = btrfs_extent_refs(ei) - 1;
	btrfs_set_extent_refs(ei, refs);
	if (refs == 0) {
		if (pin) {
			int err;
			radix_tree_preload(GFP_KERNEL);
			err = radix_tree_insert(&extent_root->pinned_radix,
						blocknr, (void *)blocknr);
			BUG_ON(err);
			radix_tree_preload_end();
		}
		ret = btrfs_del_item(extent_root, &path);
		if (!pin && extent_root->last_insert.objectid > blocknr)
			extent_root->last_insert.objectid = blocknr;
		if (ret)
			BUG();
	}
	btrfs_release_path(extent_root, &path);
	finish_current_insert(extent_root);
	return ret;
}

/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_root *extent_root)
{
	struct btrfs_buffer *gang[4];
	int ret;
	int i;

	while(1) {
		ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 CTREE_EXTENT_PENDING_DEL);
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			ret = __free_extent(extent_root,
					    gang[i]->blocknr, 1, 1);
			radix_tree_tag_clear(&extent_root->cache_radix,
					     gang[i]->blocknr,
					     CTREE_EXTENT_PENDING_DEL);
			btrfs_block_release(extent_root, gang[i]);
		}
	}
	return 0;
}

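/*
 * keep running del_pending_extents until no block in the cache radix
 * still carries the pending tag; freeing one extent may tag more blocks
 */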
static int run_pending(struct btrfs_root *extent_root)
{
	while(radix_tree_tagged(&extent_root->cache_radix,
				CTREE_EXTENT_PENDING_DEL))
		del_pending_extents(extent_root);
	return 0;
}

/*
 * remove an extent from the root, returns 0 on success.  Deletes against
 * the extent tree itself are deferred with the pending tag to avoid
 * recursing into __free_extent.
 */
int btrfs_free_extent(struct btrfs_root *root, u64 blocknr, u64 num_blocks,
		      int pin)
{
	struct btrfs_root *extent_root = root->extent_root;
	struct btrfs_buffer *t;
	int pending_ret;
	int ret;

	if (root == extent_root) {
		/* grab a reference; del_pending_extents releases it */
		t = find_tree_block(root, blocknr);
		radix_tree_tag_set(&root->cache_radix, blocknr,
				   CTREE_EXTENT_PENDING_DEL);
		return 0;
	}
	ret = __free_extent(root, blocknr, num_blocks, pin);
	pending_ret = run_pending(root->extent_root);
	return ret ? ret : pending_ret;
}

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags == BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int find_free_extent(struct btrfs_root *orig_root, u64 num_blocks,
			    u64 search_start, u64 search_end,
			    struct btrfs_key *ins)
{
	struct btrfs_path path;
	struct btrfs_key key;
	u64 hole_size = 0;
	u64 last_block = 0;
	u64 test_block;
	int start_found;
	int slot = 0;
	int ret;
	struct btrfs_leaf *l;
	struct btrfs_root *root = orig_root->extent_root;
	int total_needed = num_blocks;

	/* leave room for the extent tree to grow while this insert runs */
	total_needed += (btrfs_header_level(&root->node->node.header) + 1) * 3;
	if (root->last_insert.objectid > search_start)
		search_start = root->last_insert.objectid;

	ins->flags = 0;
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

check_failed:
	btrfs_init_path(&path);
	ins->objectid = search_start;
	ins->offset = 0;
	start_found = 0;
	ret = btrfs_search_slot(root, ins, &path, 0, 0);
	if (ret < 0)
		goto error;

	if (path.slots[0] > 0)
		path.slots[0]--;

	while (1) {
		l = &path.nodes[0]->leaf;
		slot = path.slots[0];
		if (slot >= btrfs_header_nritems(&l->header)) {
			ret = btrfs_next_leaf(root, &path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			if (!start_found) {
				ins->objectid = search_start;
				ins->offset = (u64)-1;
				start_found = 1;
				goto check_pending;
			}
			ins->objectid = last_block > search_start ?
					last_block : search_start;
			ins->offset = (u64)-1;
			goto check_pending;
		}
		btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
		if (key.objectid >= search_start) {
			if (start_found) {
				if (last_block < search_start)
					last_block = search_start;
				hole_size = key.objectid - last_block;
				if (hole_size > total_needed) {
					ins->objectid = last_block;
					ins->offset = hole_size;
					goto check_pending;
				}
			}
		}
		start_found = 1;
		last_block = key.objectid + key.offset;
		path.slots[0]++;
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, &path);
	BUG_ON(ins->objectid < search_start);
	for (test_block = ins->objectid;
	     test_block < ins->objectid + total_needed; test_block++) {
		if (radix_tree_lookup(&root->pinned_radix, test_block)) {
			search_start = test_block + 1;
			goto check_failed;
		}
	}
	BUG_ON(root->current_insert.offset);
	root->current_insert.offset = total_needed - num_blocks;
	root->current_insert.objectid = ins->objectid + num_blocks;
	root->current_insert.flags = 0;
	root->last_insert.objectid = ins->objectid;
	ins->offset = num_blocks;
	return 0;
error:
	btrfs_release_path(root, &path);
	return ret;
}

/*
 * finds a free extent and does all the dirty work required for allocation.
 * returns the key for the allocated extent through ins.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
static int alloc_extent(struct btrfs_root *root, u64 num_blocks,
			u64 search_start, u64 search_end, u64 owner,
			struct btrfs_key *ins)
{
	int ret;
	int pending_ret;
	struct btrfs_root *extent_root = root->extent_root;
	struct btrfs_extent_item extent_item;

	btrfs_set_extent_refs(&extent_item, 1);
	btrfs_set_extent_owner(&extent_item, owner);

	/* blocks for the extent tree come from the reserved current_insert
	 * range so that we never recurse back into the allocator */
	if (root == extent_root) {
		BUG_ON(extent_root->current_insert.offset == 0);
		BUG_ON(num_blocks != 1);
		BUG_ON(extent_root->current_insert.flags ==
		       extent_root->current_insert.offset);
		ins->offset = 1;
		ins->objectid = extent_root->current_insert.objectid +
				extent_root->current_insert.flags++;
		return 0;
	}
	ret = find_free_extent(root, num_blocks, search_start,
			       search_end, ins);
	if (ret)
		return ret;

	ret = btrfs_insert_item(extent_root, ins, &extent_item,
				sizeof(extent_item));

	finish_current_insert(extent_root);
	pending_ret = run_pending(extent_root);
	if (ret)
		return ret;
	if (pending_ret)
		return pending_ret;
	return 0;
}

/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct btrfs_buffer *btrfs_alloc_free_block(struct btrfs_root *root)
{
	struct btrfs_key ins;
	int ret;
	struct btrfs_buffer *buf;

	ret = alloc_extent(root, 1, 0, (u64)-1,
			   btrfs_header_parentid(&root->node->node.header),
			   &ins);
	if (ret) {
		BUG();
		return NULL;
	}
	buf = find_tree_block(root, ins.objectid);
	dirty_tree_block(root, buf);
	return buf;
}

/*
 * helper function for drop_snapshot, this walks down the tree dropping
 * ref counts as it goes.
 */
static int walk_down_tree(struct btrfs_root *root,
			  struct btrfs_path *path, int *level)
{
	struct btrfs_buffer *next;
	struct btrfs_buffer *cur;
	u64 blocknr;
	int ret;
	u32 refs;

	ret = lookup_block_ref(root, path->nodes[*level]->blocknr, &refs);
	BUG_ON(ret);
	if (refs > 1)
		goto out;
	/*
	 * walk down to the last node level and free all the leaves
	 */
	while(*level > 0) {
		cur = path->nodes[*level];
		if (path->slots[*level] >=
		    btrfs_header_nritems(&cur->node.header))
			break;
		blocknr = btrfs_node_blockptr(&cur->node, path->slots[*level]);
		ret = lookup_block_ref(root, blocknr, &refs);
		if (refs != 1 || *level == 1) {
			path->slots[*level]++;
			ret = btrfs_free_extent(root, blocknr, 1, 1);
			BUG_ON(ret);
			continue;
		}
		BUG_ON(ret);
		next = read_tree_block(root, blocknr);
		if (path->nodes[*level-1])
			btrfs_block_release(root, path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(&next->node.header);
		path->slots[*level] = 0;
	}
out:
	ret = btrfs_free_extent(root, path->nodes[*level]->blocknr, 1, 1);
	btrfs_block_release(root, path->nodes[*level]);
	path->nodes[*level] = NULL;
	*level += 1;
	BUG_ON(ret);
	return 0;
}

/*
 * helper for dropping snapshots.  This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
static int walk_up_tree(struct btrfs_root *root, struct btrfs_path *path,
			int *level)
{
	int i;
	int slot;
	int ret;

	for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot <
		    btrfs_header_nritems(&path->nodes[i]->node.header)- 1) {
			path->slots[i]++;
			*level = i;
			return 0;
		} else {
			ret = btrfs_free_extent(root,
						path->nodes[*level]->blocknr, 1, 1);
			btrfs_block_release(root, path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
			BUG_ON(ret);
		}
	}
	return 1;
}

/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
int btrfs_drop_snapshot(struct btrfs_root *root, struct btrfs_buffer *snap)
{
	int ret = 0;
	int wret;
	int level;
	int i;
	int orig_level;
	struct btrfs_path path;

	btrfs_init_path(&path);

	level = btrfs_header_level(&snap->node.header);
	orig_level = level;
	path.nodes[level] = snap;
	path.slots[level] = 0;
	while(1) {
		wret = walk_down_tree(root, &path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_tree(root, &path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
	}
	for (i = 0; i <= orig_level; i++) {
		if (path.nodes[i]) {
			btrfs_block_release(root, path.nodes[i]);
		}
	}
	return ret;
}