#include <stdio.h>
#include <string.h>
#include "kerncompat.h"
#include "radix-tree.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
/*
 * pending extents are blocks that we're trying to allocate in the extent
 * map while trying to grow the map because of other allocations. To avoid
 * recursing, they are tagged in the radix tree and cleaned up after
 * other allocations are done. The pending tag is also used in the same
 * manner for deletes.
 */
#define CTREE_EXTENT_PENDING_ADD 0
#define CTREE_EXTENT_PENDING_DEL 1
19 static int inc_block_ref(struct ctree_root *root, u64 blocknr)
21 struct ctree_path path;
25 struct extent_item *item;
27 key.objectid = blocknr;
30 ret = search_slot(root->extent_root, &key, &path, 0, 1);
34 l = &path.nodes[0]->leaf;
35 item = (struct extent_item *)(l->data +
36 l->items[path.slots[0]].offset);
39 BUG_ON(list_empty(&path.nodes[0]->dirty));
40 release_path(root->extent_root, &path);
44 static int lookup_block_ref(struct ctree_root *root, u64 blocknr, int *refs)
46 struct ctree_path path;
50 struct extent_item *item;
52 key.objectid = blocknr;
55 ret = search_slot(root->extent_root, &key, &path, 0, 0);
58 l = &path.nodes[0]->leaf;
59 item = (struct extent_item *)(l->data +
60 l->items[path.slots[0]].offset);
62 release_path(root->extent_root, &path);
66 int btrfs_inc_ref(struct ctree_root *root, struct tree_buffer *buf)
71 if (root == root->extent_root)
73 if (is_leaf(buf->node.header.flags))
76 for (i = 0; i < buf->node.header.nritems; i++) {
77 blocknr = buf->node.blockptrs[i];
78 inc_block_ref(root, blocknr);
83 int btrfs_finish_extent_commit(struct ctree_root *root)
85 struct ctree_root *extent_root = root->extent_root;
86 unsigned long gang[8];
91 ret = radix_tree_gang_lookup(&extent_root->pinned_radix,
96 for (i = 0; i < ret; i++)
97 radix_tree_delete(&extent_root->pinned_radix, gang[i]);
103 * remove an extent from the root, returns 0 on success
105 int __free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
107 struct ctree_path path;
109 struct ctree_root *extent_root = root->extent_root;
112 struct extent_item *ei;
113 key.objectid = blocknr;
115 key.offset = num_blocks;
118 ret = search_slot(extent_root, &key, &path, -1, 1);
120 printf("failed to find %Lu\n", key.objectid);
121 print_tree(extent_root, extent_root->node);
122 printf("failed to find %Lu\n", key.objectid);
125 item = path.nodes[0]->leaf.items + path.slots[0];
126 ei = (struct extent_item *)(path.nodes[0]->leaf.data + item->offset);
127 BUG_ON(ei->refs == 0);
130 if (root == extent_root) {
132 radix_tree_preload(GFP_KERNEL);
133 err = radix_tree_insert(&extent_root->pinned_radix,
134 blocknr, (void *)blocknr);
136 radix_tree_preload_end();
138 ret = del_item(extent_root, &path);
142 release_path(extent_root, &path);
147 * insert all of the pending extents reserved during the original
148 * allocation. (CTREE_EXTENT_PENDING). Returns zero if it all worked out
150 static int insert_pending_extents(struct ctree_root *extent_root)
154 struct extent_item item;
155 struct tree_buffer *gang[4];
159 item.owner = extent_root->node->node.header.parentid;
162 ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
165 CTREE_EXTENT_PENDING_ADD);
168 for (i = 0; i < ret; i++) {
169 key.objectid = gang[i]->blocknr;
172 ret = insert_item(extent_root, &key, &item,
175 printf("%Lu already in tree\n", key.objectid);
176 print_tree(extent_root, extent_root->node);
178 // FIXME undo it and return sane
181 radix_tree_tag_clear(&extent_root->cache_radix,
183 CTREE_EXTENT_PENDING_ADD);
184 tree_block_release(extent_root, gang[i]);
191 * find all the blocks marked as pending in the radix tree and remove
192 * them from the extent map
194 static int del_pending_extents(struct ctree_root *extent_root)
197 struct tree_buffer *gang[4];
201 ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
204 CTREE_EXTENT_PENDING_DEL);
207 for (i = 0; i < ret; i++) {
208 ret = __free_extent(extent_root, gang[i]->blocknr, 1);
209 radix_tree_tag_clear(&extent_root->cache_radix,
211 CTREE_EXTENT_PENDING_DEL);
212 tree_block_release(extent_root, gang[i]);
218 static int run_pending(struct ctree_root *extent_root)
220 while(radix_tree_tagged(&extent_root->cache_radix,
221 CTREE_EXTENT_PENDING_DEL) ||
222 radix_tree_tagged(&extent_root->cache_radix,
223 CTREE_EXTENT_PENDING_ADD)) {
224 insert_pending_extents(extent_root);
225 del_pending_extents(extent_root);
232 * remove an extent from the root, returns 0 on success
234 int free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
237 struct ctree_root *extent_root = root->extent_root;
238 struct tree_buffer *t;
242 if (root == extent_root) {
243 t = find_tree_block(root, blocknr);
244 if (radix_tree_tag_get(&root->cache_radix, blocknr,
245 CTREE_EXTENT_PENDING_ADD)) {
246 radix_tree_tag_clear(&root->cache_radix,
248 CTREE_EXTENT_PENDING_ADD);
250 tree_block_release(root, t);
251 /* once for the pending add */
252 tree_block_release(root, t);
254 radix_tree_tag_set(&root->cache_radix, blocknr,
255 CTREE_EXTENT_PENDING_DEL);
259 key.objectid = blocknr;
261 key.offset = num_blocks;
262 ret = __free_extent(root, blocknr, num_blocks);
263 pending_ret = run_pending(root->extent_root);
264 return ret ? ret : pending_ret;
268 * walks the btree of allocated extents and find a hole of a given size.
269 * The key ins is changed to record the hole:
270 * ins->objectid == block start
272 * ins->offset == number of blocks
273 * Any available blocks before search_start are skipped.
275 static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
276 u64 search_start, u64 search_end, struct key *ins)
278 struct ctree_path path;
286 struct ctree_root * root = orig_root->extent_root;
290 ins->objectid = search_start;
294 ret = search_slot(root, ins, &path, 0, 0);
299 l = &path.nodes[0]->leaf;
300 slot = path.slots[0];
301 if (slot >= l->header.nritems) {
302 ret = next_leaf(root, &path);
308 ins->objectid = search_start;
309 ins->offset = num_blocks;
313 ins->objectid = last_block > search_start ?
314 last_block : search_start;
315 ins->offset = num_blocks;
318 key = &l->items[slot].key;
319 if (key->objectid >= search_start) {
321 hole_size = key->objectid - last_block;
322 if (hole_size > num_blocks) {
323 ins->objectid = last_block;
324 ins->offset = num_blocks;
329 last_block = key->objectid + key->offset;
335 /* we have to make sure we didn't find an extent that has already
336 * been allocated by the map tree or the original allocation
338 release_path(root, &path);
339 BUG_ON(ins->objectid < search_start);
340 if (1 || orig_root->extent_root == orig_root) {
341 BUG_ON(num_blocks != 1);
342 if ((root->current_insert.objectid <= ins->objectid &&
343 root->current_insert.objectid +
344 root->current_insert.offset > ins->objectid) ||
345 (root->current_insert.objectid > ins->objectid &&
346 root->current_insert.objectid <= ins->objectid +
348 radix_tree_lookup(&root->pinned_radix, ins->objectid) ||
349 radix_tree_tag_get(&root->cache_radix, ins->objectid,
350 CTREE_EXTENT_PENDING_ADD)) {
351 search_start = ins->objectid + 1;
355 if (ins->offset != 1)
359 release_path(root, &path);
364 * finds a free extent and does all the dirty work required for allocation
365 * returns the key for the extent through ins, and a tree buffer for
366 * the first block of the extent through buf.
368 * returns 0 if everything worked, non-zero otherwise.
370 int alloc_extent(struct ctree_root *root, u64 num_blocks, u64 search_start,
371 u64 search_end, u64 owner, struct key *ins,
372 struct tree_buffer **buf)
376 struct extent_item extent_item;
377 extent_item.refs = 1;
378 extent_item.owner = owner;
380 ret = find_free_extent(root, num_blocks, search_start, search_end, ins);
383 if (root != root->extent_root) {
384 memcpy(&root->extent_root->current_insert, ins, sizeof(*ins));
385 ret = insert_item(root->extent_root, ins, &extent_item,
386 sizeof(extent_item));
387 memset(&root->extent_root->current_insert, 0,
389 pending_ret = run_pending(root->extent_root);
394 *buf = find_tree_block(root, ins->objectid);
395 dirty_tree_block(root, *buf);
398 /* we're allocating an extent for the extent tree, don't recurse */
399 BUG_ON(ins->offset != 1);
400 *buf = find_tree_block(root, ins->objectid);
402 radix_tree_tag_set(&root->cache_radix, ins->objectid,
403 CTREE_EXTENT_PENDING_ADD);
405 dirty_tree_block(root, *buf);
411 * helper function to allocate a block for a given tree
412 * returns the tree buffer or NULL.
414 struct tree_buffer *alloc_free_block(struct ctree_root *root)
418 struct tree_buffer *buf = NULL;
420 ret = alloc_extent(root, 1, 0, (unsigned long)-1,
421 root->node->node.header.parentid,
427 if (root != root->extent_root)
428 BUG_ON(radix_tree_tag_get(&root->extent_root->cache_radix,
430 CTREE_EXTENT_PENDING_ADD));
434 int btrfs_drop_snapshot(struct ctree_root *root, struct tree_buffer *snap)
439 u64 blocknr = snap->blocknr;
441 level = node_level(snap->node.header.flags);
442 ret = lookup_block_ref(root, snap->blocknr, &refs);
444 if (refs == 1 && level != 0) {
445 struct node *n = &snap->node;
446 struct tree_buffer *b;
448 for (i = 0; i < n->header.nritems; i++) {
449 b = read_tree_block(root, n->blockptrs[i]);
450 /* FIXME, don't recurse here */
451 ret = btrfs_drop_snapshot(root, b);
453 tree_block_release(root, b);
456 ret = free_extent(root, blocknr, 1);