3 #include "kerncompat.h"
4 #include "radix-tree.h"
7 #include "print-tree.h"
9 static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
10 u64 search_start, u64 search_end, struct key *ins);
11 static int finish_current_insert(struct ctree_root *extent_root);
12 static int run_pending(struct ctree_root *extent_root);
15 * pending extents are blocks that we're trying to allocate in the extent
16 * map while trying to grow the map because of other allocations. To avoid
17 * recursing, they are tagged in the radix tree and cleaned up after
18 * other allocations are done. The pending tag is also used in the same
21 #define CTREE_EXTENT_PENDING_DEL 0
/*
 * inc_block_ref: bump the reference count stored in the extent record
 * for block @blocknr in the extent tree.
 *
 * NOTE(review): this chunk elides several lines of the original file
 * (declarations, error checks, return); comments describe only the
 * visible lines.
 */
23 static int inc_block_ref(struct ctree_root *root, u64 blocknr)
25 struct ctree_path path;
29 struct extent_item *item;
/* presumably pre-reserves room in the extent tree so the cow'ing
 * search below cannot recurse into allocation -- TODO confirm */
32 find_free_extent(root->extent_root, 0, 0, (u64)-1, &ins);
34 key.objectid = blocknr;
/* last arg 1: cow the path, we are modifying the item in place */
37 ret = search_slot(root->extent_root, &key, &path, 0, 1);
/* locate the extent_item inside the leaf's data area via its offset */
41 l = &path.nodes[0]->leaf;
42 item = (struct extent_item *)(l->data +
43 l->items[path.slots[0]].offset);
/* the leaf must already be on the dirty list after the cow'ing search */
46 BUG_ON(list_empty(&path.nodes[0]->dirty));
47 release_path(root->extent_root, &path);
/* flush insertions/deletions deferred while the path was held */
48 finish_current_insert(root->extent_root);
49 run_pending(root->extent_root);
/*
 * lookup_block_ref: read the reference count for block @blocknr out of
 * the extent tree into *refs.  Read-only: search_slot is called with
 * cow == 0 so nothing is modified.
 *
 * NOTE(review): some lines of this function are elided in this chunk.
 */
53 static int lookup_block_ref(struct ctree_root *root, u64 blocknr, int *refs)
55 struct ctree_path path;
59 struct extent_item *item;
61 key.objectid = blocknr;
64 ret = search_slot(root->extent_root, &key, &path, 0, 0)
67 l = &path.nodes[0]->leaf;
68 item = (struct extent_item *)(l->data +
69 l->items[path.slots[0]].offset);
71 release_path(root->extent_root, &path);
/*
 * btrfs_inc_ref: increment the extent refcount of every block pointed
 * to by node @buf.  Skipped when @root is the extent tree itself and
 * when @buf is a leaf (only interior nodes carry blockptrs here).
 */
75 int btrfs_inc_ref(struct ctree_root *root, struct tree_buffer *buf)
80 if (root == root->extent_root)
82 if (is_leaf(buf->node.header.flags))
85 for (i = 0; i < buf->node.header.nritems; i++) {
86 blocknr = buf->node.blockptrs[i];
87 inc_block_ref(root, blocknr);
/*
 * btrfs_finish_extent_commit: empty the pinned radix tree after a
 * commit; blocks pinned during the transaction become reusable again.
 * Gang lookup pulls out up to 8 entries per pass.
 */
92 int btrfs_finish_extent_commit(struct ctree_root *root)
94 struct ctree_root *extent_root = root->extent_root;
95 unsigned long gang[8];
100 ret = radix_tree_gang_lookup(&extent_root->pinned_radix,
105 for (i = 0; i < ret; i++)
106 radix_tree_delete(&extent_root->pinned_radix, gang[i]);
/*
 * finish_current_insert: insert extent items for the blocks recorded
 * in extent_root->current_insert (objectid = first reserved block;
 * flags appears to count how many of the reserved blocks were actually
 * handed out -- TODO confirm).  Clears the reservation when done.
 */
111 static int finish_current_insert(struct ctree_root *extent_root)
114 struct extent_item extent_item;
118 extent_item.refs = 1;
/* new extents are owned by whatever tree the extent root belongs to */
119 extent_item.owner = extent_root->node->node.header.parentid;
123 for (i = 0; i < extent_root->current_insert.flags; i++) {
124 ins.objectid = extent_root->current_insert.objectid + i;
125 ret = insert_item(extent_root, &ins, &extent_item,
126 sizeof(extent_item));
/* reservation consumed; the next find_free_extent may reserve again */
129 extent_root->current_insert.offset = 0;
134 * remove an extent from the root, returns 0 on success
/*
 * __free_extent: drop a reference on extent [blocknr, blocknr +
 * num_blocks).  Extent-tree-owned blocks are also inserted into the
 * pinned radix so they are not reused before the commit completes.
 *
 * NOTE(review): lines elided in this chunk hide the refcount decrement
 * and the "last reference" test guarding the del_item/pin path.
 */
136 int __free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
138 struct ctree_path path;
140 struct ctree_root *extent_root = root->extent_root;
143 struct extent_item *ei;
146 key.objectid = blocknr;
148 key.offset = num_blocks;
/* presumably reserves blocks so the cow'ing delete below cannot
 * recurse into allocation -- TODO confirm */
150 find_free_extent(root, 0, 0, (u64)-1, &ins);
/* -1: search for deletion, 1: cow the path */
152 ret = search_slot(extent_root, &key, &path, -1, 1);
154 printf("failed to find %Lu\n", key.objectid);
155 print_tree(extent_root, extent_root->node);
156 printf("failed to find %Lu\n", key.objectid);
159 item = path.nodes[0]->leaf.items + path.slots[0];
160 ei = (struct extent_item *)(path.nodes[0]->leaf.data + item->offset);
/* freeing an extent that already has no references is a logic error */
161 BUG_ON(ei->refs == 0);
164 if (root == extent_root) {
/* pin the block until commit; key doubles as the stored value */
166 radix_tree_preload(GFP_KERNEL);
167 err = radix_tree_insert(&extent_root->pinned_radix,
168 blocknr, (void *)blocknr);
170 radix_tree_preload_end();
172 ret = del_item(extent_root, &path);
176 release_path(extent_root, &path);
177 finish_current_insert(extent_root);
182 * find all the blocks marked as pending in the radix tree and remove
183 * them from the extent map
/*
 * del_pending_extents: find cached tree blocks tagged
 * CTREE_EXTENT_PENDING_DEL, free their extents, clear the tag and drop
 * the cached buffer reference.  Processes up to 4 blocks per gang pass.
 */
185 static int del_pending_extents(struct ctree_root *extent_root)
188 struct tree_buffer *gang[4];
192 ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
195 CTREE_EXTENT_PENDING_DEL);
198 for (i = 0; i < ret; i++) {
/* each pending block is freed as a single-block extent */
199 ret = __free_extent(extent_root, gang[i]->blocknr, 1);
200 radix_tree_tag_clear(&extent_root->cache_radix,
202 CTREE_EXTENT_PENDING_DEL);
203 tree_block_release(extent_root, gang[i]);
/*
 * run_pending: loop until no cached block remains tagged for pending
 * deletion; __free_extent may tag new blocks while deletions run, so a
 * single pass is not enough.
 */
209 static int run_pending(struct ctree_root *extent_root)
211 while(radix_tree_tagged(&extent_root->cache_radix,
212 CTREE_EXTENT_PENDING_DEL))
213 del_pending_extents(extent_root);
219 * remove an extent from the root, returns 0 on success
/*
 * free_extent: public entry point for dropping an extent.  Extents
 * belonging to the extent tree itself are only tagged PENDING_DEL here
 * (immediate deletion could recurse into the extent tree); other
 * extents go straight to __free_extent.  Pending deletions are flushed
 * either way before returning.
 */
221 int free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
224 struct ctree_root *extent_root = root->extent_root;
225 struct tree_buffer *t;
229 if (root == extent_root) {
/* defer: tag the cached buffer instead of deleting right away */
230 t = find_tree_block(root, blocknr);
231 radix_tree_tag_set(&root->cache_radix, blocknr,
232 CTREE_EXTENT_PENDING_DEL);
235 key.objectid = blocknr;
237 key.offset = num_blocks;
238 ret = __free_extent(root, blocknr, num_blocks);
239 pending_ret = run_pending(root->extent_root);
/* report the direct failure first, then any pending-run failure */
240 return ret ? ret : pending_ret;
244 * walks the btree of allocated extents and find a hole of a given size.
245 * The key ins is changed to record the hole:
246 * ins->objectid == block start
248 * ins->offset == number of blocks
249 * Any available blocks before search_start are skipped.
/*
 * find_free_extent: walk the extent tree looking for a hole large
 * enough for num_blocks plus slack (MAX_LEVEL * 3 extra blocks, so the
 * node splits triggered by the eventual insert cannot run out of
 * space).  On success *ins describes the hole and
 * root->current_insert records the reservation for
 * finish_current_insert to consume.
 *
 * NOTE(review): the search loop's framing and several error paths are
 * elided in this chunk; comments describe only the visible lines.
 */
251 static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
252 u64 search_start, u64 search_end, struct key *ins)
254 struct ctree_path path;
/* allocation always happens in the extent tree, whatever tree asked */
263 struct ctree_root * root = orig_root->extent_root;
/* slack for tree splits caused by inserting the new extent item */
264 int total_needed = num_blocks + MAX_LEVEL * 3;
268 ins->objectid = search_start;
272 ret = search_slot(root, ins, &path, 0, 0);
277 l = &path.nodes[0]->leaf;
278 slot = path.slots[0];
/* ran off the end of this leaf: advance to the next one */
279 if (slot >= l->header.nritems) {
280 ret = next_leaf(root, &path);
286 ins->objectid = search_start;
287 ins->offset = (u64)-1;
/* no further leaves: everything past the last extent is free */
291 ins->objectid = last_block > search_start ?
292 last_block : search_start;
293 ins->offset = (u64)-1;
/* presumably a dense-leaf shortcut: if the key span of this leaf is
 * too tight to contain a big enough hole, skip the whole leaf --
 * TODO confirm the comparison's intent */
297 int last_slot = l->header.nritems - 1;
298 u64 span = l->items[last_slot].key.objectid;
299 span -= l->items[slot].key.objectid;
300 if (span + total_needed > last_slot - slot) {
301 path.slots[0] = last_slot + 1;
302 key = &l->items[last_slot].key;
303 last_block = key->objectid + key->offset;
/* gap test between the previous extent's end and this key's start */
308 key = &l->items[slot].key;
309 if (key->objectid >= search_start) {
311 hole_size = key->objectid - last_block;
312 if (hole_size > total_needed) {
313 ins->objectid = last_block;
314 ins->offset = hole_size;
319 last_block = key->objectid + key->offset;
325 /* we have to make sure we didn't find an extent that has already
326 * been allocated by the map tree or the original allocation
328 release_path(root, &path);
329 BUG_ON(ins->objectid < search_start);
/* reject candidates overlapping pinned (not-yet-reusable) blocks */
330 for (test_block = ins->objectid;
331 test_block < ins->objectid + total_needed; test_block++) {
332 if (radix_tree_lookup(&root->pinned_radix, test_block)) {
333 search_start = test_block + 1;
/* record the reservation; cleared again by finish_current_insert */
337 BUG_ON(root->current_insert.offset);
338 root->current_insert.offset = total_needed;
339 root->current_insert.objectid = ins->objectid + num_blocks;
340 root->current_insert.flags = 0;
341 ins->offset = num_blocks;
344 release_path(root, &path);
349 * finds a free extent and does all the dirty work required for allocation
350 * returns the key for the extent through ins, and a tree buffer for
351 * the first block of the extent through buf.
353 * returns 0 if everything worked, non-zero otherwise.
/*
 * alloc_extent: allocate num_blocks and insert the matching extent
 * item, returning the chosen location through *ins.  Allocations for
 * the extent tree itself come out of the reservation left by
 * find_free_extent (current_insert) to avoid recursion; every other
 * tree does a fresh search.
 *
 * NOTE(review): elided lines hide the error-return paths.
 */
355 int alloc_extent(struct ctree_root *root, u64 num_blocks, u64 search_start,
356 u64 search_end, u64 owner, struct key *ins)
360 struct ctree_root *extent_root = root->extent_root;
361 struct extent_item extent_item;
363 extent_item.refs = 1;
364 extent_item.owner = owner;
366 if (root == extent_root) {
/* must be inside a reservation window, one block at a time */
367 BUG_ON(extent_root->current_insert.offset == 0);
368 BUG_ON(num_blocks != 1);
/* flags counts blocks already taken; must not exceed the window */
369 BUG_ON(extent_root->current_insert.flags ==
370 extent_root->current_insert.offset);
372 ins->objectid = extent_root->current_insert.objectid +
373 extent_root->current_insert.flags++;
376 ret = find_free_extent(root, num_blocks, search_start,
381 ret = insert_item(extent_root, ins, &extent_item,
382 sizeof(extent_item));
/* flush deferred inserts and pending deletions before returning */
384 finish_current_insert(extent_root);
385 pending_ret = run_pending(extent_root);
394 * helper function to allocate a block for a given tree
395 * returns the tree buffer or NULL.
/*
 * alloc_free_block: allocate a single block for @root's tree, fetch its
 * cached buffer and mark it dirty.  Returns the buffer (the failure
 * path is not visible in this chunk).
 *
 * NOTE(review): the search_end argument is (unsigned long)-1, while
 * every other caller in this file passes (u64)-1 -- on a 32-bit build
 * this truncates the search range; likely should be (u64)-1 (cannot be
 * fixed here: surrounding lines are elided).
 */
397 struct tree_buffer *alloc_free_block(struct ctree_root *root)
401 struct tree_buffer *buf;
403 ret = alloc_extent(root, 1, 0, (unsigned long)-1,
404 root->node->node.header.parentid,
410 buf = find_tree_block(root, ins.objectid);
411 dirty_tree_block(root, buf);
/*
 * btrfs_drop_snapshot: drop one reference on the subtree rooted at
 * @snap.  When this is the last reference and @snap is an interior
 * node, first recurse into every child so their extents are dropped
 * too, then free @snap's own block.
 *
 * NOTE(review): this function continues past the end of the visible
 * chunk; the tail (return handling) is not shown.
 */
415 int btrfs_drop_snapshot(struct ctree_root *root, struct tree_buffer *snap)
420 u64 blocknr = snap->blocknr;
422 level = node_level(snap->node.header.flags);
423 ret = lookup_block_ref(root, snap->blocknr, &refs);
/* last reference to an interior node: children go first */
425 if (refs == 1 && level != 0) {
426 struct node *n = &snap->node;
427 struct tree_buffer *b;
429 for (i = 0; i < n->header.nritems; i++) {
430 b = read_tree_block(root, n->blockptrs[i]);
431 /* FIXME, don't recurse here */
432 ret = btrfs_drop_snapshot(root, b);
434 tree_block_release(root, b);
437 ret = free_extent(root, blocknr, 1);