get/set for struct header fields
[platform/upstream/btrfs-progs.git] / extent-tree.c
1 #include <stdio.h>
2 #include <stdlib.h>
3 #include "kerncompat.h"
4 #include "radix-tree.h"
5 #include "ctree.h"
6 #include "disk-io.h"
7 #include "print-tree.h"
8
9 static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
10                             u64 search_start, u64 search_end, struct key *ins);
11 static int finish_current_insert(struct ctree_root *extent_root);
12 static int run_pending(struct ctree_root *extent_root);
13
14 /*
15  * pending extents are blocks that we're trying to allocate in the extent
16  * map while trying to grow the map because of other allocations.  To avoid
17  * recursing, they are tagged in the radix tree and cleaned up after
18  * other allocations are done.  The pending tag is also used in the same
19  * manner for deletes.
20  */
21 #define CTREE_EXTENT_PENDING_DEL 0
22
23 static int inc_block_ref(struct ctree_root *root, u64 blocknr)
24 {
25         struct ctree_path path;
26         int ret;
27         struct key key;
28         struct leaf *l;
29         struct extent_item *item;
30         struct key ins;
31
32         find_free_extent(root->extent_root, 0, 0, (u64)-1, &ins);
33         init_path(&path);
34         key.objectid = blocknr;
35         key.flags = 0;
36         key.offset = 1;
37         ret = search_slot(root->extent_root, &key, &path, 0, 1);
38         if (ret != 0)
39                 BUG();
40         BUG_ON(ret != 0);
41         l = &path.nodes[0]->leaf;
42         item = (struct extent_item *)(l->data +
43                                       l->items[path.slots[0]].offset);
44         item->refs++;
45
46         BUG_ON(list_empty(&path.nodes[0]->dirty));
47         release_path(root->extent_root, &path);
48         finish_current_insert(root->extent_root);
49         run_pending(root->extent_root);
50         return 0;
51 }
52
53 static int lookup_block_ref(struct ctree_root *root, u64 blocknr, u32 *refs)
54 {
55         struct ctree_path path;
56         int ret;
57         struct key key;
58         struct leaf *l;
59         struct extent_item *item;
60         init_path(&path);
61         key.objectid = blocknr;
62         key.flags = 0;
63         key.offset = 1;
64         ret = search_slot(root->extent_root, &key, &path, 0, 0);
65         if (ret != 0)
66                 BUG();
67         l = &path.nodes[0]->leaf;
68         item = (struct extent_item *)(l->data +
69                                       l->items[path.slots[0]].offset);
70         *refs = item->refs;
71         release_path(root->extent_root, &path);
72         return 0;
73 }
74
75 int btrfs_inc_ref(struct ctree_root *root, struct tree_buffer *buf)
76 {
77         u64 blocknr;
78         int i;
79
80         if (root == root->extent_root)
81                 return 0;
82         if (btrfs_is_leaf(&buf->node))
83                 return 0;
84
85         for (i = 0; i < btrfs_header_nritems(&buf->node.header); i++) {
86                 blocknr = buf->node.blockptrs[i];
87                 inc_block_ref(root, blocknr);
88         }
89         return 0;
90 }
91
92 int btrfs_finish_extent_commit(struct ctree_root *root)
93 {
94         struct ctree_root *extent_root = root->extent_root;
95         unsigned long gang[8];
96         int ret;
97         int i;
98
99         while(1) {
100                 ret = radix_tree_gang_lookup(&extent_root->pinned_radix,
101                                                  (void **)gang, 0,
102                                                  ARRAY_SIZE(gang));
103                 if (!ret)
104                         break;
105                 for (i = 0; i < ret; i++) {
106                         radix_tree_delete(&extent_root->pinned_radix, gang[i]);
107                 }
108         }
109         extent_root->last_insert.objectid = 0;
110         extent_root->last_insert.offset = 0;
111         return 0;
112 }
113
/*
 * insert extent items for blocks handed out of the current_insert
 * reservation set up by find_free_extent().  current_insert.flags counts
 * how many reserved blocks were actually consumed (see alloc_extent).
 */
static int finish_current_insert(struct ctree_root *extent_root)
{
        struct key ins;
        struct extent_item extent_item;
        int i;
        int ret;

        extent_item.refs = 1;
        extent_item.owner =
                btrfs_header_parentid(&extent_root->node->node.header);
        ins.offset = 1;
        ins.flags = 0;

        /* one extent item per reserved block that was handed out */
        for (i = 0; i < extent_root->current_insert.flags; i++) {
                ins.objectid = extent_root->current_insert.objectid + i;
                ret = insert_item(extent_root, &ins, &extent_item,
                                  sizeof(extent_item));
                BUG_ON(ret);
        }
        /* mark the reservation drained; .flags is reset by the next
         * find_free_extent() call, not here */
        extent_root->current_insert.offset = 0;
        return 0;
}
136
/*
 * remove an extent from the root, returns 0 on success
 *
 * Drops one reference on the extent item covering
 * [blocknr, blocknr + num_blocks).  When the count reaches zero the item
 * is deleted; blocks belonging to the extent tree itself are additionally
 * recorded in pinned_radix so they are not reused before the commit
 * completes (see btrfs_finish_extent_commit).
 */
int __free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
{
        struct ctree_path path;
        struct key key;
        struct ctree_root *extent_root = root->extent_root;
        int ret;
        struct item *item;
        struct extent_item *ei;
        struct key ins;

        key.objectid = blocknr;
        key.flags = 0;
        key.offset = num_blocks;

        /* reserve blocks so deleting from the extent tree cannot recurse
         * into the allocator (result intentionally unused) */
        find_free_extent(root, 0, 0, (u64)-1, &ins);
        init_path(&path);
        ret = search_slot(extent_root, &key, &path, -1, 1);
        if (ret) {
                printf("failed to find %Lu\n", key.objectid);
                print_tree(extent_root, extent_root->node);
                printf("failed to find %Lu\n", key.objectid);
                BUG();
        }
        item = path.nodes[0]->leaf.items + path.slots[0];
        ei = (struct extent_item *)(path.nodes[0]->leaf.data + item->offset);
        BUG_ON(ei->refs == 0);
        ei->refs--;
        if (ei->refs == 0) {
                if (root == extent_root) {
                        /* pin extent-tree blocks until commit finishes */
                        int err;
                        radix_tree_preload(GFP_KERNEL);
                        err = radix_tree_insert(&extent_root->pinned_radix,
                                          blocknr, (void *)blocknr);
                        BUG_ON(err);
                        radix_tree_preload_end();
                }
                ret = del_item(extent_root, &path);
                /* update the allocator's search hint -- NOTE(review):
                 * raising last_insert.objectid here skips lower free
                 * space; confirm this is the intended policy */
                if (root != extent_root &&
                    extent_root->last_insert.objectid < blocknr)
                        extent_root->last_insert.objectid = blocknr;
                if (ret)
                        BUG();
        }
        release_path(extent_root, &path);
        finish_current_insert(extent_root);
        return ret;
}
187
188 /*
189  * find all the blocks marked as pending in the radix tree and remove
190  * them from the extent map
191  */
192 static int del_pending_extents(struct ctree_root *extent_root)
193 {
194         int ret;
195         struct tree_buffer *gang[4];
196         int i;
197
198         while(1) {
199                 ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
200                                                  (void **)gang, 0,
201                                                  ARRAY_SIZE(gang),
202                                                  CTREE_EXTENT_PENDING_DEL);
203                 if (!ret)
204                         break;
205                 for (i = 0; i < ret; i++) {
206                         ret = __free_extent(extent_root, gang[i]->blocknr, 1);
207                         radix_tree_tag_clear(&extent_root->cache_radix,
208                                                 gang[i]->blocknr,
209                                                 CTREE_EXTENT_PENDING_DEL);
210                         tree_block_release(extent_root, gang[i]);
211                 }
212         }
213         return 0;
214 }
215
216 static int run_pending(struct ctree_root *extent_root)
217 {
218         while(radix_tree_tagged(&extent_root->cache_radix,
219                                 CTREE_EXTENT_PENDING_DEL))
220                 del_pending_extents(extent_root);
221         return 0;
222 }
223
224
225 /*
226  * remove an extent from the root, returns 0 on success
227  */
228 int free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
229 {
230         struct key key;
231         struct ctree_root *extent_root = root->extent_root;
232         struct tree_buffer *t;
233         int pending_ret;
234         int ret;
235
236         if (root == extent_root) {
237                 t = find_tree_block(root, blocknr);
238                 radix_tree_tag_set(&root->cache_radix, blocknr,
239                                    CTREE_EXTENT_PENDING_DEL);
240                 return 0;
241         }
242         key.objectid = blocknr;
243         key.flags = 0;
244         key.offset = num_blocks;
245         ret = __free_extent(root, blocknr, num_blocks);
246         pending_ret = run_pending(root->extent_root);
247         return ret ? ret : pending_ret;
248 }
249
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = 0
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 *
 * On success, (total_needed - num_blocks) extra blocks are reserved in
 * current_insert so the extent tree can grow (alloc_extent with
 * root == extent_root) without recursing back in here.
 */
static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
                            u64 search_start, u64 search_end, struct key *ins)
{
        struct ctree_path path;
        struct key *key;
        int ret;
        u64 hole_size = 0;
        int slot = 0;
        u64 last_block;         /* only meaningful once start_found is set */
        u64 test_block;
        int start_found;
        struct leaf *l;
        struct ctree_root * root = orig_root->extent_root;
        int total_needed = num_blocks;

        /* worst case every level of the extent tree needs new blocks */
        total_needed += (btrfs_header_level(&root->node->node.header) + 1) * 3;
        if (root->last_insert.objectid > search_start)
                search_start = root->last_insert.objectid;
check_failed:
        init_path(&path);
        ins->objectid = search_start;
        ins->offset = 0;
        ins->flags = 0;
        start_found = 0;
        ret = search_slot(root, ins, &path, 0, 0);
        if (ret < 0)
                goto error;

        /* step back so the item covering search_start is examined too */
        if (path.slots[0] > 0)
                path.slots[0]--;

        while (1) {
                l = &path.nodes[0]->leaf;
                slot = path.slots[0];
                if (slot >= btrfs_header_nritems(&l->header)) {
                        ret = next_leaf(root, &path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        /* ran off the end of the tree: everything past the
                         * last allocated extent is free */
                        if (!start_found) {
                                ins->objectid = search_start;
                                ins->offset = (u64)-1;
                                start_found = 1;
                                goto check_pending;
                        }
                        ins->objectid = last_block > search_start ?
                                        last_block : search_start;
                        ins->offset = (u64)-1;
                        goto check_pending;
                }
                key = &l->items[slot].key;
                if (key->objectid >= search_start) {
                        if (start_found) {
                                if (last_block < search_start)
                                        last_block = search_start;
                                hole_size = key->objectid - last_block;
                                if (hole_size > total_needed) {
                                        ins->objectid = last_block;
                                        ins->offset = hole_size;
                                        goto check_pending;
                                }
                        }
                }
                start_found = 1;
                last_block = key->objectid + key->offset;
                path.slots[0]++;
        }
        // FIXME -ENOSPC
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        release_path(root, &path);
        BUG_ON(ins->objectid < search_start);
        for (test_block = ins->objectid;
             test_block < ins->objectid + total_needed; test_block++) {
                if (radix_tree_lookup(&root->pinned_radix, test_block)) {
                        /* pinned block inside the hole; retry past it */
                        search_start = test_block + 1;
                        goto check_failed;
                }
        }
        /* record the tail of the hole as the extent tree's reservation */
        BUG_ON(root->current_insert.offset);
        root->current_insert.offset = total_needed - num_blocks;
        root->current_insert.objectid = ins->objectid + num_blocks;
        root->current_insert.flags = 0;
        root->last_insert.objectid = ins->objectid;
        ins->offset = num_blocks;
        return 0;
error:
        release_path(root, &path);
        return ret;
}
351
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int alloc_extent(struct ctree_root *root, u64 num_blocks, u64 search_start,
                         u64 search_end, u64 owner, struct key *ins)
{
        int ret;
        int pending_ret;
        struct ctree_root *extent_root = root->extent_root;
        struct extent_item extent_item;

        extent_item.refs = 1;
        extent_item.owner = owner;

        /* allocations for the extent tree itself come out of the blocks
         * reserved by find_free_extent; their extent items are inserted
         * later by finish_current_insert to avoid recursion */
        if (root == extent_root) {
                BUG_ON(extent_root->current_insert.offset == 0);
                BUG_ON(num_blocks != 1);
                /* .flags counts reserved blocks already handed out; it
                 * must not catch up with the reservation size */
                BUG_ON(extent_root->current_insert.flags ==
                       extent_root->current_insert.offset);
                ins->offset = 1;
                ins->objectid = extent_root->current_insert.objectid +
                                extent_root->current_insert.flags++;
                return 0;
        }
        ret = find_free_extent(root, num_blocks, search_start,
                               search_end, ins);
        if (ret)
                return ret;

        ret = insert_item(extent_root, ins, &extent_item,
                          sizeof(extent_item));

        finish_current_insert(extent_root);
        pending_ret = run_pending(extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;
}
396
397 /*
398  * helper function to allocate a block for a given tree
399  * returns the tree buffer or NULL.
400  */
401 struct tree_buffer *alloc_free_block(struct ctree_root *root)
402 {
403         struct key ins;
404         int ret;
405         struct tree_buffer *buf;
406
407         ret = alloc_extent(root, 1, 0, (unsigned long)-1,
408                            btrfs_header_parentid(&root->node->node.header),
409                            &ins);
410         if (ret) {
411                 BUG();
412                 return NULL;
413         }
414         buf = find_tree_block(root, ins.objectid);
415         dirty_tree_block(root, buf);
416         return buf;
417 }
418
/*
 * snapshot-drop helper: descend from the current node in @path, freeing
 * child blocks whose extent refcount is 1.  Shared blocks (refs > 1) and
 * direct children of level-1 nodes are freed/skipped without reading
 * them.  When descent stops, the current block itself is freed and
 * *level moves up by one.  Returns 0.
 */
int walk_down_tree(struct ctree_root *root, struct ctree_path *path, int *level)
{
        struct tree_buffer *next;
        struct tree_buffer *cur;
        u64 blocknr;
        int ret;
        u32 refs;

        ret = lookup_block_ref(root, path->nodes[*level]->blocknr, &refs);
        BUG_ON(ret);
        if (refs > 1)
                goto out;       /* shared: just drop our reference below */
        while(*level > 0) {
                cur = path->nodes[*level];
                if (path->slots[*level] >=
                    btrfs_header_nritems(&cur->node.header))
                        break;  /* node exhausted; caller walks back up */
                blocknr = cur->node.blockptrs[path->slots[*level]];
                ret = lookup_block_ref(root, blocknr, &refs);
                /* shared block or leaf child: free without walking in */
                if (refs != 1 || *level == 1) {
                        path->slots[*level]++;
                        ret = free_extent(root, blocknr, 1);
                        BUG_ON(ret);
                        continue;
                }
                BUG_ON(ret);
                next = read_tree_block(root, blocknr);
                if (path->nodes[*level-1])
                        tree_block_release(root, path->nodes[*level-1]);
                path->nodes[*level-1] = next;
                *level = btrfs_header_level(&next->node.header);
                path->slots[*level] = 0;
        }
out:
        /* free the block we stopped on and pop one level */
        ret = free_extent(root, path->nodes[*level]->blocknr, 1);
        tree_block_release(root, path->nodes[*level]);
        path->nodes[*level] = NULL;
        *level += 1;
        BUG_ON(ret);
        return 0;
}
460
/*
 * snapshot-drop helper: climb @path looking for a node with unvisited
 * slots.  Fully-visited nodes are freed on the way up.  Returns 0 when a
 * new slot was found (caller descends again), 1 when the whole tree has
 * been walked.
 */
int walk_up_tree(struct ctree_root *root, struct ctree_path *path, int *level)
{
        int i;
        int slot;
        int ret;
        for(i = *level; i < MAX_LEVEL - 1 && path->nodes[i]; i++) {
                slot = path->slots[i];
                if (slot <
                    btrfs_header_nritems(&path->nodes[i]->node.header)- 1) {
                        /* more children to visit at this level */
                        path->slots[i]++;
                        *level = i;
                        return 0;
                } else {
                        /* node exhausted: free it and keep climbing.
                         * Note *level == i here; the assignment below
                         * keeps them in sync for the next iteration. */
                        ret = free_extent(root,
                                          path->nodes[*level]->blocknr, 1);
                        tree_block_release(root, path->nodes[*level]);
                        path->nodes[*level] = NULL;
                        *level = i + 1;
                        BUG_ON(ret);
                }
        }
        return 1;
}
484
485 int btrfs_drop_snapshot(struct ctree_root *root, struct tree_buffer *snap)
486 {
487         int ret;
488         int level;
489         struct ctree_path path;
490         int i;
491         int orig_level;
492
493         init_path(&path);
494
495         level = btrfs_header_level(&snap->node.header);
496         orig_level = level;
497         path.nodes[level] = snap;
498         path.slots[level] = 0;
499         while(1) {
500                 ret = walk_down_tree(root, &path, &level);
501                 if (ret > 0)
502                         break;
503                 ret = walk_up_tree(root, &path, &level);
504                 if (ret > 0)
505                         break;
506         }
507         for (i = 0; i <= orig_level; i++) {
508                 if (path.nodes[i]) {
509                         tree_block_release(root, path.nodes[i]);
510                 }
511         }
512
513         return 0;
514 }