struct extent_item endian
[platform/upstream/btrfs-progs.git] / extent-tree.c
1 #include <stdio.h>
2 #include <stdlib.h>
3 #include "kerncompat.h"
4 #include "radix-tree.h"
5 #include "ctree.h"
6 #include "disk-io.h"
7 #include "print-tree.h"
8
9 static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
10                             u64 search_start, u64 search_end,
11                             struct btrfs_key *ins);
12 static int finish_current_insert(struct ctree_root *extent_root);
13 static int run_pending(struct ctree_root *extent_root);
14
15 /*
16  * pending extents are blocks that we're trying to allocate in the extent
17  * map while trying to grow the map because of other allocations.  To avoid
18  * recursing, they are tagged in the radix tree and cleaned up after
19  * other allocations are done.  The pending tag is also used in the same
20  * manner for deletes.
21  */
22 #define CTREE_EXTENT_PENDING_DEL 0
23
24 static int inc_block_ref(struct ctree_root *root, u64 blocknr)
25 {
26         struct ctree_path path;
27         int ret;
28         struct btrfs_key key;
29         struct leaf *l;
30         struct extent_item *item;
31         struct btrfs_key ins;
32         u32 refs;
33
34         find_free_extent(root->extent_root, 0, 0, (u64)-1, &ins);
35         init_path(&path);
36         key.objectid = blocknr;
37         key.flags = 0;
38         key.offset = 1;
39         ret = search_slot(root->extent_root, &key, &path, 0, 1);
40         if (ret != 0)
41                 BUG();
42         BUG_ON(ret != 0);
43         l = &path.nodes[0]->leaf;
44         item = (struct extent_item *)(l->data + btrfs_item_offset(l->items +
45                                                                 path.slots[0]));
46         refs = btrfs_extent_refs(item);
47         btrfs_set_extent_refs(item, refs + 1);
48
49         BUG_ON(list_empty(&path.nodes[0]->dirty));
50         release_path(root->extent_root, &path);
51         finish_current_insert(root->extent_root);
52         run_pending(root->extent_root);
53         return 0;
54 }
55
56 static int lookup_block_ref(struct ctree_root *root, u64 blocknr, u32 *refs)
57 {
58         struct ctree_path path;
59         int ret;
60         struct btrfs_key key;
61         struct leaf *l;
62         struct extent_item *item;
63         init_path(&path);
64         key.objectid = blocknr;
65         key.flags = 0;
66         key.offset = 1;
67         ret = search_slot(root->extent_root, &key, &path, 0, 0);
68         if (ret != 0)
69                 BUG();
70         l = &path.nodes[0]->leaf;
71         item = (struct extent_item *)(l->data +
72                                       btrfs_item_offset(l->items +
73                                                         path.slots[0]));
74         *refs = btrfs_extent_refs(item);
75         release_path(root->extent_root, &path);
76         return 0;
77 }
78
79 int btrfs_inc_ref(struct ctree_root *root, struct tree_buffer *buf)
80 {
81         u64 blocknr;
82         int i;
83
84         if (root == root->extent_root)
85                 return 0;
86         if (btrfs_is_leaf(&buf->node))
87                 return 0;
88
89         for (i = 0; i < btrfs_header_nritems(&buf->node.header); i++) {
90                 blocknr = btrfs_node_blockptr(&buf->node, i);
91                 inc_block_ref(root, blocknr);
92         }
93         return 0;
94 }
95
96 int btrfs_finish_extent_commit(struct ctree_root *root)
97 {
98         struct ctree_root *extent_root = root->extent_root;
99         unsigned long gang[8];
100         int ret;
101         int i;
102
103         while(1) {
104                 ret = radix_tree_gang_lookup(&extent_root->pinned_radix,
105                                                  (void **)gang, 0,
106                                                  ARRAY_SIZE(gang));
107                 if (!ret)
108                         break;
109                 for (i = 0; i < ret; i++) {
110                         radix_tree_delete(&extent_root->pinned_radix, gang[i]);
111                 }
112         }
113         extent_root->last_insert.objectid = 0;
114         extent_root->last_insert.offset = 0;
115         return 0;
116 }
117
/*
 * Insert extent items for the reserved blocks that alloc_extent handed
 * out for the extent tree's own growth.  current_insert.objectid is the
 * first reserved block; current_insert.flags counts how many were
 * actually consumed (see alloc_extent's root == extent_root path).
 */
static int finish_current_insert(struct ctree_root *extent_root)
{
        struct btrfs_key ins;
        struct extent_item extent_item;
        int i;
        int ret;

        /* each new extent starts with one ref, owned by the extent root */
        btrfs_set_extent_refs(&extent_item, 1);
        btrfs_set_extent_owner(&extent_item,
                btrfs_header_parentid(&extent_root->node->node.header));
        ins.offset = 1;
        ins.flags = 0;

        /* NOTE(review): 'flags' doubles as a used-block counter here,
         * not a flag word — set to 0 and incremented in alloc_extent */
        for (i = 0; i < extent_root->current_insert.flags; i++) {
                ins.objectid = extent_root->current_insert.objectid + i;
                ret = insert_item(extent_root, &ins, &extent_item,
                                  sizeof(extent_item));
                BUG_ON(ret);
        }
        /* clear the reservation; find_free_extent asserts offset == 0 */
        extent_root->current_insert.offset = 0;
        return 0;
}
140
141 /*
142  * remove an extent from the root, returns 0 on success
143  */
144 int __free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
145 {
146         struct ctree_path path;
147         struct btrfs_key key;
148         struct ctree_root *extent_root = root->extent_root;
149         int ret;
150         struct btrfs_item *item;
151         struct extent_item *ei;
152         struct btrfs_key ins;
153         u32 refs;
154
155         key.objectid = blocknr;
156         key.flags = 0;
157         key.offset = num_blocks;
158
159         find_free_extent(root, 0, 0, (u64)-1, &ins);
160         init_path(&path);
161         ret = search_slot(extent_root, &key, &path, -1, 1);
162         if (ret) {
163                 printf("failed to find %Lu\n", key.objectid);
164                 print_tree(extent_root, extent_root->node);
165                 printf("failed to find %Lu\n", key.objectid);
166                 BUG();
167         }
168         item = path.nodes[0]->leaf.items + path.slots[0];
169         ei = (struct extent_item *)(path.nodes[0]->leaf.data +
170                                     btrfs_item_offset(item));
171         BUG_ON(ei->refs == 0);
172         refs = btrfs_extent_refs(ei) - 1;
173         btrfs_set_extent_refs(ei, refs);
174         if (refs == 0) {
175                 if (root == extent_root) {
176                         int err;
177                         radix_tree_preload(GFP_KERNEL);
178                         err = radix_tree_insert(&extent_root->pinned_radix,
179                                           blocknr, (void *)blocknr);
180                         BUG_ON(err);
181                         radix_tree_preload_end();
182                 }
183                 ret = del_item(extent_root, &path);
184                 if (root != extent_root &&
185                     extent_root->last_insert.objectid < blocknr)
186                         extent_root->last_insert.objectid = blocknr;
187                 if (ret)
188                         BUG();
189         }
190         release_path(extent_root, &path);
191         finish_current_insert(extent_root);
192         return ret;
193 }
194
195 /*
196  * find all the blocks marked as pending in the radix tree and remove
197  * them from the extent map
198  */
199 static int del_pending_extents(struct ctree_root *extent_root)
200 {
201         int ret;
202         struct tree_buffer *gang[4];
203         int i;
204
205         while(1) {
206                 ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
207                                                  (void **)gang, 0,
208                                                  ARRAY_SIZE(gang),
209                                                  CTREE_EXTENT_PENDING_DEL);
210                 if (!ret)
211                         break;
212                 for (i = 0; i < ret; i++) {
213                         ret = __free_extent(extent_root, gang[i]->blocknr, 1);
214                         radix_tree_tag_clear(&extent_root->cache_radix,
215                                                 gang[i]->blocknr,
216                                                 CTREE_EXTENT_PENDING_DEL);
217                         tree_block_release(extent_root, gang[i]);
218                 }
219         }
220         return 0;
221 }
222
223 static int run_pending(struct ctree_root *extent_root)
224 {
225         while(radix_tree_tagged(&extent_root->cache_radix,
226                                 CTREE_EXTENT_PENDING_DEL))
227                 del_pending_extents(extent_root);
228         return 0;
229 }
230
231
232 /*
233  * remove an extent from the root, returns 0 on success
234  */
235 int free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
236 {
237         struct btrfs_key key;
238         struct ctree_root *extent_root = root->extent_root;
239         struct tree_buffer *t;
240         int pending_ret;
241         int ret;
242
243         if (root == extent_root) {
244                 t = find_tree_block(root, blocknr);
245                 radix_tree_tag_set(&root->cache_radix, blocknr,
246                                    CTREE_EXTENT_PENDING_DEL);
247                 return 0;
248         }
249         key.objectid = blocknr;
250         key.flags = 0;
251         key.offset = num_blocks;
252         ret = __free_extent(root, blocknr, num_blocks);
253         pending_ret = run_pending(root->extent_root);
254         return ret ? ret : pending_ret;
255 }
256
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = 0
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 *
 * NOTE(review): search_end is currently unused.
 */
static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
                            u64 search_start, u64 search_end,
                            struct btrfs_key *ins)
{
        struct ctree_path path;
        struct btrfs_key key;
        int ret;
        u64 hole_size = 0;
        int slot = 0;
        u64 last_block;
        u64 test_block;
        int start_found;
        struct leaf *l;
        struct ctree_root * root = orig_root->extent_root;
        int total_needed = num_blocks;

        /* over-reserve so inserting the new extent items can grow the
         * extent tree itself without recursing: 3 blocks per level */
        total_needed += (btrfs_header_level(&root->node->node.header) + 1) * 3;
        /* resume the scan where the last insert left off */
        if (root->last_insert.objectid > search_start)
                search_start = root->last_insert.objectid;
check_failed:
        init_path(&path);
        ins->objectid = search_start;
        ins->offset = 0;
        ins->flags = 0;
        start_found = 0;
        ret = search_slot(root, ins, &path, 0, 0);
        if (ret < 0)
                goto error;

        /* step back one slot so the extent covering search_start is seen */
        if (path.slots[0] > 0)
                path.slots[0]--;

        while (1) {
                l = &path.nodes[0]->leaf;
                slot = path.slots[0];
                if (slot >= btrfs_header_nritems(&l->header)) {
                        ret = next_leaf(root, &path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        /* ran off the end of the tree: everything past the
                         * last allocated extent is free */
                        if (!start_found) {
                                ins->objectid = search_start;
                                ins->offset = (u64)-1;
                                start_found = 1;
                                goto check_pending;
                        }
                        ins->objectid = last_block > search_start ?
                                        last_block : search_start;
                        ins->offset = (u64)-1;
                        goto check_pending;
                }
                btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
                if (key.objectid >= search_start) {
                        if (start_found) {
                                if (last_block < search_start)
                                        last_block = search_start;
                                /* hole between the previous extent's end
                                 * and this extent's start */
                                hole_size = key.objectid - last_block;
                                if (hole_size > total_needed) {
                                        ins->objectid = last_block;
                                        ins->offset = hole_size;
                                        goto check_pending;
                                }
                        }
                }
                start_found = 1;
                last_block = key.objectid + key.offset;
                path.slots[0]++;
        }
        // FIXME -ENOSPC
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        release_path(root, &path);
        BUG_ON(ins->objectid < search_start);
        for (test_block = ins->objectid;
             test_block < ins->objectid + total_needed; test_block++) {
                /* pinned blocks were freed this transaction and are not
                 * reusable until commit; restart past them */
                if (radix_tree_lookup(&root->pinned_radix, test_block)) {
                        search_start = test_block + 1;
                        goto check_failed;
                }
        }
        /* record the reservation for the extent tree's own growth:
         * 'offset' is the reserved size, 'flags' counts blocks that
         * alloc_extent hands out before finish_current_insert runs */
        BUG_ON(root->current_insert.offset);
        root->current_insert.offset = total_needed - num_blocks;
        root->current_insert.objectid = ins->objectid + num_blocks;
        root->current_insert.flags = 0;
        root->last_insert.objectid = ins->objectid;
        ins->offset = num_blocks;
        return 0;
error:
        release_path(root, &path);
        return ret;
}
359
/*
 * finds a free extent and does all the dirty work required for
 * allocation.  Returns the key for the extent through ins.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int alloc_extent(struct ctree_root *root, u64 num_blocks, u64 search_start,
                         u64 search_end, u64 owner, struct btrfs_key *ins)
{
        int ret;
        int pending_ret;
        struct ctree_root *extent_root = root->extent_root;
        struct extent_item extent_item;

        /* new extents start with a single reference held by 'owner' */
        btrfs_set_extent_refs(&extent_item, 1);
        btrfs_set_extent_owner(&extent_item, owner);

        /* allocations for the extent tree itself come from the blocks
         * find_free_extent reserved; searching here would recurse.
         * 'flags' counts consumed reserved blocks and must stay below
         * 'offset' (the reservation size); finish_current_insert adds
         * the matching extent items afterwards. */
        if (root == extent_root) {
                BUG_ON(extent_root->current_insert.offset == 0);
                BUG_ON(num_blocks != 1);
                BUG_ON(extent_root->current_insert.flags ==
                       extent_root->current_insert.offset);
                ins->offset = 1;
                ins->objectid = extent_root->current_insert.objectid +
                                extent_root->current_insert.flags++;
                return 0;
        }
        ret = find_free_extent(root, num_blocks, search_start,
                               search_end, ins);
        if (ret)
                return ret;

        ret = insert_item(extent_root, ins, &extent_item,
                          sizeof(extent_item));

        /* flush reservation bookkeeping and any pending deletes even if
         * the insert failed, then report the first error */
        finish_current_insert(extent_root);
        pending_ret = run_pending(extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;
}
404
405 /*
406  * helper function to allocate a block for a given tree
407  * returns the tree buffer or NULL.
408  */
409 struct tree_buffer *alloc_free_block(struct ctree_root *root)
410 {
411         struct btrfs_key ins;
412         int ret;
413         struct tree_buffer *buf;
414
415         ret = alloc_extent(root, 1, 0, (unsigned long)-1,
416                            btrfs_header_parentid(&root->node->node.header),
417                            &ins);
418         if (ret) {
419                 BUG();
420                 return NULL;
421         }
422         buf = find_tree_block(root, ins.objectid);
423         dirty_tree_block(root, buf);
424         return buf;
425 }
426
/*
 * Walk downward from path->nodes[*level], dropping one reference on each
 * block passed.  Subtrees whose root is shared (refs > 1) are skipped
 * rather than descended into.  On return, the node at the (updated)
 * *level has been freed and popped; always returns 0.
 */
int walk_down_tree(struct ctree_root *root, struct ctree_path *path, int *level)
{
        struct tree_buffer *next;
        struct tree_buffer *cur;
        u64 blocknr;
        int ret;
        u32 refs;

        ret = lookup_block_ref(root, path->nodes[*level]->blocknr, &refs);
        BUG_ON(ret);
        /* this whole subtree is shared with another root; don't descend */
        if (refs > 1)
                goto out;
        while(*level > 0) {
                cur = path->nodes[*level];
                if (path->slots[*level] >=
                    btrfs_header_nritems(&cur->node.header))
                        break;
                blocknr = btrfs_node_blockptr(&cur->node, path->slots[*level]);
                ret = lookup_block_ref(root, blocknr, &refs);
                /* shared child, or a pointer to a leaf (*level == 1):
                 * just drop our ref and advance to the next slot */
                if (refs != 1 || *level == 1) {
                        path->slots[*level]++;
                        ret = free_extent(root, blocknr, 1);
                        BUG_ON(ret);
                        continue;
                }
                BUG_ON(ret);
                next = read_tree_block(root, blocknr);
                if (path->nodes[*level-1])
                        tree_block_release(root, path->nodes[*level-1]);
                path->nodes[*level-1] = next;
                *level = btrfs_header_level(&next->node.header);
                path->slots[*level] = 0;
        }
out:
        /* done with this node: free its extent and pop up one level */
        ret = free_extent(root, path->nodes[*level]->blocknr, 1);
        tree_block_release(root, path->nodes[*level]);
        path->nodes[*level] = NULL;
        *level += 1;
        BUG_ON(ret);
        return 0;
}
468
/*
 * Climb the path looking for a node with unvisited slots.  Fully-visited
 * nodes are freed and released on the way up.  Returns 0 with *level set
 * when walk_down_tree has more work, or 1 when the tree is consumed.
 */
int walk_up_tree(struct ctree_root *root, struct ctree_path *path, int *level)
{
        int i;
        int slot;
        int ret;
        for(i = *level; i < MAX_LEVEL - 1 && path->nodes[i]; i++) {
                slot = path->slots[i];
                if (slot <
                    btrfs_header_nritems(&path->nodes[i]->node.header)- 1) {
                        /* more siblings at this level; resume here */
                        path->slots[i]++;
                        *level = i;
                        return 0;
                } else {
                        /* node exhausted (*level == i here): free it
                         * and keep climbing */
                        ret = free_extent(root,
                                          path->nodes[*level]->blocknr, 1);
                        tree_block_release(root, path->nodes[*level]);
                        path->nodes[*level] = NULL;
                        *level = i + 1;
                        BUG_ON(ret);
                }
        }
        return 1;
}
492
493 int btrfs_drop_snapshot(struct ctree_root *root, struct tree_buffer *snap)
494 {
495         int ret;
496         int level;
497         struct ctree_path path;
498         int i;
499         int orig_level;
500
501         init_path(&path);
502
503         level = btrfs_header_level(&snap->node.header);
504         orig_level = level;
505         path.nodes[level] = snap;
506         path.slots[level] = 0;
507         while(1) {
508                 ret = walk_down_tree(root, &path, &level);
509                 if (ret > 0)
510                         break;
511                 ret = walk_up_tree(root, &path, &level);
512                 if (ret > 0)
513                         break;
514         }
515         for (i = 0; i <= orig_level; i++) {
516                 if (path.nodes[i]) {
517                         tree_block_release(root, path.nodes[i]);
518                 }
519         }
520
521         return 0;
522 }