2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
20 #include "transaction.h"
21 #include "print-tree.h"
25 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
26 *root, struct btrfs_path *path, int level);
27 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
28 *root, struct btrfs_key *ins_key,
29 struct btrfs_path *path, int data_size, int extend);
30 static int push_node_left(struct btrfs_trans_handle *trans,
31 struct btrfs_root *root, struct extent_buffer *dst,
32 struct extent_buffer *src, int empty);
33 static int balance_node_right(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root,
35 struct extent_buffer *dst_buf,
36 struct extent_buffer *src_buf);
38 inline void btrfs_init_path(struct btrfs_path *p)
40 memset(p, 0, sizeof(*p));
43 struct btrfs_path *btrfs_alloc_path(void)
45 struct btrfs_path *path;
46 path = kzalloc(sizeof(struct btrfs_path), GFP_NOFS);
50 void btrfs_free_path(struct btrfs_path *p)
54 btrfs_release_path(p);
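/*
 * drop the extent buffer references held by a path and zero it out so
 * it can be reused
 */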
58 void btrfs_release_path(struct btrfs_path *p)
61 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
64 free_extent_buffer(p->nodes[i]);
66 memset(p, 0, sizeof(*p));
69 void add_root_to_dirty_list(struct btrfs_root *root)
71 if (root->track_dirty && list_empty(&root->dirty_list)) {
72 list_add(&root->dirty_list,
73 &root->fs_info->dirty_cowonly_roots);
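/*
 * make a copy of the root node 'buf' for the tree identified by
 * new_root_objectid (used when creating a new tree, e.g. a snapshot or
 * reloc tree, from an existing root); the copy is returned via *cow_ret
 */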
77 int btrfs_copy_root(struct btrfs_trans_handle *trans,
78 struct btrfs_root *root,
79 struct extent_buffer *buf,
80 struct extent_buffer **cow_ret, u64 new_root_objectid)
82 struct extent_buffer *cow;
85 struct btrfs_root *new_root;
86 struct btrfs_disk_key disk_key;
88 new_root = kmalloc(sizeof(*new_root), GFP_NOFS);
92 memcpy(new_root, root, sizeof(*new_root));
93 new_root->root_key.objectid = new_root_objectid;
95 WARN_ON(root->ref_cows && trans->transid !=
96 root->fs_info->running_transaction->transid);
97 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
99 level = btrfs_header_level(buf);
101 btrfs_item_key(buf, &disk_key, 0);
103 btrfs_node_key(buf, &disk_key, 0);
104 cow = btrfs_alloc_free_block(trans, new_root, buf->len,
105 new_root_objectid, &disk_key,
106 level, buf->start, 0);
112 copy_extent_buffer(cow, buf, 0, 0, cow->len);
113 btrfs_set_header_bytenr(cow, cow->start);
114 btrfs_set_header_generation(cow, trans->transid);
115 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
116 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
117 BTRFS_HEADER_FLAG_RELOC);
118 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
119 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
121 btrfs_set_header_owner(cow, new_root_objectid);
123 write_extent_buffer(cow, root->fs_info->fsid,
124 btrfs_header_fsid(), BTRFS_FSID_SIZE);
126 WARN_ON(btrfs_header_generation(buf) > trans->transid);
127 ret = btrfs_inc_ref(trans, new_root, cow, 0);
133 btrfs_mark_buffer_dirty(cow);
139 * check if the tree block can be shared by multiple trees
141 static int btrfs_block_can_be_shared(struct btrfs_root *root,
142 struct extent_buffer *buf)
145 * Tree blocks not in reference counted trees and tree roots
146 * are never shared. If a block was allocated after the last
147 * snapshot and the block was not allocated by tree relocation,
148 * we know the block is not shared.
150 if (root->ref_cows &&
151 buf != root->node && buf != root->commit_root &&
152 (btrfs_header_generation(buf) <=
153 btrfs_root_last_snapshot(&root->root_item) ||
154 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
156 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
157 if (root->ref_cows &&
158 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
164 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
165 struct btrfs_root *root,
166 struct extent_buffer *buf,
167 struct extent_buffer *cow)
176 * Backrefs update rules:
178 * Always use full backrefs for extent pointers in tree block
179 * allocated by tree relocation.
181 * If a shared tree block is no longer referenced by its owner
182 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
183 * use full backrefs for extent pointers in tree block.
185 * If a tree block is being relocated
186 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
187 * use full backrefs for extent pointers in tree block.
188 * The reason for this is that some operations (such as dropping a tree)
189 * are only allowed for blocks that use full backrefs.
192 if (btrfs_block_can_be_shared(root, buf)) {
193 ret = btrfs_lookup_extent_info(trans, root, buf->start,
194 btrfs_header_level(buf), 1,
200 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
201 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
202 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
207 owner = btrfs_header_owner(buf);
208 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
209 owner == BTRFS_TREE_RELOC_OBJECTID);
212 if ((owner == root->root_key.objectid ||
213 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
214 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
215 ret = btrfs_inc_ref(trans, root, buf, 1);
218 if (root->root_key.objectid ==
219 BTRFS_TREE_RELOC_OBJECTID) {
220 ret = btrfs_dec_ref(trans, root, buf, 0);
222 ret = btrfs_inc_ref(trans, root, cow, 1);
225 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
228 if (root->root_key.objectid ==
229 BTRFS_TREE_RELOC_OBJECTID)
230 ret = btrfs_inc_ref(trans, root, cow, 1);
232 ret = btrfs_inc_ref(trans, root, cow, 0);
235 if (new_flags != 0) {
236 ret = btrfs_set_block_flags(trans, root, buf->start,
237 btrfs_header_level(buf),
242 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
243 if (root->root_key.objectid ==
244 BTRFS_TREE_RELOC_OBJECTID)
245 ret = btrfs_inc_ref(trans, root, cow, 1);
247 ret = btrfs_inc_ref(trans, root, cow, 0);
249 ret = btrfs_dec_ref(trans, root, buf, 1);
252 clean_tree_block(trans, root, buf);
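/*
 * do the real copy-on-write work for a single tree block: allocate a
 * new block near search_start, copy 'buf' into it, update the back
 * references and wire the copy into 'parent' (or make it the new root);
 * the copy is returned via *cow_ret
 */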
257 int __btrfs_cow_block(struct btrfs_trans_handle *trans,
258 struct btrfs_root *root,
259 struct extent_buffer *buf,
260 struct extent_buffer *parent, int parent_slot,
261 struct extent_buffer **cow_ret,
262 u64 search_start, u64 empty_size)
264 struct extent_buffer *cow;
265 struct btrfs_disk_key disk_key;
268 WARN_ON(root->ref_cows && trans->transid !=
269 root->fs_info->running_transaction->transid);
270 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
272 level = btrfs_header_level(buf);
275 btrfs_item_key(buf, &disk_key, 0);
277 btrfs_node_key(buf, &disk_key, 0);
279 cow = btrfs_alloc_free_block(trans, root, buf->len,
280 root->root_key.objectid, &disk_key,
281 level, search_start, empty_size);
285 copy_extent_buffer(cow, buf, 0, 0, cow->len);
286 btrfs_set_header_bytenr(cow, cow->start);
287 btrfs_set_header_generation(cow, trans->transid);
288 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
289 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
290 BTRFS_HEADER_FLAG_RELOC);
291 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
292 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
294 btrfs_set_header_owner(cow, root->root_key.objectid);
296 write_extent_buffer(cow, root->fs_info->fsid,
297 btrfs_header_fsid(), BTRFS_FSID_SIZE);
299 WARN_ON(!(buf->flags & EXTENT_BAD_TRANSID) &&
300 btrfs_header_generation(buf) > trans->transid);
302 update_ref_for_cow(trans, root, buf, cow);
304 if (buf == root->node) {
306 extent_buffer_get(cow);
308 btrfs_free_extent(trans, root, buf->start, buf->len,
309 0, root->root_key.objectid, level, 0);
310 free_extent_buffer(buf);
311 add_root_to_dirty_list(root);
313 btrfs_set_node_blockptr(parent, parent_slot,
315 WARN_ON(trans->transid == 0);
316 btrfs_set_node_ptr_generation(parent, parent_slot,
318 btrfs_mark_buffer_dirty(parent);
319 WARN_ON(btrfs_header_generation(parent) != trans->transid);
321 btrfs_free_extent(trans, root, buf->start, buf->len,
322 0, root->root_key.objectid, level, 1);
324 if (!list_empty(&buf->recow)) {
325 list_del_init(&buf->recow);
326 free_extent_buffer(buf);
328 free_extent_buffer(buf);
329 btrfs_mark_buffer_dirty(cow);
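/*
 * a block may be modified in place (no COW needed) only if it was
 * created in the current transaction, has not been written out yet and
 * does not carry a stale relocation flag
 */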
334 static inline int should_cow_block(struct btrfs_trans_handle *trans,
335 struct btrfs_root *root,
336 struct extent_buffer *buf)
338 if (btrfs_header_generation(buf) == trans->transid &&
339 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
340 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
341 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
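/*
 * COW 'buf' if necessary: verify the transaction is current, skip
 * blocks that may be modified in place and otherwise hand the work to
 * __btrfs_cow_block with a search hint near the original block
 */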
346 int btrfs_cow_block(struct btrfs_trans_handle *trans,
347 struct btrfs_root *root, struct extent_buffer *buf,
348 struct extent_buffer *parent, int parent_slot,
349 struct extent_buffer **cow_ret)
354 if (trans->transaction != root->fs_info->running_transaction) {
355 printk(KERN_CRIT "trans %llu running %llu\n", trans->transid,
356 root->fs_info->running_transaction->transid);
360 if (trans->transid != root->fs_info->generation) {
361 printk(KERN_CRIT "trans %llu running %llu\n",
362 (unsigned long long)trans->transid,
363 (unsigned long long)root->fs_info->generation);
366 if (!should_cow_block(trans, root, buf)) {
371 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
372 ret = __btrfs_cow_block(trans, root, buf, parent,
373 parent_slot, cow_ret, search_start, 0);
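/*
 * compare two CPU keys, returning < 0, 0 or > 0 in memcmp fashion
 */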
377 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
379 if (k1->objectid > k2->objectid)
381 if (k1->objectid < k2->objectid)
383 if (k1->type > k2->type)
385 if (k1->type < k2->type)
387 if (k1->offset > k2->offset)
389 if (k1->offset < k2->offset)
395 * compare two keys in a memcmp fashion
397 static int btrfs_comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
401 btrfs_disk_key_to_cpu(&k1, disk);
402 return btrfs_comp_cpu_keys(&k1, k2);
406 * The leaf data grows from end-to-front in the node.
407 * This returns the address of the start of the last item,
408 * which is where the leaf data stack stops.
410 static inline unsigned int leaf_data_end(struct btrfs_root *root,
411 struct extent_buffer *leaf)
413 u32 nr = btrfs_header_nritems(leaf);
415 return BTRFS_LEAF_DATA_SIZE(root);
416 return btrfs_item_offset_nr(leaf, nr - 1);
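/*
 * sanity check a node block: the item count must be in range, the
 * first key must match the parent's key for this slot and the keys
 * must be in ascending order
 */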
419 enum btrfs_tree_block_status
420 btrfs_check_node(struct btrfs_root *root, struct btrfs_disk_key *parent_key,
421 struct extent_buffer *buf)
424 struct btrfs_key cpukey;
425 struct btrfs_disk_key key;
426 u32 nritems = btrfs_header_nritems(buf);
427 enum btrfs_tree_block_status ret = BTRFS_TREE_BLOCK_INVALID_NRITEMS;
429 if (nritems == 0 || nritems > BTRFS_NODEPTRS_PER_BLOCK(root))
432 ret = BTRFS_TREE_BLOCK_INVALID_PARENT_KEY;
433 if (parent_key && parent_key->type) {
434 btrfs_node_key(buf, &key, 0);
435 if (memcmp(parent_key, &key, sizeof(key)))
438 ret = BTRFS_TREE_BLOCK_BAD_KEY_ORDER;
439 for (i = 0; nritems > 1 && i < nritems - 1; i++) {
440 btrfs_node_key(buf, &key, i);
441 btrfs_node_key_to_cpu(buf, &cpukey, i + 1);
442 if (btrfs_comp_keys(&key, &cpukey) >= 0)
445 return BTRFS_TREE_BLOCK_CLEAN;
447 if (btrfs_header_owner(buf) == BTRFS_EXTENT_TREE_OBJECTID) {
449 btrfs_disk_key_to_cpu(&cpukey, parent_key);
451 btrfs_node_key_to_cpu(buf, &cpukey, 0);
452 btrfs_add_corrupt_extent_record(root->fs_info, &cpukey,
453 buf->start, buf->len,
454 btrfs_header_level(buf));
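/*
 * sanity check a leaf block: verify the level, item count, free space,
 * parent key, key ordering and that the item offsets and sizes stay
 * inside the leaf data area
 */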
459 enum btrfs_tree_block_status
460 btrfs_check_leaf(struct btrfs_root *root, struct btrfs_disk_key *parent_key,
461 struct extent_buffer *buf)
464 struct btrfs_key cpukey;
465 struct btrfs_disk_key key;
466 u32 nritems = btrfs_header_nritems(buf);
467 enum btrfs_tree_block_status ret = BTRFS_TREE_BLOCK_INVALID_NRITEMS;
469 if (nritems * sizeof(struct btrfs_item) > buf->len) {
470 fprintf(stderr, "invalid number of items %llu\n",
471 (unsigned long long)buf->start);
475 if (btrfs_header_level(buf) != 0) {
476 ret = BTRFS_TREE_BLOCK_INVALID_LEVEL;
477 fprintf(stderr, "leaf is not a leaf %llu\n",
478 (unsigned long long)btrfs_header_bytenr(buf));
481 if (btrfs_leaf_free_space(root, buf) < 0) {
482 ret = BTRFS_TREE_BLOCK_INVALID_FREE_SPACE;
483 fprintf(stderr, "leaf free space incorrect %llu %d\n",
484 (unsigned long long)btrfs_header_bytenr(buf),
485 btrfs_leaf_free_space(root, buf));
490 return BTRFS_TREE_BLOCK_CLEAN;
492 btrfs_item_key(buf, &key, 0);
493 if (parent_key && parent_key->type &&
494 memcmp(parent_key, &key, sizeof(key))) {
495 ret = BTRFS_TREE_BLOCK_INVALID_PARENT_KEY;
496 fprintf(stderr, "leaf parent key incorrect %llu\n",
497 (unsigned long long)btrfs_header_bytenr(buf));
500 for (i = 0; nritems > 1 && i < nritems - 1; i++) {
501 btrfs_item_key(buf, &key, i);
502 btrfs_item_key_to_cpu(buf, &cpukey, i + 1);
503 if (btrfs_comp_keys(&key, &cpukey) >= 0) {
504 ret = BTRFS_TREE_BLOCK_BAD_KEY_ORDER;
505 fprintf(stderr, "bad key ordering %d %d\n", i, i + 1);
508 if (btrfs_item_offset_nr(buf, i) !=
509 btrfs_item_end_nr(buf, i + 1)) {
510 ret = BTRFS_TREE_BLOCK_INVALID_OFFSETS;
511 fprintf(stderr, "incorrect offsets %u %u\n",
512 btrfs_item_offset_nr(buf, i),
513 btrfs_item_end_nr(buf, i + 1));
516 if (i == 0 && btrfs_item_end_nr(buf, i) !=
517 BTRFS_LEAF_DATA_SIZE(root)) {
518 ret = BTRFS_TREE_BLOCK_INVALID_OFFSETS;
519 fprintf(stderr, "bad item end %u wanted %u\n",
520 btrfs_item_end_nr(buf, i),
521 (unsigned)BTRFS_LEAF_DATA_SIZE(root));
526 for (i = 0; i < nritems; i++) {
527 if (btrfs_item_end_nr(buf, i) > BTRFS_LEAF_DATA_SIZE(root)) {
528 btrfs_item_key(buf, &key, 0);
529 btrfs_print_key(&key);
531 ret = BTRFS_TREE_BLOCK_INVALID_OFFSETS;
532 fprintf(stderr, "slot end outside of leaf %llu > %llu\n",
533 (unsigned long long)btrfs_item_end_nr(buf, i),
534 (unsigned long long)BTRFS_LEAF_DATA_SIZE(root));
539 return BTRFS_TREE_BLOCK_CLEAN;
541 if (btrfs_header_owner(buf) == BTRFS_EXTENT_TREE_OBJECTID) {
543 btrfs_disk_key_to_cpu(&cpukey, parent_key);
545 btrfs_item_key_to_cpu(buf, &cpukey, 0);
547 btrfs_add_corrupt_extent_record(root->fs_info, &cpukey,
548 buf->start, buf->len, 0);
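/*
 * check the block at 'level' of the path against the key stored in its
 * parent, using btrfs_check_leaf for leaves and btrfs_check_node for
 * interior nodes
 */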
553 static noinline int check_block(struct btrfs_root *root,
554 struct btrfs_path *path, int level)
556 struct btrfs_disk_key key;
557 struct btrfs_disk_key *key_ptr = NULL;
558 struct extent_buffer *parent;
559 enum btrfs_tree_block_status ret;
561 if (path->skip_check_block)
563 if (path->nodes[level + 1]) {
564 parent = path->nodes[level + 1];
565 btrfs_node_key(parent, &key, path->slots[level + 1]);
569 ret = btrfs_check_leaf(root, key_ptr, path->nodes[0]);
571 ret = btrfs_check_node(root, key_ptr, path->nodes[level]);
572 if (ret == BTRFS_TREE_BLOCK_CLEAN)
578 * search for key in the extent_buffer. The items start at offset p,
579 * and they are item_size apart. There are 'max' items in p.
581 * The slot in the array is returned via slot, and it points to
582 * the place where you would insert key if it is not found in the array.
585 * slot may point to max if the key is bigger than all of the keys
587 static int generic_bin_search(struct extent_buffer *eb, unsigned long p,
588 int item_size, struct btrfs_key *key,
595 unsigned long offset;
596 struct btrfs_disk_key *tmp;
599 mid = (low + high) / 2;
600 offset = p + mid * item_size;
602 tmp = (struct btrfs_disk_key *)(eb->data + offset);
603 ret = btrfs_comp_keys(tmp, key);
619 * simple bin_search frontend that does the right thing for leaves or nodes
622 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
623 int level, int *slot)
626 return generic_bin_search(eb,
627 offsetof(struct btrfs_leaf, items),
628 sizeof(struct btrfs_item),
629 key, btrfs_header_nritems(eb),
632 return generic_bin_search(eb,
633 offsetof(struct btrfs_node, ptrs),
634 sizeof(struct btrfs_key_ptr),
635 key, btrfs_header_nritems(eb),
639 struct extent_buffer *read_node_slot(struct btrfs_root *root,
640 struct extent_buffer *parent, int slot)
642 int level = btrfs_header_level(parent);
645 if (slot >= btrfs_header_nritems(parent))
651 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
653 btrfs_node_ptr_generation(parent, slot));
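/*
 * node level balancing, used during deletion to keep nodes properly
 * filled: promote an only child to be the new root, push pointers
 * between the middle node and its left/right siblings and drop nodes
 * that end up empty
 */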
656 static int balance_level(struct btrfs_trans_handle *trans,
657 struct btrfs_root *root,
658 struct btrfs_path *path, int level)
660 struct extent_buffer *right = NULL;
661 struct extent_buffer *mid;
662 struct extent_buffer *left = NULL;
663 struct extent_buffer *parent = NULL;
667 int orig_slot = path->slots[level];
673 mid = path->nodes[level];
674 WARN_ON(btrfs_header_generation(mid) != trans->transid);
676 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
678 if (level < BTRFS_MAX_LEVEL - 1) {
679 parent = path->nodes[level + 1];
680 pslot = path->slots[level + 1];
684 * deal with the case where there is only one pointer in the root
685 * by promoting the node below to a root
688 struct extent_buffer *child;
690 if (btrfs_header_nritems(mid) != 1)
693 /* promote the child to a root */
694 child = read_node_slot(root, mid, 0);
695 BUG_ON(!extent_buffer_uptodate(child));
696 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
700 add_root_to_dirty_list(root);
701 path->nodes[level] = NULL;
702 clean_tree_block(trans, root, mid);
703 wait_on_tree_block_writeback(root, mid);
704 /* once for the path */
705 free_extent_buffer(mid);
707 ret = btrfs_free_extent(trans, root, mid->start, mid->len,
708 0, root->root_key.objectid,
710 /* once for the root ptr */
711 free_extent_buffer(mid);
714 if (btrfs_header_nritems(mid) >
715 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
718 left = read_node_slot(root, parent, pslot - 1);
719 if (extent_buffer_uptodate(left)) {
720 wret = btrfs_cow_block(trans, root, left,
721 parent, pslot - 1, &left);
727 right = read_node_slot(root, parent, pslot + 1);
728 if (extent_buffer_uptodate(right)) {
729 wret = btrfs_cow_block(trans, root, right,
730 parent, pslot + 1, &right);
737 /* first, try to make some room in the middle buffer */
739 orig_slot += btrfs_header_nritems(left);
740 wret = push_node_left(trans, root, left, mid, 1);
746 * then try to empty the right most buffer into the middle
749 wret = push_node_left(trans, root, mid, right, 1);
750 if (wret < 0 && wret != -ENOSPC)
752 if (btrfs_header_nritems(right) == 0) {
753 u64 bytenr = right->start;
754 u32 blocksize = right->len;
756 clean_tree_block(trans, root, right);
757 wait_on_tree_block_writeback(root, right);
758 free_extent_buffer(right);
760 wret = btrfs_del_ptr(trans, root, path,
761 level + 1, pslot + 1);
764 wret = btrfs_free_extent(trans, root, bytenr,
766 root->root_key.objectid,
771 struct btrfs_disk_key right_key;
772 btrfs_node_key(right, &right_key, 0);
773 btrfs_set_node_key(parent, &right_key, pslot + 1);
774 btrfs_mark_buffer_dirty(parent);
777 if (btrfs_header_nritems(mid) == 1) {
779 * we're not allowed to leave a node with one item in the
780 * tree during a delete. A deletion from lower in the tree
781 * could try to delete the only pointer in this node.
782 * So, pull some keys from the left.
783 * There has to be a left pointer at this point because
784 * otherwise we would have pulled some pointers from the right.
788 wret = balance_node_right(trans, root, mid, left);
794 wret = push_node_left(trans, root, left, mid, 1);
800 if (btrfs_header_nritems(mid) == 0) {
801 /* we've managed to empty the middle node, drop it */
802 u64 bytenr = mid->start;
803 u32 blocksize = mid->len;
804 clean_tree_block(trans, root, mid);
805 wait_on_tree_block_writeback(root, mid);
806 free_extent_buffer(mid);
808 wret = btrfs_del_ptr(trans, root, path, level + 1, pslot);
811 wret = btrfs_free_extent(trans, root, bytenr, blocksize,
812 0, root->root_key.objectid,
817 /* update the parent key to reflect our changes */
818 struct btrfs_disk_key mid_key;
819 btrfs_node_key(mid, &mid_key, 0);
820 btrfs_set_node_key(parent, &mid_key, pslot);
821 btrfs_mark_buffer_dirty(parent);
824 /* update the path */
826 if (btrfs_header_nritems(left) > orig_slot) {
827 extent_buffer_get(left);
828 path->nodes[level] = left;
829 path->slots[level + 1] -= 1;
830 path->slots[level] = orig_slot;
832 free_extent_buffer(mid);
834 orig_slot -= btrfs_header_nritems(left);
835 path->slots[level] = orig_slot;
838 /* double check we haven't messed things up */
839 check_block(root, path, level);
841 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
845 free_extent_buffer(right);
847 free_extent_buffer(left);
851 /* returns zero if the push worked, non-zero otherwise */
852 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
853 struct btrfs_root *root,
854 struct btrfs_path *path, int level)
856 struct extent_buffer *right = NULL;
857 struct extent_buffer *mid;
858 struct extent_buffer *left = NULL;
859 struct extent_buffer *parent = NULL;
863 int orig_slot = path->slots[level];
868 mid = path->nodes[level];
869 WARN_ON(btrfs_header_generation(mid) != trans->transid);
871 if (level < BTRFS_MAX_LEVEL - 1) {
872 parent = path->nodes[level + 1];
873 pslot = path->slots[level + 1];
879 left = read_node_slot(root, parent, pslot - 1);
881 /* first, try to make some room in the middle buffer */
882 if (extent_buffer_uptodate(left)) {
884 left_nr = btrfs_header_nritems(left);
885 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
888 ret = btrfs_cow_block(trans, root, left, parent,
893 wret = push_node_left(trans, root,
900 struct btrfs_disk_key disk_key;
901 orig_slot += left_nr;
902 btrfs_node_key(mid, &disk_key, 0);
903 btrfs_set_node_key(parent, &disk_key, pslot);
904 btrfs_mark_buffer_dirty(parent);
905 if (btrfs_header_nritems(left) > orig_slot) {
906 path->nodes[level] = left;
907 path->slots[level + 1] -= 1;
908 path->slots[level] = orig_slot;
909 free_extent_buffer(mid);
912 btrfs_header_nritems(left);
913 path->slots[level] = orig_slot;
914 free_extent_buffer(left);
918 free_extent_buffer(left);
920 right = read_node_slot(root, parent, pslot + 1);
923 * then try to empty the right most buffer into the middle
925 if (extent_buffer_uptodate(right)) {
927 right_nr = btrfs_header_nritems(right);
928 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
931 ret = btrfs_cow_block(trans, root, right,
937 wret = balance_node_right(trans, root,
944 struct btrfs_disk_key disk_key;
946 btrfs_node_key(right, &disk_key, 0);
947 btrfs_set_node_key(parent, &disk_key, pslot + 1);
948 btrfs_mark_buffer_dirty(parent);
950 if (btrfs_header_nritems(mid) <= orig_slot) {
951 path->nodes[level] = right;
952 path->slots[level + 1] += 1;
953 path->slots[level] = orig_slot -
954 btrfs_header_nritems(mid);
955 free_extent_buffer(mid);
957 free_extent_buffer(right);
961 free_extent_buffer(right);
967 * readahead one full node of leaves
969 void reada_for_search(struct btrfs_root *root, struct btrfs_path *path,
970 int level, int slot, u64 objectid)
972 struct extent_buffer *node;
973 struct btrfs_disk_key disk_key;
979 int direction = path->reada;
980 struct extent_buffer *eb;
988 if (!path->nodes[level])
991 node = path->nodes[level];
992 search = btrfs_node_blockptr(node, slot);
993 blocksize = root->nodesize;
994 eb = btrfs_find_tree_block(root, search, blocksize);
996 free_extent_buffer(eb);
1000 highest_read = search;
1001 lowest_read = search;
1003 nritems = btrfs_header_nritems(node);
1006 if (direction < 0) {
1010 } else if (direction > 0) {
1015 if (path->reada < 0 && objectid) {
1016 btrfs_node_key(node, &disk_key, nr);
1017 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1020 search = btrfs_node_blockptr(node, nr);
1021 if ((search >= lowest_read && search <= highest_read) ||
1022 (search < lowest_read && lowest_read - search <= 32768) ||
1023 (search > highest_read && search - highest_read <= 32768)) {
1024 readahead_tree_block(root, search, blocksize,
1025 btrfs_node_ptr_generation(node, nr));
1029 if (path->reada < 2 && (nread > (256 * 1024) || nscan > 32))
1031 if (nread > (1024 * 1024) || nscan > 128)
1034 if (search < lowest_read)
1035 lowest_read = search;
1036 if (search > highest_read)
1037 highest_read = search;
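/*
 * look up the item with the given objectid, type and offset in fs_root,
 * allocating a temporary path when found_path is NULL, and return the
 * key of the item that was found via found_key
 */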
1041 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
1042 u64 iobjectid, u64 ioff, u8 key_type,
1043 struct btrfs_key *found_key)
1046 struct btrfs_key key;
1047 struct extent_buffer *eb;
1048 struct btrfs_path *path;
1050 key.type = key_type;
1051 key.objectid = iobjectid;
1054 if (found_path == NULL) {
1055 path = btrfs_alloc_path();
1061 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1062 if ((ret < 0) || (found_key == NULL))
1065 eb = path->nodes[0];
1066 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1067 ret = btrfs_next_leaf(fs_root, path);
1070 eb = path->nodes[0];
1073 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1074 if (found_key->type != key.type ||
1075 found_key->objectid != key.objectid) {
1081 if (path != found_path)
1082 btrfs_free_path(path);
1087 * look for key in the tree. path is filled in with nodes along the way
1088 * if key is found, we return zero and you can find the item in the leaf
1089 * level of the path (level 0)
1091 * If the key isn't found, the path points to the slot where it should
1092 * be inserted, and 1 is returned. If there are other errors during the
1093 * search a negative error number is returned.
1095 * if ins_len > 0, nodes and leaves will be split as we walk down the
1096 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible).
1099 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1100 *root, struct btrfs_key *key, struct btrfs_path *p, int
1103 struct extent_buffer *b;
1107 int should_reada = p->reada;
1108 u8 lowest_level = 0;
1110 lowest_level = p->lowest_level;
1111 WARN_ON(lowest_level && ins_len > 0);
1112 WARN_ON(p->nodes[0] != NULL);
1114 WARN_ON(!mutex_is_locked(&root->fs_info->fs_mutex));
1118 extent_buffer_get(b);
1120 level = btrfs_header_level(b);
1123 wret = btrfs_cow_block(trans, root, b,
1124 p->nodes[level + 1],
1125 p->slots[level + 1],
1128 free_extent_buffer(b);
1132 BUG_ON(!cow && ins_len);
1133 if (level != btrfs_header_level(b))
1135 level = btrfs_header_level(b);
1136 p->nodes[level] = b;
1137 ret = check_block(root, p, level);
1140 ret = bin_search(b, key, level, &slot);
1142 if (ret && slot > 0)
1144 p->slots[level] = slot;
1145 if ((p->search_for_split || ins_len > 0) &&
1146 btrfs_header_nritems(b) >=
1147 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1148 int sret = split_node(trans, root, p, level);
1152 b = p->nodes[level];
1153 slot = p->slots[level];
1154 } else if (ins_len < 0) {
1155 int sret = balance_level(trans, root, p,
1159 b = p->nodes[level];
1161 btrfs_release_path(p);
1164 slot = p->slots[level];
1165 BUG_ON(btrfs_header_nritems(b) == 1);
1167 /* this is only true while dropping a snapshot */
1168 if (level == lowest_level)
1172 reada_for_search(root, p, level, slot,
1175 b = read_node_slot(root, b, slot);
1176 if (!extent_buffer_uptodate(b))
1179 p->slots[level] = slot;
1181 ins_len > btrfs_leaf_free_space(root, b)) {
1182 int sret = split_leaf(trans, root, key,
1183 p, ins_len, ret == 0);
1195 * adjust the pointers going up the tree, starting at level
1196 * making sure the right key of each node points to 'key'.
1197 * This is used after shifting pointers to the left, so it stops
1198 * fixing up pointers when a given leaf/node is not in slot 0 of the
1201 void btrfs_fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
1202 struct btrfs_disk_key *key, int level)
1205 struct extent_buffer *t;
1207 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1208 int tslot = path->slots[i];
1209 if (!path->nodes[i])
1212 btrfs_set_node_key(t, key, tslot);
1213 btrfs_mark_buffer_dirty(path->nodes[i]);
1222 * This function isn't completely safe. It is the caller's responsibility
1223 * to make sure the new key doesn't break the key ordering.
1225 int btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
1226 struct btrfs_key *new_key)
1228 struct btrfs_disk_key disk_key;
1229 struct extent_buffer *eb;
1232 eb = path->nodes[0];
1233 slot = path->slots[0];
1235 btrfs_item_key(eb, &disk_key, slot - 1);
1236 if (btrfs_comp_keys(&disk_key, new_key) >= 0)
1239 if (slot < btrfs_header_nritems(eb) - 1) {
1240 btrfs_item_key(eb, &disk_key, slot + 1);
1241 if (btrfs_comp_keys(&disk_key, new_key) <= 0)
1245 btrfs_cpu_key_to_disk(&disk_key, new_key);
1246 btrfs_set_item_key(eb, &disk_key, slot);
1247 btrfs_mark_buffer_dirty(eb);
1249 btrfs_fixup_low_keys(root, path, &disk_key, 1);
1254 * update an item key without the safety checks. This is meant to be called by
1257 void btrfs_set_item_key_unsafe(struct btrfs_root *root,
1258 struct btrfs_path *path,
1259 struct btrfs_key *new_key)
1261 struct btrfs_disk_key disk_key;
1262 struct extent_buffer *eb;
1265 eb = path->nodes[0];
1266 slot = path->slots[0];
1268 btrfs_cpu_key_to_disk(&disk_key, new_key);
1269 btrfs_set_item_key(eb, &disk_key, slot);
1270 btrfs_mark_buffer_dirty(eb);
1272 btrfs_fixup_low_keys(root, path, &disk_key, 1);
1276 * try to push data from one node into the next node left in the tree.
1279 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1280 * error, and > 0 if there was no room in the left hand block.
1282 static int push_node_left(struct btrfs_trans_handle *trans,
1283 struct btrfs_root *root, struct extent_buffer *dst,
1284 struct extent_buffer *src, int empty)
1291 src_nritems = btrfs_header_nritems(src);
1292 dst_nritems = btrfs_header_nritems(dst);
1293 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1294 WARN_ON(btrfs_header_generation(src) != trans->transid);
1295 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1297 if (!empty && src_nritems <= 8)
1300 if (push_items <= 0) {
1305 push_items = min(src_nritems, push_items);
1306 if (push_items < src_nritems) {
1307 /* leave at least 8 pointers in the node if
1308 * we aren't going to empty it
1310 if (src_nritems - push_items < 8) {
1311 if (push_items <= 8)
1317 push_items = min(src_nritems - 8, push_items);
1319 copy_extent_buffer(dst, src,
1320 btrfs_node_key_ptr_offset(dst_nritems),
1321 btrfs_node_key_ptr_offset(0),
1322 push_items * sizeof(struct btrfs_key_ptr));
1324 if (push_items < src_nritems) {
1325 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1326 btrfs_node_key_ptr_offset(push_items),
1327 (src_nritems - push_items) *
1328 sizeof(struct btrfs_key_ptr));
1330 btrfs_set_header_nritems(src, src_nritems - push_items);
1331 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1332 btrfs_mark_buffer_dirty(src);
1333 btrfs_mark_buffer_dirty(dst);
1339 * try to push data from one node into the next node right in the tree.
1342 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
1343 * error, and > 0 if there was no room in the right hand block.
1345 * this will only push up to 1/2 the contents of the left node over
1347 static int balance_node_right(struct btrfs_trans_handle *trans,
1348 struct btrfs_root *root,
1349 struct extent_buffer *dst,
1350 struct extent_buffer *src)
1358 WARN_ON(btrfs_header_generation(src) != trans->transid);
1359 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1361 src_nritems = btrfs_header_nritems(src);
1362 dst_nritems = btrfs_header_nritems(dst);
1363 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1364 if (push_items <= 0) {
1368 if (src_nritems < 4) {
1372 max_push = src_nritems / 2 + 1;
1373 /* don't try to empty the node */
1374 if (max_push >= src_nritems) {
1378 if (max_push < push_items)
1379 push_items = max_push;
1381 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
1382 btrfs_node_key_ptr_offset(0),
1384 sizeof(struct btrfs_key_ptr));
1386 copy_extent_buffer(dst, src,
1387 btrfs_node_key_ptr_offset(0),
1388 btrfs_node_key_ptr_offset(src_nritems - push_items),
1389 push_items * sizeof(struct btrfs_key_ptr));
1391 btrfs_set_header_nritems(src, src_nritems - push_items);
1392 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1394 btrfs_mark_buffer_dirty(src);
1395 btrfs_mark_buffer_dirty(dst);
1401 * helper function to insert a new root level in the tree.
1402 * A new node is allocated, and a single item is inserted to
1403 * point to the existing root
1405 * returns zero on success or < 0 on failure.
1407 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
1408 struct btrfs_root *root,
1409 struct btrfs_path *path, int level)
1412 struct extent_buffer *lower;
1413 struct extent_buffer *c;
1414 struct extent_buffer *old;
1415 struct btrfs_disk_key lower_key;
1417 BUG_ON(path->nodes[level]);
1418 BUG_ON(path->nodes[level-1] != root->node);
1420 lower = path->nodes[level-1];
1422 btrfs_item_key(lower, &lower_key, 0);
1424 btrfs_node_key(lower, &lower_key, 0);
1426 c = btrfs_alloc_free_block(trans, root, root->nodesize,
1427 root->root_key.objectid, &lower_key,
1428 level, root->node->start, 0);
1433 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
1434 btrfs_set_header_nritems(c, 1);
1435 btrfs_set_header_level(c, level);
1436 btrfs_set_header_bytenr(c, c->start);
1437 btrfs_set_header_generation(c, trans->transid);
1438 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
1439 btrfs_set_header_owner(c, root->root_key.objectid);
1441 write_extent_buffer(c, root->fs_info->fsid,
1442 btrfs_header_fsid(), BTRFS_FSID_SIZE);
1444 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
1445 btrfs_header_chunk_tree_uuid(c),
1448 btrfs_set_node_key(c, &lower_key, 0);
1449 btrfs_set_node_blockptr(c, 0, lower->start);
1450 lower_gen = btrfs_header_generation(lower);
1451 WARN_ON(lower_gen != trans->transid);
1453 btrfs_set_node_ptr_generation(c, 0, lower_gen);
1455 btrfs_mark_buffer_dirty(c);
1460 /* the super has an extra ref to root->node */
1461 free_extent_buffer(old);
1463 add_root_to_dirty_list(root);
1464 extent_buffer_get(c);
1465 path->nodes[level] = c;
1466 path->slots[level] = 0;
1471 * worker function to insert a single pointer in a node.
1472 * the node should have enough room for the pointer already
1474 * slot and level indicate where you want the key to go, and
1475 * blocknr is the block the key points to.
1477 * returns zero on success and < 0 on any error
1479 static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
1480 *root, struct btrfs_path *path, struct btrfs_disk_key
1481 *key, u64 bytenr, int slot, int level)
1483 struct extent_buffer *lower;
1486 BUG_ON(!path->nodes[level]);
1487 lower = path->nodes[level];
1488 nritems = btrfs_header_nritems(lower);
1491 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
1493 if (slot != nritems) {
1494 memmove_extent_buffer(lower,
1495 btrfs_node_key_ptr_offset(slot + 1),
1496 btrfs_node_key_ptr_offset(slot),
1497 (nritems - slot) * sizeof(struct btrfs_key_ptr));
1499 btrfs_set_node_key(lower, key, slot);
1500 btrfs_set_node_blockptr(lower, slot, bytenr);
1501 WARN_ON(trans->transid == 0);
1502 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
1503 btrfs_set_header_nritems(lower, nritems + 1);
1504 btrfs_mark_buffer_dirty(lower);
1509 * split the node at the specified level in path in two.
1510 * The path is corrected to point to the appropriate node after the split
1512 * Before splitting this tries to make some room in the node by pushing
1513 * left and right, if either one works, it returns right away.
1515 * returns 0 on success and < 0 on failure
1517 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
1518 *root, struct btrfs_path *path, int level)
1520 struct extent_buffer *c;
1521 struct extent_buffer *split;
1522 struct btrfs_disk_key disk_key;
1528 c = path->nodes[level];
1529 WARN_ON(btrfs_header_generation(c) != trans->transid);
1530 if (c == root->node) {
1531 /* trying to split the root, lets make a new one */
1532 ret = insert_new_root(trans, root, path, level + 1);
1536 ret = push_nodes_for_insert(trans, root, path, level);
1537 c = path->nodes[level];
1538 if (!ret && btrfs_header_nritems(c) <
1539 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
1545 c_nritems = btrfs_header_nritems(c);
1546 mid = (c_nritems + 1) / 2;
1547 btrfs_node_key(c, &disk_key, mid);
1549 split = btrfs_alloc_free_block(trans, root, root->nodesize,
1550 root->root_key.objectid,
1551 &disk_key, level, c->start, 0);
1553 return PTR_ERR(split);
1555 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
1556 btrfs_set_header_level(split, btrfs_header_level(c));
1557 btrfs_set_header_bytenr(split, split->start);
1558 btrfs_set_header_generation(split, trans->transid);
1559 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
1560 btrfs_set_header_owner(split, root->root_key.objectid);
1561 write_extent_buffer(split, root->fs_info->fsid,
1562 btrfs_header_fsid(), BTRFS_FSID_SIZE);
1563 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
1564 btrfs_header_chunk_tree_uuid(split),
1568 copy_extent_buffer(split, c,
1569 btrfs_node_key_ptr_offset(0),
1570 btrfs_node_key_ptr_offset(mid),
1571 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
1572 btrfs_set_header_nritems(split, c_nritems - mid);
1573 btrfs_set_header_nritems(c, mid);
1576 btrfs_mark_buffer_dirty(c);
1577 btrfs_mark_buffer_dirty(split);
1579 wret = insert_ptr(trans, root, path, &disk_key, split->start,
1580 path->slots[level + 1] + 1,
1585 if (path->slots[level] >= mid) {
1586 path->slots[level] -= mid;
1587 free_extent_buffer(c);
1588 path->nodes[level] = split;
1589 path->slots[level + 1] += 1;
1591 free_extent_buffer(split);
1597 * how many bytes are required to store the items in a leaf. start
1598 * and nr indicate which items in the leaf to check. This totals up the
1599 * space used both by the item structs and the item data
1601 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
1604 int nritems = btrfs_header_nritems(l);
1605 int end = min(nritems, start + nr) - 1;
1609 data_len = btrfs_item_end_nr(l, start);
1610 data_len = data_len - btrfs_item_offset_nr(l, end);
1611 data_len += sizeof(struct btrfs_item) * nr;
1612 WARN_ON(data_len < 0);
1617 * The space between the end of the leaf items and
1618 * the start of the leaf data. IOW, how much room
1619 * the leaf has left for both items and data
1621 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf)
1623 u32 nodesize = (root ? BTRFS_LEAF_DATA_SIZE(root) : leaf->len);
1624 int nritems = btrfs_header_nritems(leaf);
1626 ret = nodesize - leaf_space_used(leaf, 0, nritems);
1628 printk("leaf free space ret %d, leaf data size %u, used %d nritems %d\n",
1629 ret, nodesize, leaf_space_used(leaf, 0, nritems),
1636 * push some data in the path leaf to the right, trying to free up at
1637 * least data_size bytes. returns zero if the push worked, nonzero otherwise
1639 * returns 1 if the push failed because the other node didn't have enough
1640 * room, 0 if everything worked out and < 0 if there were major errors.
1642 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
1643 *root, struct btrfs_path *path, int data_size,
1646 struct extent_buffer *left = path->nodes[0];
1647 struct extent_buffer *right;
1648 struct extent_buffer *upper;
1649 struct btrfs_disk_key disk_key;
1655 struct btrfs_item *item;
1663 slot = path->slots[1];
1664 if (!path->nodes[1]) {
1667 upper = path->nodes[1];
1668 if (slot >= btrfs_header_nritems(upper) - 1)
1671 right = read_node_slot(root, upper, slot + 1);
1672 if (!extent_buffer_uptodate(right)) {
1674 return PTR_ERR(right);
1677 free_space = btrfs_leaf_free_space(root, right);
1678 if (free_space < data_size) {
1679 free_extent_buffer(right);
1683 /* cow and double check */
1684 ret = btrfs_cow_block(trans, root, right, upper,
1687 free_extent_buffer(right);
1690 free_space = btrfs_leaf_free_space(root, right);
1691 if (free_space < data_size) {
1692 free_extent_buffer(right);
1696 left_nritems = btrfs_header_nritems(left);
1697 if (left_nritems == 0) {
1698 free_extent_buffer(right);
1707 i = left_nritems - 1;
1709 item = btrfs_item_nr(i);
1711 if (path->slots[0] == i)
1712 push_space += data_size + sizeof(*item);
1714 this_item_size = btrfs_item_size(left, item);
1715 if (this_item_size + sizeof(*item) + push_space > free_space)
1718 push_space += this_item_size + sizeof(*item);
1724 if (push_items == 0) {
1725 free_extent_buffer(right);
1729 if (!empty && push_items == left_nritems)
1732 /* push left to right */
1733 right_nritems = btrfs_header_nritems(right);
1735 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
1736 push_space -= leaf_data_end(root, left);
1738 /* make room in the right data area */
1739 data_end = leaf_data_end(root, right);
1740 memmove_extent_buffer(right,
1741 btrfs_leaf_data(right) + data_end - push_space,
1742 btrfs_leaf_data(right) + data_end,
1743 BTRFS_LEAF_DATA_SIZE(root) - data_end);
1745 /* copy from the left data area */
1746 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
1747 BTRFS_LEAF_DATA_SIZE(root) - push_space,
1748 btrfs_leaf_data(left) + leaf_data_end(root, left),
1751 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
1752 btrfs_item_nr_offset(0),
1753 right_nritems * sizeof(struct btrfs_item));
1755 /* copy the items from left to right */
1756 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
1757 btrfs_item_nr_offset(left_nritems - push_items),
1758 push_items * sizeof(struct btrfs_item));
1760 /* update the item pointers */
1761 right_nritems += push_items;
1762 btrfs_set_header_nritems(right, right_nritems);
1763 push_space = BTRFS_LEAF_DATA_SIZE(root);
1764 for (i = 0; i < right_nritems; i++) {
1765 item = btrfs_item_nr(i);
1766 push_space -= btrfs_item_size(right, item);
1767 btrfs_set_item_offset(right, item, push_space);
1770 left_nritems -= push_items;
1771 btrfs_set_header_nritems(left, left_nritems);
1774 btrfs_mark_buffer_dirty(left);
1775 btrfs_mark_buffer_dirty(right);
1777 btrfs_item_key(right, &disk_key, 0);
1778 btrfs_set_node_key(upper, &disk_key, slot + 1);
1779 btrfs_mark_buffer_dirty(upper);
1781 /* then fixup the leaf pointer in the path */
1782 if (path->slots[0] >= left_nritems) {
1783 path->slots[0] -= left_nritems;
1784 free_extent_buffer(path->nodes[0]);
1785 path->nodes[0] = right;
1786 path->slots[1] += 1;
1788 free_extent_buffer(right);
1793 * push some data in the path leaf to the left, trying to free up at
1794 * least data_size bytes. returns zero if the push worked, nonzero otherwise
1796 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
1797 *root, struct btrfs_path *path, int data_size,
1800 struct btrfs_disk_key disk_key;
1801 struct extent_buffer *right = path->nodes[0];
1802 struct extent_buffer *left;
1808 struct btrfs_item *item;
1809 u32 old_left_nritems;
1814 u32 old_left_item_size;
1816 slot = path->slots[1];
1819 if (!path->nodes[1])
1822 right_nritems = btrfs_header_nritems(right);
1823 if (right_nritems == 0) {
1827 left = read_node_slot(root, path->nodes[1], slot - 1);
1828 free_space = btrfs_leaf_free_space(root, left);
1829 if (free_space < data_size) {
1830 free_extent_buffer(left);
1834 /* cow and double check */
1835 ret = btrfs_cow_block(trans, root, left,
1836 path->nodes[1], slot - 1, &left);
1838 /* we hit -ENOSPC, but it isn't fatal here */
1839 free_extent_buffer(left);
1843 free_space = btrfs_leaf_free_space(root, left);
1844 if (free_space < data_size) {
1845 free_extent_buffer(left);
1852 nr = right_nritems - 1;
1854 for (i = 0; i < nr; i++) {
1855 item = btrfs_item_nr(i);
1857 if (path->slots[0] == i)
1858 push_space += data_size + sizeof(*item);
1860 this_item_size = btrfs_item_size(right, item);
1861 if (this_item_size + sizeof(*item) + push_space > free_space)
1865 push_space += this_item_size + sizeof(*item);
1868 if (push_items == 0) {
1869 free_extent_buffer(left);
1872 if (!empty && push_items == btrfs_header_nritems(right))
1875 /* push data from right to left */
1876 copy_extent_buffer(left, right,
1877 btrfs_item_nr_offset(btrfs_header_nritems(left)),
1878 btrfs_item_nr_offset(0),
1879 push_items * sizeof(struct btrfs_item));
1881 push_space = BTRFS_LEAF_DATA_SIZE(root) -
1882 btrfs_item_offset_nr(right, push_items - 1);
1884 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
1885 leaf_data_end(root, left) - push_space,
1886 btrfs_leaf_data(right) +
1887 btrfs_item_offset_nr(right, push_items - 1),
1889 old_left_nritems = btrfs_header_nritems(left);
1890 BUG_ON(old_left_nritems == 0);
1892 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
1893 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
1896 item = btrfs_item_nr(i);
1897 ioff = btrfs_item_offset(left, item);
1898 btrfs_set_item_offset(left, item,
1899 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
1901 btrfs_set_header_nritems(left, old_left_nritems + push_items);
1903 /* fixup right node */
1904 if (push_items > right_nritems) {
1905 printk("push items %d nr %u\n", push_items, right_nritems);
1909 if (push_items < right_nritems) {
1910 push_space = btrfs_item_offset_nr(right, push_items - 1) -
1911 leaf_data_end(root, right);
1912 memmove_extent_buffer(right, btrfs_leaf_data(right) +
1913 BTRFS_LEAF_DATA_SIZE(root) - push_space,
1914 btrfs_leaf_data(right) +
1915 leaf_data_end(root, right), push_space);
1917 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
1918 btrfs_item_nr_offset(push_items),
1919 (btrfs_header_nritems(right) - push_items) *
1920 sizeof(struct btrfs_item));
1922 right_nritems -= push_items;
1923 btrfs_set_header_nritems(right, right_nritems);
1924 push_space = BTRFS_LEAF_DATA_SIZE(root);
1925 for (i = 0; i < right_nritems; i++) {
1926 item = btrfs_item_nr(i);
1927 push_space = push_space - btrfs_item_size(right, item);
1928 btrfs_set_item_offset(right, item, push_space);
1931 btrfs_mark_buffer_dirty(left);
1933 btrfs_mark_buffer_dirty(right);
1935 btrfs_item_key(right, &disk_key, 0);
1936 btrfs_fixup_low_keys(root, path, &disk_key, 1);
1938 /* then fixup the leaf pointer in the path */
1939 if (path->slots[0] < push_items) {
1940 path->slots[0] += old_left_nritems;
1941 free_extent_buffer(path->nodes[0]);
1942 path->nodes[0] = left;
1943 path->slots[1] -= 1;
1945 free_extent_buffer(left);
1946 path->slots[0] -= push_items;
1948 BUG_ON(path->slots[0] < 0);
1953 * split the path's leaf in two, making sure there is at least data_size
1954 * available for the resulting leaf level of the path.
1956 * returns 0 if all went well and < 0 on failure.
1958 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
1959 struct btrfs_root *root,
1960 struct btrfs_path *path,
1961 struct extent_buffer *l,
1962 struct extent_buffer *right,
1963 int slot, int mid, int nritems)
1970 struct btrfs_disk_key disk_key;
1972 nritems = nritems - mid;
1973 btrfs_set_header_nritems(right, nritems);
1974 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
1976 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
1977 btrfs_item_nr_offset(mid),
1978 nritems * sizeof(struct btrfs_item));
1980 copy_extent_buffer(right, l,
1981 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
1982 data_copy_size, btrfs_leaf_data(l) +
1983 leaf_data_end(root, l), data_copy_size);
1985 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
1986 btrfs_item_end_nr(l, mid);
1988 for (i = 0; i < nritems; i++) {
1989 struct btrfs_item *item = btrfs_item_nr(i);
1990 u32 ioff = btrfs_item_offset(right, item);
1991 btrfs_set_item_offset(right, item, ioff + rt_data_off);
1994 btrfs_set_header_nritems(l, mid);
1996 btrfs_item_key(right, &disk_key, 0);
1997 wret = insert_ptr(trans, root, path, &disk_key, right->start,
1998 path->slots[1] + 1, 1);
2002 btrfs_mark_buffer_dirty(right);
2003 btrfs_mark_buffer_dirty(l);
2004 BUG_ON(path->slots[0] != slot);
2007 free_extent_buffer(path->nodes[0]);
2008 path->nodes[0] = right;
2009 path->slots[0] -= mid;
2010 path->slots[1] += 1;
2012 free_extent_buffer(right);
2015 BUG_ON(path->slots[0] < 0);
2021 * split the path's leaf in two, making sure there is at least data_size
2022 * available for the resulting leaf level of the path.
2024 * returns 0 if all went well and < 0 on failure.
2026 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2027 struct btrfs_root *root,
2028 struct btrfs_key *ins_key,
2029 struct btrfs_path *path, int data_size,
2032 struct btrfs_disk_key disk_key;
2033 struct extent_buffer *l;
2037 struct extent_buffer *right;
2041 int num_doubles = 0;
2044 slot = path->slots[0];
2045 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2046 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2049 /* first try to make some room by pushing left and right */
2050 if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
2051 wret = push_leaf_right(trans, root, path, data_size, 0);
2055 wret = push_leaf_left(trans, root, path, data_size, 0);
2061 /* did the pushes work? */
2062 if (btrfs_leaf_free_space(root, l) >= data_size)
2066 if (!path->nodes[1]) {
2067 ret = insert_new_root(trans, root, path, 1);
2074 slot = path->slots[0];
2075 nritems = btrfs_header_nritems(l);
2076 mid = (nritems + 1) / 2;
2080 leaf_space_used(l, mid, nritems - mid) + data_size >
2081 BTRFS_LEAF_DATA_SIZE(root)) {
2082 if (slot >= nritems) {
2086 if (mid != nritems &&
2087 leaf_space_used(l, mid, nritems - mid) +
2088 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2094 if (leaf_space_used(l, 0, mid) + data_size >
2095 BTRFS_LEAF_DATA_SIZE(root)) {
2096 if (!extend && data_size && slot == 0) {
2098 } else if ((extend || !data_size) && slot == 0) {
2102 if (mid != nritems &&
2103 leaf_space_used(l, mid, nritems - mid) +
2104 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2112 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2114 btrfs_item_key(l, &disk_key, mid);
2116 right = btrfs_alloc_free_block(trans, root, root->nodesize,
2117 root->root_key.objectid,
2118 &disk_key, 0, l->start, 0);
2119 if (IS_ERR(right)) {
2121 return PTR_ERR(right);
2124 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2125 btrfs_set_header_bytenr(right, right->start);
2126 btrfs_set_header_generation(right, trans->transid);
2127 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
2128 btrfs_set_header_owner(right, root->root_key.objectid);
2129 btrfs_set_header_level(right, 0);
2130 write_extent_buffer(right, root->fs_info->fsid,
2131 btrfs_header_fsid(), BTRFS_FSID_SIZE);
2133 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2134 btrfs_header_chunk_tree_uuid(right),
2139 btrfs_set_header_nritems(right, 0);
2140 wret = insert_ptr(trans, root, path,
2141 &disk_key, right->start,
2142 path->slots[1] + 1, 1);
2146 free_extent_buffer(path->nodes[0]);
2147 path->nodes[0] = right;
2149 path->slots[1] += 1;
2151 btrfs_set_header_nritems(right, 0);
2152 wret = insert_ptr(trans, root, path,
2158 free_extent_buffer(path->nodes[0]);
2159 path->nodes[0] = right;
2161 if (path->slots[1] == 0) {
2162 btrfs_fixup_low_keys(root, path,
2166 btrfs_mark_buffer_dirty(right);
2170 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
2174 BUG_ON(num_doubles != 0);
2183 * This function splits a single item into two items,
2184 * giving 'new_key' to the new item and splitting the
2185 * old one at split_offset (from the start of the item).
2187 * The path may be released by this operation. After
2188 * the split, the path is pointing to the old item. The
2189 * new item is going to be in the same node as the old one.
2191 * Note, the item being split must be small enough to live alone on
2192 * a tree block with room for one extra struct btrfs_item
2194 * This allows us to split the item in place, keeping a lock on the
2195 * leaf the entire time.
2197 int btrfs_split_item(struct btrfs_trans_handle *trans,
2198 struct btrfs_root *root,
2199 struct btrfs_path *path,
2200 struct btrfs_key *new_key,
2201 unsigned long split_offset)
2204 struct extent_buffer *leaf;
2205 struct btrfs_key orig_key;
2206 struct btrfs_item *item;
2207 struct btrfs_item *new_item;
2212 struct btrfs_disk_key disk_key;
2215 leaf = path->nodes[0];
2216 btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
2217 if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
2220 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2221 btrfs_release_path(path);
2223 path->search_for_split = 1;
2225 ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
2226 path->search_for_split = 0;
2228 /* if our item isn't there or got smaller, return now */
2229 if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
2234 ret = split_leaf(trans, root, &orig_key, path, 0, 0);
2237 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
2238 leaf = path->nodes[0];
2241 item = btrfs_item_nr(path->slots[0]);
2242 orig_offset = btrfs_item_offset(leaf, item);
2243 item_size = btrfs_item_size(leaf, item);
2246 buf = kmalloc(item_size, GFP_NOFS);
2248 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
2249 path->slots[0]), item_size);
2250 slot = path->slots[0] + 1;
2251 leaf = path->nodes[0];
2253 nritems = btrfs_header_nritems(leaf);
2255 if (slot != nritems) {
2256 /* shift the items */
2257 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
2258 btrfs_item_nr_offset(slot),
2259 (nritems - slot) * sizeof(struct btrfs_item));
2263 btrfs_cpu_key_to_disk(&disk_key, new_key);
2264 btrfs_set_item_key(leaf, &disk_key, slot);
2266 new_item = btrfs_item_nr(slot);
2268 btrfs_set_item_offset(leaf, new_item, orig_offset);
2269 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
2271 btrfs_set_item_offset(leaf, item,
2272 orig_offset + item_size - split_offset);
2273 btrfs_set_item_size(leaf, item, split_offset);
2275 btrfs_set_header_nritems(leaf, nritems + 1);
2277 /* write the data for the start of the original item */
2278 write_extent_buffer(leaf, buf,
2279 btrfs_item_ptr_offset(leaf, path->slots[0]),
2282 /* write the data for the new item */
2283 write_extent_buffer(leaf, buf + split_offset,
2284 btrfs_item_ptr_offset(leaf, slot),
2285 item_size - split_offset);
2286 btrfs_mark_buffer_dirty(leaf);
2289 if (btrfs_leaf_free_space(root, leaf) < 0) {
2290 btrfs_print_leaf(root, leaf);
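/*
 * make the item pointed to by the path smaller. new_size is the new
 * item size; from_end selects whether the data is cut from the end or
 * from the front, in which case the item key offset is adjusted
 */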
2297 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
2298 struct btrfs_root *root,
2299 struct btrfs_path *path,
2300 u32 new_size, int from_end)
2304 struct extent_buffer *leaf;
2305 struct btrfs_item *item;
2307 unsigned int data_end;
2308 unsigned int old_data_start;
2309 unsigned int old_size;
2310 unsigned int size_diff;
2313 leaf = path->nodes[0];
2314 slot = path->slots[0];
2316 old_size = btrfs_item_size_nr(leaf, slot);
2317 if (old_size == new_size)
2320 nritems = btrfs_header_nritems(leaf);
2321 data_end = leaf_data_end(root, leaf);
2323 old_data_start = btrfs_item_offset_nr(leaf, slot);
2325 size_diff = old_size - new_size;
2328 BUG_ON(slot >= nritems);
2331 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2333 /* first correct the data pointers */
2334 for (i = slot; i < nritems; i++) {
2336 item = btrfs_item_nr(i);
2337 ioff = btrfs_item_offset(leaf, item);
2338 btrfs_set_item_offset(leaf, item, ioff + size_diff);
2341 /* shift the data */
2343 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2344 data_end + size_diff, btrfs_leaf_data(leaf) +
2345 data_end, old_data_start + new_size - data_end);
2347 struct btrfs_disk_key disk_key;
2350 btrfs_item_key(leaf, &disk_key, slot);
2352 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
2354 struct btrfs_file_extent_item *fi;
2356 fi = btrfs_item_ptr(leaf, slot,
2357 struct btrfs_file_extent_item);
2358 fi = (struct btrfs_file_extent_item *)(
2359 (unsigned long)fi - size_diff);
2361 if (btrfs_file_extent_type(leaf, fi) ==
2362 BTRFS_FILE_EXTENT_INLINE) {
2363 ptr = btrfs_item_ptr_offset(leaf, slot);
2364 memmove_extent_buffer(leaf, ptr,
2366 offsetof(struct btrfs_file_extent_item,
2371 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2372 data_end + size_diff, btrfs_leaf_data(leaf) +
2373 data_end, old_data_start - data_end);
2375 offset = btrfs_disk_key_offset(&disk_key);
2376 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
2377 btrfs_set_item_key(leaf, &disk_key, slot);
2379 btrfs_fixup_low_keys(root, path, &disk_key, 1);
2382 item = btrfs_item_nr(slot);
2383 btrfs_set_item_size(leaf, item, new_size);
2384 btrfs_mark_buffer_dirty(leaf);
2387 if (btrfs_leaf_free_space(root, leaf) < 0) {
2388 btrfs_print_leaf(root, leaf);
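/*
 * grow the item pointed to by the path by data_size bytes, shifting the
 * leaf data down to make room; the leaf must have enough free space
 */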
2394 int btrfs_extend_item(struct btrfs_trans_handle *trans,
2395 struct btrfs_root *root, struct btrfs_path *path,
2400 struct extent_buffer *leaf;
2401 struct btrfs_item *item;
2403 unsigned int data_end;
2404 unsigned int old_data;
2405 unsigned int old_size;
2408 leaf = path->nodes[0];
2410 nritems = btrfs_header_nritems(leaf);
2411 data_end = leaf_data_end(root, leaf);
2413 if (btrfs_leaf_free_space(root, leaf) < data_size) {
2414 btrfs_print_leaf(root, leaf);
2417 slot = path->slots[0];
2418 old_data = btrfs_item_end_nr(leaf, slot);
2421 if (slot >= nritems) {
2422 btrfs_print_leaf(root, leaf);
2423 printk("slot %d too large, nritems %d\n", slot, nritems);
2428 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2430 /* first correct the data pointers */
2431 for (i = slot; i < nritems; i++) {
2433 item = btrfs_item_nr(i);
2434 ioff = btrfs_item_offset(leaf, item);
2435 btrfs_set_item_offset(leaf, item, ioff - data_size);
2438 /* shift the data */
2439 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2440 data_end - data_size, btrfs_leaf_data(leaf) +
2441 data_end, old_data - data_end);
2443 data_end = old_data;
2444 old_size = btrfs_item_size_nr(leaf, slot);
2445 item = btrfs_item_nr(slot);
2446 btrfs_set_item_size(leaf, item, old_size + data_size);
2447 btrfs_mark_buffer_dirty(leaf);
2450 if (btrfs_leaf_free_space(root, leaf) < 0) {
2451 btrfs_print_leaf(root, leaf);
2458 * Given keys and data sizes, insert 'nr' empty items into the tree for the caller to fill in (see the sketch below this function).
2459 * This does all the path init required, making room in the tree if needed.
2461 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
2462 struct btrfs_root *root,
2463 struct btrfs_path *path,
2464 struct btrfs_key *cpu_key, u32 *data_size,
2467 struct extent_buffer *leaf;
2468 struct btrfs_item *item;
2475 unsigned int data_end;
2476 struct btrfs_disk_key disk_key;
2478 for (i = 0; i < nr; i++) {
2479 total_data += data_size[i];
2482 /* create a root if there isn't one */
2486 total_size = total_data + nr * sizeof(struct btrfs_item);
2487 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
2494 leaf = path->nodes[0];
2496 nritems = btrfs_header_nritems(leaf);
2497 data_end = leaf_data_end(root, leaf);
2499 if (btrfs_leaf_free_space(root, leaf) < total_size) {
2500 btrfs_print_leaf(root, leaf);
2501 printk("not enough freespace need %u have %d\n",
2502 total_size, btrfs_leaf_free_space(root, leaf));
2506 slot = path->slots[0];
2509 if (slot != nritems) {
2510 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
2512 if (old_data < data_end) {
2513 btrfs_print_leaf(root, leaf);
2514 printk("slot %d old_data %d data_end %d\n",
2515 slot, old_data, data_end);
2519 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2521 /* first correct the data pointers */
2522 for (i = slot; i < nritems; i++) {
2525 item = btrfs_item_nr(i);
2526 ioff = btrfs_item_offset(leaf, item);
2527 btrfs_set_item_offset(leaf, item, ioff - total_data);
2530 /* shift the items */
2531 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
2532 btrfs_item_nr_offset(slot),
2533 (nritems - slot) * sizeof(struct btrfs_item));
2535 /* shift the data */
2536 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2537 data_end - total_data, btrfs_leaf_data(leaf) +
2538 data_end, old_data - data_end);
2539 data_end = old_data;
2542 /* setup the item for the new data */
2543 for (i = 0; i < nr; i++) {
2544 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
2545 btrfs_set_item_key(leaf, &disk_key, slot + i);
2546 item = btrfs_item_nr(slot + i);
2547 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
2548 data_end -= data_size[i];
2549 btrfs_set_item_size(leaf, item, data_size[i]);
2551 btrfs_set_header_nritems(leaf, nritems + nr);
2552 btrfs_mark_buffer_dirty(leaf);
2556 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
2557 btrfs_fixup_low_keys(root, path, &disk_key, 1);
2560 if (btrfs_leaf_free_space(root, leaf) < 0) {
2561 btrfs_print_leaf(root, leaf);
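/*
 * A minimal usage sketch (illustrative only; the helper name and its
 * parameters are made up): reserve two adjacent items with a single
 * search and copy the payloads in afterwards.  The keys must be in
 * ascending order and both must sort between the neighbouring items;
 * the transaction and root come from the caller.
 */
static int example_insert_two_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_key *keys,
				    void **data, u32 *sizes)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* makes room for both items; path->slots[0] points at the first one */
	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	for (i = 0; i < 2; i++)
		write_extent_buffer(leaf, data[i],
				    btrfs_item_ptr_offset(leaf,
							  path->slots[0] + i),
				    sizes[i]);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}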
2570 * Given a key and some data, insert an item into the tree.
2571 * This does all the path init required, making room in the tree if needed.
2573 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
2574 *root, struct btrfs_key *cpu_key, void *data, u32
2578 struct btrfs_path *path;
2579 struct extent_buffer *leaf;
2582 path = btrfs_alloc_path();
2586 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
2588 leaf = path->nodes[0];
2589 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2590 write_extent_buffer(leaf, data, ptr, data_size);
2591 btrfs_mark_buffer_dirty(leaf);
2593 btrfs_free_path(path);
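/*
 * Typical call pattern (sketch): callers that already have the payload in
 * memory insert it in one step, e.g.
 *
 *	ret = btrfs_insert_item(trans, root, &key, &buf, sizeof(buf));
 *
 * where 'key' and 'buf' are the caller's; btrfs_insert_empty_item() is the
 * variant to use when the data is cheaper to build directly in the leaf.
 */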
2598 * delete the pointer from a given node.
2600 * If the delete empties a node, the node is removed from the tree,
2601 * continuing all the way to the root if required. The root is converted into
2602 * a leaf if all the nodes are emptied.
2604 int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2605 struct btrfs_path *path, int level, int slot)
2607 struct extent_buffer *parent = path->nodes[level];
2611 nritems = btrfs_header_nritems(parent);
2612 if (slot != nritems - 1) {
2613 memmove_extent_buffer(parent,
2614 btrfs_node_key_ptr_offset(slot),
2615 btrfs_node_key_ptr_offset(slot + 1),
2616 sizeof(struct btrfs_key_ptr) *
2617 (nritems - slot - 1));
2620 btrfs_set_header_nritems(parent, nritems);
2621 if (nritems == 0 && parent == root->node) {
2622 BUG_ON(btrfs_header_level(root->node) != 1);
2623 /* just turn the root into a leaf and break */
2624 btrfs_set_header_level(root->node, 0);
2625 } else if (slot == 0) {
2626 struct btrfs_disk_key disk_key;
2628 btrfs_node_key(parent, &disk_key, 0);
2629 btrfs_fixup_low_keys(root, path, &disk_key, level + 1);
2631 btrfs_mark_buffer_dirty(parent);
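/*
 * Note on the slot 0 case above: removing the first pointer changes the
 * lowest key stored in this node, so btrfs_fixup_low_keys() has to push
 * the new first key up into the parent nodes.
 */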
2636 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
2639 * This deletes the pointer in path->nodes[1] and frees the leaf
2640 * block extent. zero is returned if it all worked out, < 0 otherwise.
2642 * The path must have already been setup for deleting the leaf, including
2643 * all the proper balancing. path->nodes[1] must be locked.
2645 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
2646 struct btrfs_root *root,
2647 struct btrfs_path *path,
2648 struct extent_buffer *leaf)
2652 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
2653 ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]);
2657 ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
2658 0, root->root_key.objectid, 0, 0);
2663 * delete 'nr' items starting at 'slot' at the leaf level in path. If that empties
2664 * the leaf, remove it from the tree
2666 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2667 struct btrfs_path *path, int slot, int nr)
2669 struct extent_buffer *leaf;
2670 struct btrfs_item *item;
2678 leaf = path->nodes[0];
2679 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
2681 for (i = 0; i < nr; i++)
2682 dsize += btrfs_item_size_nr(leaf, slot + i);
2684 nritems = btrfs_header_nritems(leaf);
2686 if (slot + nr != nritems) {
2687 int data_end = leaf_data_end(root, leaf);
2689 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2691 btrfs_leaf_data(leaf) + data_end,
2692 last_off - data_end);
2694 for (i = slot + nr; i < nritems; i++) {
2697 item = btrfs_item_nr(i);
2698 ioff = btrfs_item_offset(leaf, item);
2699 btrfs_set_item_offset(leaf, item, ioff + dsize);
2702 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
2703 btrfs_item_nr_offset(slot + nr),
2704 sizeof(struct btrfs_item) *
2705 (nritems - slot - nr));
2707 btrfs_set_header_nritems(leaf, nritems - nr);
2710 /* delete the leaf if we've emptied it */
2712 if (leaf == root->node) {
2713 btrfs_set_header_level(leaf, 0);
2715 clean_tree_block(trans, root, leaf);
2716 wait_on_tree_block_writeback(root, leaf);
2718 wret = btrfs_del_leaf(trans, root, path, leaf);
2724 int used = leaf_space_used(leaf, 0, nritems);
2726 struct btrfs_disk_key disk_key;
2728 btrfs_item_key(leaf, &disk_key, 0);
2729 btrfs_fixup_low_keys(root, path, &disk_key, 1);
2732 /* delete the leaf if it is mostly empty */
2733 if (used < BTRFS_LEAF_DATA_SIZE(root) / 4) {
2734 /* push_leaf_left fixes the path.
2735 * make sure the path still points to our leaf
2736 * for possible call to del_ptr below
2738 slot = path->slots[1];
2739 extent_buffer_get(leaf);
2741 wret = push_leaf_left(trans, root, path, 1, 1);
2742 if (wret < 0 && wret != -ENOSPC)
2745 if (path->nodes[0] == leaf &&
2746 btrfs_header_nritems(leaf)) {
2747 wret = push_leaf_right(trans, root, path, 1, 1);
2748 if (wret < 0 && wret != -ENOSPC)
2752 if (btrfs_header_nritems(leaf) == 0) {
2753 clean_tree_block(trans, root, leaf);
2754 wait_on_tree_block_writeback(root, leaf);
2756 path->slots[1] = slot;
2757 ret = btrfs_del_leaf(trans, root, path, leaf);
2759 free_extent_buffer(leaf);
2762 btrfs_mark_buffer_dirty(leaf);
2763 free_extent_buffer(leaf);
2766 btrfs_mark_buffer_dirty(leaf);
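/*
 * A minimal deletion sketch (illustrative; the helper name is made up):
 * position the path on the exact key with a COW search set up for a
 * delete, then drop that single item.
 */
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len -1 asks the search to pre-balance nodes for a delete */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;	/* the exact key is not in the tree */
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);

	btrfs_free_path(path);
	return ret;
}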
2773 * walk up the tree as far as required to find the previous leaf.
2774 * returns 0 if it found something or 1 if there are no lesser leaves.
2775 * returns < 0 on io errors.
2777 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
2781 struct extent_buffer *c;
2782 struct extent_buffer *next = NULL;
2784 while (level < BTRFS_MAX_LEVEL) {
2785 if (!path->nodes[level])
2788 slot = path->slots[level];
2789 c = path->nodes[level];
2792 if (level == BTRFS_MAX_LEVEL)
2798 next = read_node_slot(root, c, slot);
2799 if (!extent_buffer_uptodate(next)) {
2801 return PTR_ERR(next);
2806 path->slots[level] = slot;
2809 c = path->nodes[level];
2810 free_extent_buffer(c);
2811 slot = btrfs_header_nritems(next);
2814 path->nodes[level] = next;
2815 path->slots[level] = slot;
2818 next = read_node_slot(root, next, slot);
2819 if (!extent_buffer_uptodate(next)) {
2821 return PTR_ERR(next);
2829 * walk up the tree as far as required to find the next leaf.
2830 * returns 0 if it found something or 1 if there are no greater leaves.
2831 * returns < 0 on io errors.
2833 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
2837 struct extent_buffer *c;
2838 struct extent_buffer *next = NULL;
2840 while (level < BTRFS_MAX_LEVEL) {
2841 if (!path->nodes[level])
2844 slot = path->slots[level] + 1;
2845 c = path->nodes[level];
2846 if (slot >= btrfs_header_nritems(c)) {
2848 if (level == BTRFS_MAX_LEVEL)
2854 reada_for_search(root, path, level, slot, 0);
2856 next = read_node_slot(root, c, slot);
2857 if (!extent_buffer_uptodate(next))
2861 path->slots[level] = slot;
2864 c = path->nodes[level];
2865 free_extent_buffer(c);
2866 path->nodes[level] = next;
2867 path->slots[level] = 0;
2871 reada_for_search(root, path, level, 0, 0);
2872 next = read_node_slot(root, next, 0);
2873 if (!extent_buffer_uptodate(next))
2879 int btrfs_previous_item(struct btrfs_root *root,
2880 struct btrfs_path *path, u64 min_objectid,
2883 struct btrfs_key found_key;
2884 struct extent_buffer *leaf;
2889 if (path->slots[0] == 0) {
2890 ret = btrfs_prev_leaf(root, path);
2896 leaf = path->nodes[0];
2897 nritems = btrfs_header_nritems(leaf);
2900 if (path->slots[0] == nritems)
2903 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2904 if (found_key.objectid < min_objectid)
2906 if (found_key.type == type)
2908 if (found_key.objectid == min_objectid &&
2909 found_key.type < type)
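/*
 * Typical use (sketch): after a search has positioned 'path', step back to
 * the closest preceding item of a wanted type, e.g.
 *
 *	ret = btrfs_previous_item(root, path, objectid, BTRFS_EXTENT_DATA_KEY);
 *
 * 0 means the path now points at such an item, 1 means there is none and
 * < 0 is an I/O error; 'objectid' here is the caller's lower bound.
 */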
2916 * search in extent tree to find a previous Metadata/Data extent item with min_objectid.
2919 * returns 0 if something is found, 1 if nothing was found and < 0 on error
2921 int btrfs_previous_extent_item(struct btrfs_root *root,
2922 struct btrfs_path *path, u64 min_objectid)
2924 struct btrfs_key found_key;
2925 struct extent_buffer *leaf;
2930 if (path->slots[0] == 0) {
2931 ret = btrfs_prev_leaf(root, path);
2937 leaf = path->nodes[0];
2938 nritems = btrfs_header_nritems(leaf);
2941 if (path->slots[0] == nritems)
2944 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2945 if (found_key.objectid < min_objectid)
2947 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
2948 found_key.type == BTRFS_METADATA_ITEM_KEY)
2950 if (found_key.objectid == min_objectid &&
2951 found_key.type < BTRFS_EXTENT_ITEM_KEY)
2958 * Search the extent tree to find the next metadata/data extent item.
2959 * Callers need to check for the no-holes or skinny-metadata features themselves.
2961 int btrfs_next_extent_item(struct btrfs_root *root,
2962 struct btrfs_path *path, u64 max_objectid)
2964 struct btrfs_key found_key;
2968 ret = btrfs_next_item(root, path);
2971 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2973 if (found_key.objectid > max_objectid)
2975 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
2976 found_key.type == BTRFS_METADATA_ITEM_KEY)