2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
20 #include "transaction.h"
21 #include "print-tree.h"
23 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
24 *root, struct btrfs_path *path, int level);
25 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
26 *root, struct btrfs_key *ins_key,
27 struct btrfs_path *path, int data_size, int extend);
28 static int push_node_left(struct btrfs_trans_handle *trans,
29 struct btrfs_root *root, struct extent_buffer *dst,
30 struct extent_buffer *src, int empty);
31 static int balance_node_right(struct btrfs_trans_handle *trans,
32 struct btrfs_root *root,
33 struct extent_buffer *dst_buf,
34 struct extent_buffer *src_buf);
35 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
36 struct btrfs_path *path, int level, int slot);
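/* zero a path so it is ready to be used for a new search */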
38 inline void btrfs_init_path(struct btrfs_path *p)
40 memset(p, 0, sizeof(*p));
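/*
 * allocate and initialize a new path.  Returns NULL if the allocation
 * fails; the caller must release it with btrfs_free_path().
 */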
43 struct btrfs_path *btrfs_alloc_path(void)
45 struct btrfs_path *path;
46 path = kmalloc(sizeof(struct btrfs_path), GFP_NOFS);
48 btrfs_init_path(path);
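/* drop every reference held by the path, then free the path itself */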
54 void btrfs_free_path(struct btrfs_path *p)
56 btrfs_release_path(NULL, p);
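/*
 * drop the extent buffer reference held at each level of the path and
 * zero the path out so it can be reused for another search
 */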
60 void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
63 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
66 free_extent_buffer(p->nodes[i]);
68 memset(p, 0, sizeof(*p));
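/*
 * queue a cow-only root on the dirty list so it is updated at transaction
 * commit time.  Roots that are not tracked or are already queued are skipped.
 */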
71 static void add_root_to_dirty_list(struct btrfs_root *root)
73 if (root->track_dirty && list_empty(&root->dirty_list)) {
74 list_add(&root->dirty_list,
75 &root->fs_info->dirty_cowonly_roots);
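/*
 * copy the root node 'buf' into a freshly allocated block owned by
 * new_root_objectid and take references on everything the copy points to.
 * The original block is left untouched and *cow_ret is set to the copy.
 */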
79 int btrfs_copy_root(struct btrfs_trans_handle *trans,
80 struct btrfs_root *root,
81 struct extent_buffer *buf,
82 struct extent_buffer **cow_ret, u64 new_root_objectid)
84 struct extent_buffer *cow;
87 struct btrfs_root *new_root;
88 struct btrfs_disk_key disk_key;
90 new_root = kmalloc(sizeof(*new_root), GFP_NOFS);
94 memcpy(new_root, root, sizeof(*new_root));
95 new_root->root_key.objectid = new_root_objectid;
97 WARN_ON(root->ref_cows && trans->transid !=
98 root->fs_info->running_transaction->transid);
99 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
101 level = btrfs_header_level(buf);
103 btrfs_item_key(buf, &disk_key, 0);
105 btrfs_node_key(buf, &disk_key, 0);
106 cow = btrfs_alloc_free_block(trans, new_root, buf->len,
107 new_root_objectid, &disk_key,
108 level, buf->start, 0);
114 copy_extent_buffer(cow, buf, 0, 0, cow->len);
115 btrfs_set_header_bytenr(cow, cow->start);
116 btrfs_set_header_generation(cow, trans->transid);
117 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
118 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
119 BTRFS_HEADER_FLAG_RELOC);
120 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
121 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
123 btrfs_set_header_owner(cow, new_root_objectid);
125 write_extent_buffer(cow, root->fs_info->fsid,
126 (unsigned long)btrfs_header_fsid(cow),
129 WARN_ON(btrfs_header_generation(buf) > trans->transid);
130 ret = btrfs_inc_ref(trans, new_root, cow, 0);
136 btrfs_mark_buffer_dirty(cow);
142 * check if the tree block can be shared by multiple trees
144 int btrfs_block_can_be_shared(struct btrfs_root *root,
145 struct extent_buffer *buf)
148 * Tree blocks not in reference counted trees and tree roots
149 * are never shared. If a block was allocated after the last
150 * snapshot and the block was not allocated by tree relocation,
151 * we know the block is not shared.
153 if (root->ref_cows &&
154 buf != root->node && buf != root->commit_root &&
155 (btrfs_header_generation(buf) <=
156 btrfs_root_last_snapshot(&root->root_item) ||
157 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
159 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
160 if (root->ref_cows &&
161 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
167 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
168 struct btrfs_root *root,
169 struct extent_buffer *buf,
170 struct extent_buffer *cow)
179 * Backrefs update rules:
181 * Always use full backrefs for extent pointers in tree block
182 * allocated by tree relocation.
184 * If a shared tree block is no longer referenced by its owner
185 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
186 * use full backrefs for extent pointers in tree block.
188 * If a tree block is being relocated
189 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
190 * use full backrefs for extent pointers in tree block.
191 * The reason for this is some operations (such as drop tree)
192 * are only allowed for blocks that use full backrefs.
195 if (btrfs_block_can_be_shared(root, buf)) {
196 ret = btrfs_lookup_extent_info(trans, root, buf->start,
197 buf->len, &refs, &flags);
202 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
203 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
204 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
209 owner = btrfs_header_owner(buf);
210 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
211 owner == BTRFS_TREE_RELOC_OBJECTID);
214 if ((owner == root->root_key.objectid ||
215 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
216 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
217 ret = btrfs_inc_ref(trans, root, buf, 1);
220 if (root->root_key.objectid ==
221 BTRFS_TREE_RELOC_OBJECTID) {
222 ret = btrfs_dec_ref(trans, root, buf, 0);
224 ret = btrfs_inc_ref(trans, root, cow, 1);
227 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
230 if (root->root_key.objectid ==
231 BTRFS_TREE_RELOC_OBJECTID)
232 ret = btrfs_inc_ref(trans, root, cow, 1);
234 ret = btrfs_inc_ref(trans, root, cow, 0);
237 if (new_flags != 0) {
238 ret = btrfs_set_block_flags(trans, root, buf->start,
239 buf->len, new_flags);
243 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
244 if (root->root_key.objectid ==
245 BTRFS_TREE_RELOC_OBJECTID)
246 ret = btrfs_inc_ref(trans, root, cow, 1);
248 ret = btrfs_inc_ref(trans, root, cow, 0);
250 ret = btrfs_dec_ref(trans, root, buf, 1);
253 clean_tree_block(trans, root, buf);
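/*
 * do the actual copy-on-write of a tree block: allocate a replacement block
 * (preferring search_start), copy 'buf' into it, update the backrefs via
 * update_ref_for_cow, point the parent slot (or the tree root) at the new
 * block and free the old one.
 */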
258 int __btrfs_cow_block(struct btrfs_trans_handle *trans,
259 struct btrfs_root *root,
260 struct extent_buffer *buf,
261 struct extent_buffer *parent, int parent_slot,
262 struct extent_buffer **cow_ret,
263 u64 search_start, u64 empty_size)
265 struct extent_buffer *cow;
266 struct btrfs_disk_key disk_key;
269 WARN_ON(root->ref_cows && trans->transid !=
270 root->fs_info->running_transaction->transid);
271 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
273 level = btrfs_header_level(buf);
276 btrfs_item_key(buf, &disk_key, 0);
278 btrfs_node_key(buf, &disk_key, 0);
280 cow = btrfs_alloc_free_block(trans, root, buf->len,
281 root->root_key.objectid, &disk_key,
282 level, search_start, empty_size);
286 copy_extent_buffer(cow, buf, 0, 0, cow->len);
287 btrfs_set_header_bytenr(cow, cow->start);
288 btrfs_set_header_generation(cow, trans->transid);
289 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
290 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
291 BTRFS_HEADER_FLAG_RELOC);
292 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
293 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
295 btrfs_set_header_owner(cow, root->root_key.objectid);
297 write_extent_buffer(cow, root->fs_info->fsid,
298 (unsigned long)btrfs_header_fsid(cow),
301 WARN_ON(btrfs_header_generation(buf) > trans->transid);
303 update_ref_for_cow(trans, root, buf, cow);
305 if (buf == root->node) {
307 extent_buffer_get(cow);
309 btrfs_free_extent(trans, root, buf->start, buf->len,
310 0, root->root_key.objectid, level, 0);
311 free_extent_buffer(buf);
312 add_root_to_dirty_list(root);
314 btrfs_set_node_blockptr(parent, parent_slot,
316 WARN_ON(trans->transid == 0);
317 btrfs_set_node_ptr_generation(parent, parent_slot,
319 btrfs_mark_buffer_dirty(parent);
320 WARN_ON(btrfs_header_generation(parent) != trans->transid);
322 btrfs_free_extent(trans, root, buf->start, buf->len,
323 0, root->root_key.objectid, level, 1);
325 free_extent_buffer(buf);
326 btrfs_mark_buffer_dirty(cow);
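/*
 * a block can be modified in place only if it was allocated in the running
 * transaction, has not been written out yet, and does not carry the RELOC
 * flag for a non-relocation root.  Everything else has to be COWed first.
 */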
331 static inline int should_cow_block(struct btrfs_trans_handle *trans,
332 struct btrfs_root *root,
333 struct extent_buffer *buf)
335 if (btrfs_header_generation(buf) == trans->transid &&
336 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
337 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
338 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
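/*
 * top level COW entry point.  Sanity checks the transaction, returns the
 * buffer unchanged when should_cow_block() says no COW is needed, and
 * otherwise picks an allocation hint near the buffer and calls
 * __btrfs_cow_block().
 */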
343 int btrfs_cow_block(struct btrfs_trans_handle *trans,
344 struct btrfs_root *root, struct extent_buffer *buf,
345 struct extent_buffer *parent, int parent_slot,
346 struct extent_buffer **cow_ret)
351 if (trans->transaction != root->fs_info->running_transaction) {
352 printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
353 root->fs_info->running_transaction->transid);
357 if (trans->transid != root->fs_info->generation) {
358 printk(KERN_CRIT "trans %llu running %llu\n",
359 (unsigned long long)trans->transid,
360 (unsigned long long)root->fs_info->generation);
363 if (!should_cow_block(trans, root, buf)) {
368 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
369 ret = __btrfs_cow_block(trans, root, buf, parent,
370 parent_slot, cow_ret, search_start, 0);
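/*
 * helper for the defrag code: two block numbers are considered close if
 * they are within 32k of each other on disk
 */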
375 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
377 if (blocknr < other && other - (blocknr + blocksize) < 32768)
379 if (blocknr > other && blocknr - (other + blocksize) < 32768)
386 * compare two keys in a memcmp fashion
388 int btrfs_comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
392 btrfs_disk_key_to_cpu(&k1, disk);
394 if (k1.objectid > k2->objectid)
396 if (k1.objectid < k2->objectid)
398 if (k1.type > k2->type)
400 if (k1.type < k2->type)
402 if (k1.offset > k2->offset)
404 if (k1.offset < k2->offset)
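/*
 * defragmentation helper: walk the children of 'parent' starting at
 * start_slot and COW any child that is not already close to its neighbours
 * on disk, pulling the new copies toward *last_ret.  'progress' limits the
 * walk to keys at or beyond it, and cache_only restricts the pass to
 * cached level 1 nodes.
 */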
411 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
412 struct btrfs_root *root, struct extent_buffer *parent,
413 int start_slot, int cache_only, u64 *last_ret,
414 struct btrfs_key *progress)
416 struct extent_buffer *cur;
417 struct extent_buffer *tmp;
420 u64 search_start = *last_ret;
430 int progress_passed = 0;
431 struct btrfs_disk_key disk_key;
433 parent_level = btrfs_header_level(parent);
434 if (cache_only && parent_level != 1)
437 if (trans->transaction != root->fs_info->running_transaction) {
438 printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
439 root->fs_info->running_transaction->transid);
442 if (trans->transid != root->fs_info->generation) {
443 printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
444 root->fs_info->generation);
448 parent_nritems = btrfs_header_nritems(parent);
449 blocksize = btrfs_level_size(root, parent_level - 1);
450 end_slot = parent_nritems;
452 if (parent_nritems == 1)
455 for (i = start_slot; i < end_slot; i++) {
458 if (!parent->map_token) {
459 map_extent_buffer(parent,
460 btrfs_node_key_ptr_offset(i),
461 sizeof(struct btrfs_key_ptr),
462 &parent->map_token, &parent->kaddr,
463 &parent->map_start, &parent->map_len,
466 btrfs_node_key(parent, &disk_key, i);
467 if (!progress_passed && btrfs_comp_keys(&disk_key, progress) < 0)
471 blocknr = btrfs_node_blockptr(parent, i);
472 gen = btrfs_node_ptr_generation(parent, i);
474 last_block = blocknr;
477 other = btrfs_node_blockptr(parent, i - 1);
478 close = close_blocks(blocknr, other, blocksize);
480 if (close && i < end_slot - 2) {
481 other = btrfs_node_blockptr(parent, i + 1);
482 close = close_blocks(blocknr, other, blocksize);
485 last_block = blocknr;
488 if (parent->map_token) {
489 unmap_extent_buffer(parent, parent->map_token,
491 parent->map_token = NULL;
494 cur = btrfs_find_tree_block(root, blocknr, blocksize);
496 uptodate = btrfs_buffer_uptodate(cur, gen);
499 if (!cur || !uptodate) {
501 free_extent_buffer(cur);
505 cur = read_tree_block(root, blocknr,
507 } else if (!uptodate) {
508 btrfs_read_buffer(cur, gen);
511 if (search_start == 0)
512 search_start = last_block;
514 err = __btrfs_cow_block(trans, root, cur, parent, i,
517 (end_slot - i) * blocksize));
519 free_extent_buffer(cur);
522 search_start = tmp->start;
523 last_block = tmp->start;
524 *last_ret = search_start;
525 if (parent_level == 1)
526 btrfs_clear_buffer_defrag(tmp);
527 free_extent_buffer(tmp);
529 if (parent->map_token) {
530 unmap_extent_buffer(parent, parent->map_token,
532 parent->map_token = NULL;
539 * The leaf data grows from end-to-front in the node.
540 * this returns the address of the start of the last item,
541 * which is the stop of the leaf data stack
543 static inline unsigned int leaf_data_end(struct btrfs_root *root,
544 struct extent_buffer *leaf)
546 u32 nr = btrfs_header_nritems(leaf);
548 return BTRFS_LEAF_DATA_SIZE(root);
549 return btrfs_item_offset_nr(leaf, nr - 1);
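/*
 * debugging sanity checks for a node: verify the key ordering around the
 * current slot and that the parent's key and block pointer agree with this
 * node
 */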
552 static int check_node(struct btrfs_root *root, struct btrfs_path *path,
555 struct extent_buffer *parent = NULL;
556 struct extent_buffer *node = path->nodes[level];
557 struct btrfs_disk_key parent_key;
558 struct btrfs_disk_key node_key;
561 struct btrfs_key cpukey;
562 u32 nritems = btrfs_header_nritems(node);
564 if (path->nodes[level + 1])
565 parent = path->nodes[level + 1];
567 slot = path->slots[level];
568 BUG_ON(nritems == 0);
570 parent_slot = path->slots[level + 1];
571 btrfs_node_key(parent, &parent_key, parent_slot);
572 btrfs_node_key(node, &node_key, 0);
573 BUG_ON(memcmp(&parent_key, &node_key,
574 sizeof(struct btrfs_disk_key)));
575 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
576 btrfs_header_bytenr(node));
578 BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
580 btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
581 btrfs_node_key(node, &node_key, slot);
582 BUG_ON(btrfs_comp_keys(&node_key, &cpukey) <= 0);
584 if (slot < nritems - 1) {
585 btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
586 btrfs_node_key(node, &node_key, slot);
587 BUG_ON(btrfs_comp_keys(&node_key, &cpukey) >= 0);
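/*
 * debugging sanity checks for a leaf: key ordering, item offsets and sizes,
 * and agreement with the parent's key and block pointer
 */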
592 static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
595 struct extent_buffer *leaf = path->nodes[level];
596 struct extent_buffer *parent = NULL;
598 struct btrfs_key cpukey;
599 struct btrfs_disk_key parent_key;
600 struct btrfs_disk_key leaf_key;
601 int slot = path->slots[0];
603 u32 nritems = btrfs_header_nritems(leaf);
605 if (path->nodes[level + 1])
606 parent = path->nodes[level + 1];
612 parent_slot = path->slots[level + 1];
613 btrfs_node_key(parent, &parent_key, parent_slot);
614 btrfs_item_key(leaf, &leaf_key, 0);
616 BUG_ON(memcmp(&parent_key, &leaf_key,
617 sizeof(struct btrfs_disk_key)));
618 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
619 btrfs_header_bytenr(leaf));
622 for (i = 0; nritems > 1 && i < nritems - 2; i++) {
623 btrfs_item_key_to_cpu(leaf, &cpukey, i + 1);
624 btrfs_item_key(leaf, &leaf_key, i);
625 if (btrfs_comp_keys(&leaf_key, &cpukey) >= 0) {
626 btrfs_print_leaf(root, leaf);
627 printk("slot %d offset bad key\n", i);
630 if (btrfs_item_offset_nr(leaf, i) !=
631 btrfs_item_end_nr(leaf, i + 1)) {
632 btrfs_print_leaf(root, leaf);
633 printk("slot %d offset bad\n", i);
637 if (btrfs_item_offset_nr(leaf, i) +
638 btrfs_item_size_nr(leaf, i) !=
639 BTRFS_LEAF_DATA_SIZE(root)) {
640 btrfs_print_leaf(root, leaf);
641 printk("slot %d first offset bad\n", i);
647 if (btrfs_item_size_nr(leaf, nritems - 1) > 4096) {
648 btrfs_print_leaf(root, leaf);
649 printk("slot %d bad size\n", nritems - 1);
654 if (slot != 0 && slot < nritems - 1) {
655 btrfs_item_key(leaf, &leaf_key, slot);
656 btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
657 if (btrfs_comp_keys(&leaf_key, &cpukey) <= 0) {
658 btrfs_print_leaf(root, leaf);
659 printk("slot %d offset bad key\n", slot);
662 if (btrfs_item_offset_nr(leaf, slot - 1) !=
663 btrfs_item_end_nr(leaf, slot)) {
664 btrfs_print_leaf(root, leaf);
665 printk("slot %d offset bad\n", slot);
669 if (slot < nritems - 1) {
670 btrfs_item_key(leaf, &leaf_key, slot);
671 btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
672 BUG_ON(btrfs_comp_keys(&leaf_key, &cpukey) >= 0);
673 if (btrfs_item_offset_nr(leaf, slot) !=
674 btrfs_item_end_nr(leaf, slot + 1)) {
675 btrfs_print_leaf(root, leaf);
676 printk("slot %d offset bad\n", slot);
680 BUG_ON(btrfs_item_offset_nr(leaf, 0) +
681 btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
685 static int noinline check_block(struct btrfs_root *root,
686 struct btrfs_path *path, int level)
690 struct extent_buffer *buf = path->nodes[level];
692 if (memcmp_extent_buffer(buf, root->fs_info->fsid,
693 (unsigned long)btrfs_header_fsid(buf),
695 printk("warning bad block %Lu\n", buf->start);
700 return check_leaf(root, path, level);
701 return check_node(root, path, level);
705 * search for key in the extent_buffer. The items start at offset p,
706 * and they are item_size apart. There are 'max' items in p.
708 * the slot in the array is returned via slot, and it points to
709 * the place where you would insert key if it is not found in the array.
712 * slot may point to max if the key is bigger than all of the keys
714 static int generic_bin_search(struct extent_buffer *eb, unsigned long p,
715 int item_size, struct btrfs_key *key,
722 unsigned long offset;
723 struct btrfs_disk_key *tmp;
726 mid = (low + high) / 2;
727 offset = p + mid * item_size;
729 tmp = (struct btrfs_disk_key *)(eb->data + offset);
730 ret = btrfs_comp_keys(tmp, key);
746 * simple bin_search frontend that does the right thing for leaves vs nodes
749 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
750 int level, int *slot)
753 return generic_bin_search(eb,
754 offsetof(struct btrfs_leaf, items),
755 sizeof(struct btrfs_item),
756 key, btrfs_header_nritems(eb),
759 return generic_bin_search(eb,
760 offsetof(struct btrfs_node, ptrs),
761 sizeof(struct btrfs_key_ptr),
762 key, btrfs_header_nritems(eb),
768 static struct extent_buffer *read_node_slot(struct btrfs_root *root,
769 struct extent_buffer *parent, int slot)
771 int level = btrfs_header_level(parent);
774 if (slot >= btrfs_header_nritems(parent))
779 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
780 btrfs_level_size(root, level - 1),
781 btrfs_node_ptr_generation(parent, slot));
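/*
 * node level balancing, used while deleting items.  If the node at 'level'
 * has become too empty, collapse the root if possible, otherwise push
 * pointers to or pull pointers from the left and right siblings, and fix
 * the path up so it still points at the original slot.
 */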
784 static int balance_level(struct btrfs_trans_handle *trans,
785 struct btrfs_root *root,
786 struct btrfs_path *path, int level)
788 struct extent_buffer *right = NULL;
789 struct extent_buffer *mid;
790 struct extent_buffer *left = NULL;
791 struct extent_buffer *parent = NULL;
795 int orig_slot = path->slots[level];
801 mid = path->nodes[level];
802 WARN_ON(btrfs_header_generation(mid) != trans->transid);
804 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
806 if (level < BTRFS_MAX_LEVEL - 1)
807 parent = path->nodes[level + 1];
808 pslot = path->slots[level + 1];
811 * deal with the case where there is only one pointer in the root
812 * by promoting the node below to a root
815 struct extent_buffer *child;
817 if (btrfs_header_nritems(mid) != 1)
820 /* promote the child to a root */
821 child = read_node_slot(root, mid, 0);
823 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
827 add_root_to_dirty_list(root);
828 path->nodes[level] = NULL;
829 clean_tree_block(trans, root, mid);
830 wait_on_tree_block_writeback(root, mid);
831 /* once for the path */
832 free_extent_buffer(mid);
834 ret = btrfs_free_extent(trans, root, mid->start, mid->len,
835 0, root->root_key.objectid,
837 /* once for the root ptr */
838 free_extent_buffer(mid);
841 if (btrfs_header_nritems(mid) >
842 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
845 left = read_node_slot(root, parent, pslot - 1);
847 wret = btrfs_cow_block(trans, root, left,
848 parent, pslot - 1, &left);
854 right = read_node_slot(root, parent, pslot + 1);
856 wret = btrfs_cow_block(trans, root, right,
857 parent, pslot + 1, &right);
864 /* first, try to make some room in the middle buffer */
866 orig_slot += btrfs_header_nritems(left);
867 wret = push_node_left(trans, root, left, mid, 1);
873 * then try to empty the right most buffer into the middle
876 wret = push_node_left(trans, root, mid, right, 1);
877 if (wret < 0 && wret != -ENOSPC)
879 if (btrfs_header_nritems(right) == 0) {
880 u64 bytenr = right->start;
881 u32 blocksize = right->len;
883 clean_tree_block(trans, root, right);
884 wait_on_tree_block_writeback(root, right);
885 free_extent_buffer(right);
887 wret = del_ptr(trans, root, path, level + 1, pslot +
891 wret = btrfs_free_extent(trans, root, bytenr,
893 root->root_key.objectid,
898 struct btrfs_disk_key right_key;
899 btrfs_node_key(right, &right_key, 0);
900 btrfs_set_node_key(parent, &right_key, pslot + 1);
901 btrfs_mark_buffer_dirty(parent);
904 if (btrfs_header_nritems(mid) == 1) {
906 * we're not allowed to leave a node with one item in the
907 * tree during a delete. A deletion from lower in the tree
908 * could try to delete the only pointer in this node.
909 * So, pull some keys from the left.
910 * There has to be a left pointer at this point because
911 * otherwise we would have pulled some pointers from the right.
915 wret = balance_node_right(trans, root, mid, left);
921 wret = push_node_left(trans, root, left, mid, 1);
927 if (btrfs_header_nritems(mid) == 0) {
928 /* we've managed to empty the middle node, drop it */
929 u64 bytenr = mid->start;
930 u32 blocksize = mid->len;
931 clean_tree_block(trans, root, mid);
932 wait_on_tree_block_writeback(root, mid);
933 free_extent_buffer(mid);
935 wret = del_ptr(trans, root, path, level + 1, pslot);
938 wret = btrfs_free_extent(trans, root, bytenr, blocksize,
939 0, root->root_key.objectid,
944 /* update the parent key to reflect our changes */
945 struct btrfs_disk_key mid_key;
946 btrfs_node_key(mid, &mid_key, 0);
947 btrfs_set_node_key(parent, &mid_key, pslot);
948 btrfs_mark_buffer_dirty(parent);
951 /* update the path */
953 if (btrfs_header_nritems(left) > orig_slot) {
954 extent_buffer_get(left);
955 path->nodes[level] = left;
956 path->slots[level + 1] -= 1;
957 path->slots[level] = orig_slot;
959 free_extent_buffer(mid);
961 orig_slot -= btrfs_header_nritems(left);
962 path->slots[level] = orig_slot;
965 /* double check we haven't messed things up */
966 check_block(root, path, level);
968 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
972 free_extent_buffer(right);
974 free_extent_buffer(left);
978 /* returns zero if the push worked, non-zero otherwise */
979 static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans,
980 struct btrfs_root *root,
981 struct btrfs_path *path, int level)
983 struct extent_buffer *right = NULL;
984 struct extent_buffer *mid;
985 struct extent_buffer *left = NULL;
986 struct extent_buffer *parent = NULL;
990 int orig_slot = path->slots[level];
995 mid = path->nodes[level];
996 WARN_ON(btrfs_header_generation(mid) != trans->transid);
998 if (level < BTRFS_MAX_LEVEL - 1)
999 parent = path->nodes[level + 1];
1000 pslot = path->slots[level + 1];
1005 left = read_node_slot(root, parent, pslot - 1);
1007 /* first, try to make some room in the middle buffer */
1010 left_nr = btrfs_header_nritems(left);
1011 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1014 ret = btrfs_cow_block(trans, root, left, parent,
1019 wret = push_node_left(trans, root,
1026 struct btrfs_disk_key disk_key;
1027 orig_slot += left_nr;
1028 btrfs_node_key(mid, &disk_key, 0);
1029 btrfs_set_node_key(parent, &disk_key, pslot);
1030 btrfs_mark_buffer_dirty(parent);
1031 if (btrfs_header_nritems(left) > orig_slot) {
1032 path->nodes[level] = left;
1033 path->slots[level + 1] -= 1;
1034 path->slots[level] = orig_slot;
1035 free_extent_buffer(mid);
1038 btrfs_header_nritems(left);
1039 path->slots[level] = orig_slot;
1040 free_extent_buffer(left);
1044 free_extent_buffer(left);
1046 right = read_node_slot(root, parent, pslot + 1);
1049 * then try to empty the right most buffer into the middle
1053 right_nr = btrfs_header_nritems(right);
1054 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1057 ret = btrfs_cow_block(trans, root, right,
1063 wret = balance_node_right(trans, root,
1070 struct btrfs_disk_key disk_key;
1072 btrfs_node_key(right, &disk_key, 0);
1073 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1074 btrfs_mark_buffer_dirty(parent);
1076 if (btrfs_header_nritems(mid) <= orig_slot) {
1077 path->nodes[level] = right;
1078 path->slots[level + 1] += 1;
1079 path->slots[level] = orig_slot -
1080 btrfs_header_nritems(mid);
1081 free_extent_buffer(mid);
1083 free_extent_buffer(right);
1087 free_extent_buffer(right);
1093 * readahead one full node of leaves
1095 static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path,
1096 int level, int slot, u64 objectid)
1098 struct extent_buffer *node;
1099 struct btrfs_disk_key disk_key;
1105 int direction = path->reada;
1106 struct extent_buffer *eb;
1114 if (!path->nodes[level])
1117 node = path->nodes[level];
1118 search = btrfs_node_blockptr(node, slot);
1119 blocksize = btrfs_level_size(root, level - 1);
1120 eb = btrfs_find_tree_block(root, search, blocksize);
1122 free_extent_buffer(eb);
1126 highest_read = search;
1127 lowest_read = search;
1129 nritems = btrfs_header_nritems(node);
1132 if (direction < 0) {
1136 } else if (direction > 0) {
1141 if (path->reada < 0 && objectid) {
1142 btrfs_node_key(node, &disk_key, nr);
1143 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1146 search = btrfs_node_blockptr(node, nr);
1147 if ((search >= lowest_read && search <= highest_read) ||
1148 (search < lowest_read && lowest_read - search <= 32768) ||
1149 (search > highest_read && search - highest_read <= 32768)) {
1150 readahead_tree_block(root, search, blocksize,
1151 btrfs_node_ptr_generation(node, nr));
1155 if (path->reada < 2 && (nread > (256 * 1024) || nscan > 32))
1157 if (nread > (1024 * 1024) || nscan > 128)
1160 if (search < lowest_read)
1161 lowest_read = search;
1162 if (search > highest_read)
1163 highest_read = search;
1168 * look for key in the tree. path is filled in with nodes along the way
1169 * if key is found, we return zero and you can find the item in the leaf
1170 * level of the path (level 0)
1172 * If the key isn't found, the path points to the slot where it should
1173 * be inserted, and 1 is returned. If there are other errors during the
1174 * search a negative error number is returned.
1176 * if ins_len > 0, nodes and leaves will be split as we walk down the
1177 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible)
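/*
 * Typical read-only lookup (sketch only; btrfs_some_item stands in for
 * whatever item type the caller is actually searching for):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				      struct btrfs_some_item);
 *	btrfs_free_path(path);
 */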
1180 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1181 *root, struct btrfs_key *key, struct btrfs_path *p, int
1184 struct extent_buffer *b;
1188 int should_reada = p->reada;
1189 u8 lowest_level = 0;
1191 lowest_level = p->lowest_level;
1192 WARN_ON(lowest_level && ins_len);
1193 WARN_ON(p->nodes[0] != NULL);
1195 WARN_ON(!mutex_is_locked(&root->fs_info->fs_mutex));
1199 extent_buffer_get(b);
1201 level = btrfs_header_level(b);
1204 wret = btrfs_cow_block(trans, root, b,
1205 p->nodes[level + 1],
1206 p->slots[level + 1],
1209 free_extent_buffer(b);
1213 BUG_ON(!cow && ins_len);
1214 if (level != btrfs_header_level(b))
1216 level = btrfs_header_level(b);
1217 p->nodes[level] = b;
1218 ret = check_block(root, p, level);
1221 ret = bin_search(b, key, level, &slot);
1223 if (ret && slot > 0)
1225 p->slots[level] = slot;
1226 if ((p->search_for_split || ins_len > 0) &&
1227 btrfs_header_nritems(b) >=
1228 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1229 int sret = split_node(trans, root, p, level);
1233 b = p->nodes[level];
1234 slot = p->slots[level];
1235 } else if (ins_len < 0) {
1236 int sret = balance_level(trans, root, p,
1240 b = p->nodes[level];
1242 btrfs_release_path(NULL, p);
1245 slot = p->slots[level];
1246 BUG_ON(btrfs_header_nritems(b) == 1);
1248 /* this is only true while dropping a snapshot */
1249 if (level == lowest_level)
1253 reada_for_search(root, p, level, slot,
1256 b = read_node_slot(root, b, slot);
1258 p->slots[level] = slot;
1260 ins_len > btrfs_leaf_free_space(root, b)) {
1261 int sret = split_leaf(trans, root, key,
1262 p, ins_len, ret == 0);
1274 * adjust the pointers going up the tree, starting at level
1275 * making sure the right key of each node points to 'key'.
1276 * This is used after shifting pointers to the left, so it stops
1277 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels.
1280 * If this fails to write a tree block, it returns -1, but continues
1281 * fixing up the blocks in ram so the tree is consistent.
1283 static int fixup_low_keys(struct btrfs_trans_handle *trans,
1284 struct btrfs_root *root, struct btrfs_path *path,
1285 struct btrfs_disk_key *key, int level)
1289 struct extent_buffer *t;
1291 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1292 int tslot = path->slots[i];
1293 if (!path->nodes[i])
1296 btrfs_set_node_key(t, key, tslot);
1297 btrfs_mark_buffer_dirty(path->nodes[i]);
1307 * This function isn't completely safe. It's the caller's responsibility
1308 * that the new key won't break the order
1310 int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1311 struct btrfs_root *root, struct btrfs_path *path,
1312 struct btrfs_key *new_key)
1314 struct btrfs_disk_key disk_key;
1315 struct extent_buffer *eb;
1318 eb = path->nodes[0];
1319 slot = path->slots[0];
1321 btrfs_item_key(eb, &disk_key, slot - 1);
1322 if (btrfs_comp_keys(&disk_key, new_key) >= 0)
1325 if (slot < btrfs_header_nritems(eb) - 1) {
1326 btrfs_item_key(eb, &disk_key, slot + 1);
1327 if (btrfs_comp_keys(&disk_key, new_key) <= 0)
1331 btrfs_cpu_key_to_disk(&disk_key, new_key);
1332 btrfs_set_item_key(eb, &disk_key, slot);
1333 btrfs_mark_buffer_dirty(eb);
1335 fixup_low_keys(trans, root, path, &disk_key, 1);
1340 * try to push data from one node into the next node left in the tree.
1343 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1344 * error, and > 0 if there was no room in the left hand block.
1346 static int push_node_left(struct btrfs_trans_handle *trans,
1347 struct btrfs_root *root, struct extent_buffer *dst,
1348 struct extent_buffer *src, int empty)
1355 src_nritems = btrfs_header_nritems(src);
1356 dst_nritems = btrfs_header_nritems(dst);
1357 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1358 WARN_ON(btrfs_header_generation(src) != trans->transid);
1359 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1361 if (!empty && src_nritems <= 8)
1364 if (push_items <= 0) {
1369 push_items = min(src_nritems, push_items);
1370 if (push_items < src_nritems) {
1371 /* leave at least 8 pointers in the node if
1372 * we aren't going to empty it
1374 if (src_nritems - push_items < 8) {
1375 if (push_items <= 8)
1381 push_items = min(src_nritems - 8, push_items);
1383 copy_extent_buffer(dst, src,
1384 btrfs_node_key_ptr_offset(dst_nritems),
1385 btrfs_node_key_ptr_offset(0),
1386 push_items * sizeof(struct btrfs_key_ptr));
1388 if (push_items < src_nritems) {
1389 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1390 btrfs_node_key_ptr_offset(push_items),
1391 (src_nritems - push_items) *
1392 sizeof(struct btrfs_key_ptr));
1394 btrfs_set_header_nritems(src, src_nritems - push_items);
1395 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1396 btrfs_mark_buffer_dirty(src);
1397 btrfs_mark_buffer_dirty(dst);
1403 * try to push data from one node into the next node right in the tree.
1406 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
1407 * error, and > 0 if there was no room in the right hand block.
1409 * this will only push up to 1/2 the contents of the left node over
1411 static int balance_node_right(struct btrfs_trans_handle *trans,
1412 struct btrfs_root *root,
1413 struct extent_buffer *dst,
1414 struct extent_buffer *src)
1422 WARN_ON(btrfs_header_generation(src) != trans->transid);
1423 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1425 src_nritems = btrfs_header_nritems(src);
1426 dst_nritems = btrfs_header_nritems(dst);
1427 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1428 if (push_items <= 0) {
1432 if (src_nritems < 4) {
1436 max_push = src_nritems / 2 + 1;
1437 /* don't try to empty the node */
1438 if (max_push >= src_nritems) {
1442 if (max_push < push_items)
1443 push_items = max_push;
1445 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
1446 btrfs_node_key_ptr_offset(0),
1448 sizeof(struct btrfs_key_ptr));
1450 copy_extent_buffer(dst, src,
1451 btrfs_node_key_ptr_offset(0),
1452 btrfs_node_key_ptr_offset(src_nritems - push_items),
1453 push_items * sizeof(struct btrfs_key_ptr));
1455 btrfs_set_header_nritems(src, src_nritems - push_items);
1456 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1458 btrfs_mark_buffer_dirty(src);
1459 btrfs_mark_buffer_dirty(dst);
1465 * helper function to insert a new root level in the tree.
1466 * A new node is allocated, and a single item is inserted to
1467 * point to the existing root
1469 * returns zero on success or < 0 on failure.
1471 static int noinline insert_new_root(struct btrfs_trans_handle *trans,
1472 struct btrfs_root *root,
1473 struct btrfs_path *path, int level)
1476 struct extent_buffer *lower;
1477 struct extent_buffer *c;
1478 struct extent_buffer *old;
1479 struct btrfs_disk_key lower_key;
1481 BUG_ON(path->nodes[level]);
1482 BUG_ON(path->nodes[level-1] != root->node);
1484 lower = path->nodes[level-1];
1486 btrfs_item_key(lower, &lower_key, 0);
1488 btrfs_node_key(lower, &lower_key, 0);
1490 c = btrfs_alloc_free_block(trans, root, root->nodesize,
1491 root->root_key.objectid, &lower_key,
1492 level, root->node->start, 0);
1497 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
1498 btrfs_set_header_nritems(c, 1);
1499 btrfs_set_header_level(c, level);
1500 btrfs_set_header_bytenr(c, c->start);
1501 btrfs_set_header_generation(c, trans->transid);
1502 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
1503 btrfs_set_header_owner(c, root->root_key.objectid);
1505 write_extent_buffer(c, root->fs_info->fsid,
1506 (unsigned long)btrfs_header_fsid(c),
1509 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
1510 (unsigned long)btrfs_header_chunk_tree_uuid(c),
1513 btrfs_set_node_key(c, &lower_key, 0);
1514 btrfs_set_node_blockptr(c, 0, lower->start);
1515 lower_gen = btrfs_header_generation(lower);
1516 WARN_ON(lower_gen != trans->transid);
1518 btrfs_set_node_ptr_generation(c, 0, lower_gen);
1520 btrfs_mark_buffer_dirty(c);
1525 /* the super has an extra ref to root->node */
1526 free_extent_buffer(old);
1528 add_root_to_dirty_list(root);
1529 extent_buffer_get(c);
1530 path->nodes[level] = c;
1531 path->slots[level] = 0;
1536 * worker function to insert a single pointer in a node.
1537 * the node should have enough room for the pointer already
1539 * slot and level indicate where you want the key to go, and
1540 * blocknr is the block the key points to.
1542 * returns zero on success and < 0 on any error
1544 static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
1545 *root, struct btrfs_path *path, struct btrfs_disk_key
1546 *key, u64 bytenr, int slot, int level)
1548 struct extent_buffer *lower;
1551 BUG_ON(!path->nodes[level]);
1552 lower = path->nodes[level];
1553 nritems = btrfs_header_nritems(lower);
1556 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
1558 if (slot != nritems) {
1559 memmove_extent_buffer(lower,
1560 btrfs_node_key_ptr_offset(slot + 1),
1561 btrfs_node_key_ptr_offset(slot),
1562 (nritems - slot) * sizeof(struct btrfs_key_ptr));
1564 btrfs_set_node_key(lower, key, slot);
1565 btrfs_set_node_blockptr(lower, slot, bytenr);
1566 WARN_ON(trans->transid == 0);
1567 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
1568 btrfs_set_header_nritems(lower, nritems + 1);
1569 btrfs_mark_buffer_dirty(lower);
1574 * split the node at the specified level in path in two.
1575 * The path is corrected to point to the appropriate node after the split
1577 * Before splitting this tries to make some room in the node by pushing
1578 * left and right, if either one works, it returns right away.
1580 * returns 0 on success and < 0 on failure
1582 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
1583 *root, struct btrfs_path *path, int level)
1585 struct extent_buffer *c;
1586 struct extent_buffer *split;
1587 struct btrfs_disk_key disk_key;
1593 c = path->nodes[level];
1594 WARN_ON(btrfs_header_generation(c) != trans->transid);
1595 if (c == root->node) {
1596 /* trying to split the root, let's make a new one */
1597 ret = insert_new_root(trans, root, path, level + 1);
1601 ret = push_nodes_for_insert(trans, root, path, level);
1602 c = path->nodes[level];
1603 if (!ret && btrfs_header_nritems(c) <
1604 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
1610 c_nritems = btrfs_header_nritems(c);
1611 mid = (c_nritems + 1) / 2;
1612 btrfs_node_key(c, &disk_key, mid);
1614 split = btrfs_alloc_free_block(trans, root, root->nodesize,
1615 root->root_key.objectid,
1616 &disk_key, level, c->start, 0);
1618 return PTR_ERR(split);
1620 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
1621 btrfs_set_header_level(split, btrfs_header_level(c));
1622 btrfs_set_header_bytenr(split, split->start);
1623 btrfs_set_header_generation(split, trans->transid);
1624 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
1625 btrfs_set_header_owner(split, root->root_key.objectid);
1626 write_extent_buffer(split, root->fs_info->fsid,
1627 (unsigned long)btrfs_header_fsid(split),
1629 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
1630 (unsigned long)btrfs_header_chunk_tree_uuid(split),
1634 copy_extent_buffer(split, c,
1635 btrfs_node_key_ptr_offset(0),
1636 btrfs_node_key_ptr_offset(mid),
1637 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
1638 btrfs_set_header_nritems(split, c_nritems - mid);
1639 btrfs_set_header_nritems(c, mid);
1642 btrfs_mark_buffer_dirty(c);
1643 btrfs_mark_buffer_dirty(split);
1645 wret = insert_ptr(trans, root, path, &disk_key, split->start,
1646 path->slots[level + 1] + 1,
1651 if (path->slots[level] >= mid) {
1652 path->slots[level] -= mid;
1653 free_extent_buffer(c);
1654 path->nodes[level] = split;
1655 path->slots[level + 1] += 1;
1657 free_extent_buffer(split);
1663 * how many bytes are required to store the items in a leaf. start
1664 * and nr indicate which items in the leaf to check. This totals up the
1665 * space used both by the item structs and the item data
1667 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
1670 int nritems = btrfs_header_nritems(l);
1671 int end = min(nritems, start + nr) - 1;
1675 data_len = btrfs_item_end_nr(l, start);
1676 data_len = data_len - btrfs_item_offset_nr(l, end);
1677 data_len += sizeof(struct btrfs_item) * nr;
1678 WARN_ON(data_len < 0);
1683 * The space between the end of the leaf items and
1684 * the start of the leaf data. IOW, how much room
1685 * the leaf has left for both items and data
1687 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf)
1689 int nritems = btrfs_header_nritems(leaf);
1691 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
1693 printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n",
1694 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
1695 leaf_space_used(leaf, 0, nritems), nritems);
1701 * push some data in the path leaf to the right, trying to free up at
1702 * least data_size bytes. returns zero if the push worked, nonzero otherwise
1704 * returns 1 if the push failed because the other node didn't have enough
1705 * room, 0 if everything worked out and < 0 if there were major errors.
1707 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
1708 *root, struct btrfs_path *path, int data_size,
1711 struct extent_buffer *left = path->nodes[0];
1712 struct extent_buffer *right;
1713 struct extent_buffer *upper;
1714 struct btrfs_disk_key disk_key;
1720 struct btrfs_item *item;
1728 slot = path->slots[1];
1729 if (!path->nodes[1]) {
1732 upper = path->nodes[1];
1733 if (slot >= btrfs_header_nritems(upper) - 1)
1736 right = read_node_slot(root, upper, slot + 1);
1737 free_space = btrfs_leaf_free_space(root, right);
1738 if (free_space < data_size) {
1739 free_extent_buffer(right);
1743 /* cow and double check */
1744 ret = btrfs_cow_block(trans, root, right, upper,
1747 free_extent_buffer(right);
1750 free_space = btrfs_leaf_free_space(root, right);
1751 if (free_space < data_size) {
1752 free_extent_buffer(right);
1756 left_nritems = btrfs_header_nritems(left);
1757 if (left_nritems == 0) {
1758 free_extent_buffer(right);
1767 i = left_nritems - 1;
1769 item = btrfs_item_nr(left, i);
1771 if (path->slots[0] == i)
1772 push_space += data_size + sizeof(*item);
1774 this_item_size = btrfs_item_size(left, item);
1775 if (this_item_size + sizeof(*item) + push_space > free_space)
1778 push_space += this_item_size + sizeof(*item);
1784 if (push_items == 0) {
1785 free_extent_buffer(right);
1789 if (!empty && push_items == left_nritems)
1792 /* push left to right */
1793 right_nritems = btrfs_header_nritems(right);
1795 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
1796 push_space -= leaf_data_end(root, left);
1798 /* make room in the right data area */
1799 data_end = leaf_data_end(root, right);
1800 memmove_extent_buffer(right,
1801 btrfs_leaf_data(right) + data_end - push_space,
1802 btrfs_leaf_data(right) + data_end,
1803 BTRFS_LEAF_DATA_SIZE(root) - data_end);
1805 /* copy from the left data area */
1806 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
1807 BTRFS_LEAF_DATA_SIZE(root) - push_space,
1808 btrfs_leaf_data(left) + leaf_data_end(root, left),
1811 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
1812 btrfs_item_nr_offset(0),
1813 right_nritems * sizeof(struct btrfs_item));
1815 /* copy the items from left to right */
1816 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
1817 btrfs_item_nr_offset(left_nritems - push_items),
1818 push_items * sizeof(struct btrfs_item));
1820 /* update the item pointers */
1821 right_nritems += push_items;
1822 btrfs_set_header_nritems(right, right_nritems);
1823 push_space = BTRFS_LEAF_DATA_SIZE(root);
1824 for (i = 0; i < right_nritems; i++) {
1825 item = btrfs_item_nr(right, i);
1826 push_space -= btrfs_item_size(right, item);
1827 btrfs_set_item_offset(right, item, push_space);
1830 left_nritems -= push_items;
1831 btrfs_set_header_nritems(left, left_nritems);
1834 btrfs_mark_buffer_dirty(left);
1835 btrfs_mark_buffer_dirty(right);
1837 btrfs_item_key(right, &disk_key, 0);
1838 btrfs_set_node_key(upper, &disk_key, slot + 1);
1839 btrfs_mark_buffer_dirty(upper);
1841 /* then fixup the leaf pointer in the path */
1842 if (path->slots[0] >= left_nritems) {
1843 path->slots[0] -= left_nritems;
1844 free_extent_buffer(path->nodes[0]);
1845 path->nodes[0] = right;
1846 path->slots[1] += 1;
1848 free_extent_buffer(right);
1853 * push some data in the path leaf to the left, trying to free up at
1854 * least data_size bytes. returns zero if the push worked, nonzero otherwise
1856 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
1857 *root, struct btrfs_path *path, int data_size,
1860 struct btrfs_disk_key disk_key;
1861 struct extent_buffer *right = path->nodes[0];
1862 struct extent_buffer *left;
1868 struct btrfs_item *item;
1869 u32 old_left_nritems;
1875 u32 old_left_item_size;
1877 slot = path->slots[1];
1880 if (!path->nodes[1])
1883 right_nritems = btrfs_header_nritems(right);
1884 if (right_nritems == 0) {
1888 left = read_node_slot(root, path->nodes[1], slot - 1);
1889 free_space = btrfs_leaf_free_space(root, left);
1890 if (free_space < data_size) {
1891 free_extent_buffer(left);
1895 /* cow and double check */
1896 ret = btrfs_cow_block(trans, root, left,
1897 path->nodes[1], slot - 1, &left);
1899 /* we hit -ENOSPC, but it isn't fatal here */
1900 free_extent_buffer(left);
1904 free_space = btrfs_leaf_free_space(root, left);
1905 if (free_space < data_size) {
1906 free_extent_buffer(left);
1913 nr = right_nritems - 1;
1915 for (i = 0; i < nr; i++) {
1916 item = btrfs_item_nr(right, i);
1918 if (path->slots[0] == i)
1919 push_space += data_size + sizeof(*item);
1921 this_item_size = btrfs_item_size(right, item);
1922 if (this_item_size + sizeof(*item) + push_space > free_space)
1926 push_space += this_item_size + sizeof(*item);
1929 if (push_items == 0) {
1930 free_extent_buffer(left);
1933 if (!empty && push_items == btrfs_header_nritems(right))
1936 /* push data from right to left */
1937 copy_extent_buffer(left, right,
1938 btrfs_item_nr_offset(btrfs_header_nritems(left)),
1939 btrfs_item_nr_offset(0),
1940 push_items * sizeof(struct btrfs_item));
1942 push_space = BTRFS_LEAF_DATA_SIZE(root) -
1943 btrfs_item_offset_nr(right, push_items - 1);
1945 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
1946 leaf_data_end(root, left) - push_space,
1947 btrfs_leaf_data(right) +
1948 btrfs_item_offset_nr(right, push_items - 1),
1950 old_left_nritems = btrfs_header_nritems(left);
1951 BUG_ON(old_left_nritems < 0);
1953 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
1954 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
1957 item = btrfs_item_nr(left, i);
1958 ioff = btrfs_item_offset(left, item);
1959 btrfs_set_item_offset(left, item,
1960 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
1962 btrfs_set_header_nritems(left, old_left_nritems + push_items);
1964 /* fixup right node */
1965 if (push_items > right_nritems) {
1966 printk("push items %d nr %u\n", push_items, right_nritems);
1970 if (push_items < right_nritems) {
1971 push_space = btrfs_item_offset_nr(right, push_items - 1) -
1972 leaf_data_end(root, right);
1973 memmove_extent_buffer(right, btrfs_leaf_data(right) +
1974 BTRFS_LEAF_DATA_SIZE(root) - push_space,
1975 btrfs_leaf_data(right) +
1976 leaf_data_end(root, right), push_space);
1978 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
1979 btrfs_item_nr_offset(push_items),
1980 (btrfs_header_nritems(right) - push_items) *
1981 sizeof(struct btrfs_item));
1983 right_nritems -= push_items;
1984 btrfs_set_header_nritems(right, right_nritems);
1985 push_space = BTRFS_LEAF_DATA_SIZE(root);
1986 for (i = 0; i < right_nritems; i++) {
1987 item = btrfs_item_nr(right, i);
1988 push_space = push_space - btrfs_item_size(right, item);
1989 btrfs_set_item_offset(right, item, push_space);
1992 btrfs_mark_buffer_dirty(left);
1994 btrfs_mark_buffer_dirty(right);
1996 btrfs_item_key(right, &disk_key, 0);
1997 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2001 /* then fixup the leaf pointer in the path */
2002 if (path->slots[0] < push_items) {
2003 path->slots[0] += old_left_nritems;
2004 free_extent_buffer(path->nodes[0]);
2005 path->nodes[0] = left;
2006 path->slots[1] -= 1;
2008 free_extent_buffer(left);
2009 path->slots[0] -= push_items;
2011 BUG_ON(path->slots[0] < 0);
2016 * helper for split_leaf: copy the items from slot 'mid' onward out of
2017 * leaf 'l' into the new leaf 'right' and fix up the path accordingly.
2019 * returns 0 if all went well and < 0 on failure.
2021 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2022 struct btrfs_root *root,
2023 struct btrfs_path *path,
2024 struct extent_buffer *l,
2025 struct extent_buffer *right,
2026 int slot, int mid, int nritems)
2033 struct btrfs_disk_key disk_key;
2035 nritems = nritems - mid;
2036 btrfs_set_header_nritems(right, nritems);
2037 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2039 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2040 btrfs_item_nr_offset(mid),
2041 nritems * sizeof(struct btrfs_item));
2043 copy_extent_buffer(right, l,
2044 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2045 data_copy_size, btrfs_leaf_data(l) +
2046 leaf_data_end(root, l), data_copy_size);
2048 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2049 btrfs_item_end_nr(l, mid);
2051 for (i = 0; i < nritems; i++) {
2052 struct btrfs_item *item = btrfs_item_nr(right, i);
2053 u32 ioff = btrfs_item_offset(right, item);
2054 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2057 btrfs_set_header_nritems(l, mid);
2059 btrfs_item_key(right, &disk_key, 0);
2060 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2061 path->slots[1] + 1, 1);
2065 btrfs_mark_buffer_dirty(right);
2066 btrfs_mark_buffer_dirty(l);
2067 BUG_ON(path->slots[0] != slot);
2070 free_extent_buffer(path->nodes[0]);
2071 path->nodes[0] = right;
2072 path->slots[0] -= mid;
2073 path->slots[1] += 1;
2075 free_extent_buffer(right);
2078 BUG_ON(path->slots[0] < 0);
2084 * split the path's leaf in two, making sure there is at least data_size
2085 * available for the resulting leaf level of the path.
2087 * returns 0 if all went well and < 0 on failure.
2089 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2090 struct btrfs_root *root,
2091 struct btrfs_key *ins_key,
2092 struct btrfs_path *path, int data_size,
2095 struct btrfs_disk_key disk_key;
2096 struct extent_buffer *l;
2100 struct extent_buffer *right;
2104 int num_doubles = 0;
2106 /* first try to make some room by pushing left and right */
2107 if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
2108 wret = push_leaf_right(trans, root, path, data_size, 0);
2112 wret = push_leaf_left(trans, root, path, data_size, 0);
2118 /* did the pushes work? */
2119 if (btrfs_leaf_free_space(root, l) >= data_size)
2123 if (!path->nodes[1]) {
2124 ret = insert_new_root(trans, root, path, 1);
2131 slot = path->slots[0];
2132 nritems = btrfs_header_nritems(l);
2133 mid = (nritems + 1) / 2;
2137 leaf_space_used(l, mid, nritems - mid) + data_size >
2138 BTRFS_LEAF_DATA_SIZE(root)) {
2139 if (slot >= nritems) {
2143 if (mid != nritems &&
2144 leaf_space_used(l, mid, nritems - mid) +
2145 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2151 if (leaf_space_used(l, 0, mid) + data_size >
2152 BTRFS_LEAF_DATA_SIZE(root)) {
2153 if (!extend && data_size && slot == 0) {
2155 } else if ((extend || !data_size) && slot == 0) {
2159 if (mid != nritems &&
2160 leaf_space_used(l, mid, nritems - mid) +
2161 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2169 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2171 btrfs_item_key(l, &disk_key, mid);
2173 right = btrfs_alloc_free_block(trans, root, root->leafsize,
2174 root->root_key.objectid,
2175 &disk_key, 0, l->start, 0);
2176 if (IS_ERR(right)) {
2178 return PTR_ERR(right);
2181 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2182 btrfs_set_header_bytenr(right, right->start);
2183 btrfs_set_header_generation(right, trans->transid);
2184 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
2185 btrfs_set_header_owner(right, root->root_key.objectid);
2186 btrfs_set_header_level(right, 0);
2187 write_extent_buffer(right, root->fs_info->fsid,
2188 (unsigned long)btrfs_header_fsid(right),
2191 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2192 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2197 btrfs_set_header_nritems(right, 0);
2198 wret = insert_ptr(trans, root, path,
2199 &disk_key, right->start,
2200 path->slots[1] + 1, 1);
2204 free_extent_buffer(path->nodes[0]);
2205 path->nodes[0] = right;
2207 path->slots[1] += 1;
2209 btrfs_set_header_nritems(right, 0);
2210 wret = insert_ptr(trans, root, path,
2216 free_extent_buffer(path->nodes[0]);
2217 path->nodes[0] = right;
2219 if (path->slots[1] == 0) {
2220 wret = fixup_low_keys(trans, root,
2221 path, &disk_key, 1);
2226 btrfs_mark_buffer_dirty(right);
2230 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
2234 BUG_ON(num_doubles != 0);
2243 * This function splits a single item into two items,
2244 * giving 'new_key' to the new item and splitting the
2245 * old one at split_offset (from the start of the item).
2247 * The path may be released by this operation. After
2248 * the split, the path is pointing to the old item. The
2249 * new item is going to be in the same node as the old one.
2251 * Note, the item being split must be small enough to live alone on
2252 * a tree block with room for one extra struct btrfs_item
2254 * This allows us to split the item in place, keeping a lock on the
2255 * leaf the entire time.
2257 int btrfs_split_item(struct btrfs_trans_handle *trans,
2258 struct btrfs_root *root,
2259 struct btrfs_path *path,
2260 struct btrfs_key *new_key,
2261 unsigned long split_offset)
2264 struct extent_buffer *leaf;
2265 struct btrfs_key orig_key;
2266 struct btrfs_item *item;
2267 struct btrfs_item *new_item;
2272 struct btrfs_disk_key disk_key;
2275 leaf = path->nodes[0];
2276 btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
2277 if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
2280 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2281 btrfs_release_path(root, path);
2283 path->search_for_split = 1;
2285 ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
2286 path->search_for_split = 0;
2288 /* if our item isn't there or got smaller, return now */
2289 if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
2294 ret = split_leaf(trans, root, &orig_key, path, 0, 0);
2297 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
2298 leaf = path->nodes[0];
2301 item = btrfs_item_nr(leaf, path->slots[0]);
2302 orig_offset = btrfs_item_offset(leaf, item);
2303 item_size = btrfs_item_size(leaf, item);
2306 buf = kmalloc(item_size, GFP_NOFS);
2307 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
2308 path->slots[0]), item_size);
2309 slot = path->slots[0] + 1;
2310 leaf = path->nodes[0];
2312 nritems = btrfs_header_nritems(leaf);
2314 if (slot != nritems) {
2315 /* shift the items */
2316 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
2317 btrfs_item_nr_offset(slot),
2318 (nritems - slot) * sizeof(struct btrfs_item));
2322 btrfs_cpu_key_to_disk(&disk_key, new_key);
2323 btrfs_set_item_key(leaf, &disk_key, slot);
2325 new_item = btrfs_item_nr(leaf, slot);
2327 btrfs_set_item_offset(leaf, new_item, orig_offset);
2328 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
2330 btrfs_set_item_offset(leaf, item,
2331 orig_offset + item_size - split_offset);
2332 btrfs_set_item_size(leaf, item, split_offset);
2334 btrfs_set_header_nritems(leaf, nritems + 1);
2336 /* write the data for the start of the original item */
2337 write_extent_buffer(leaf, buf,
2338 btrfs_item_ptr_offset(leaf, path->slots[0]),
2341 /* write the data for the new item */
2342 write_extent_buffer(leaf, buf + split_offset,
2343 btrfs_item_ptr_offset(leaf, slot),
2344 item_size - split_offset);
2345 btrfs_mark_buffer_dirty(leaf);
2348 if (btrfs_leaf_free_space(root, leaf) < 0) {
2349 btrfs_print_leaf(root, leaf);
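/*
 * shrink the item at path->slots[0] to new_size bytes.  When from_end is
 * set the tail of the item data is chopped off; otherwise the front is
 * trimmed and the item's key offset is adjusted to match.
 */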
2356 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
2357 struct btrfs_root *root,
2358 struct btrfs_path *path,
2359 u32 new_size, int from_end)
2363 struct extent_buffer *leaf;
2364 struct btrfs_item *item;
2366 unsigned int data_end;
2367 unsigned int old_data_start;
2368 unsigned int old_size;
2369 unsigned int size_diff;
2372 leaf = path->nodes[0];
2373 slot = path->slots[0];
2375 old_size = btrfs_item_size_nr(leaf, slot);
2376 if (old_size == new_size)
2379 nritems = btrfs_header_nritems(leaf);
2380 data_end = leaf_data_end(root, leaf);
2382 old_data_start = btrfs_item_offset_nr(leaf, slot);
2384 size_diff = old_size - new_size;
2387 BUG_ON(slot >= nritems);
2390 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2392 /* first correct the data pointers */
2393 for (i = slot; i < nritems; i++) {
2395 item = btrfs_item_nr(leaf, i);
2396 ioff = btrfs_item_offset(leaf, item);
2397 btrfs_set_item_offset(leaf, item, ioff + size_diff);
2400 /* shift the data */
2402 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2403 data_end + size_diff, btrfs_leaf_data(leaf) +
2404 data_end, old_data_start + new_size - data_end);
2406 struct btrfs_disk_key disk_key;
2409 btrfs_item_key(leaf, &disk_key, slot);
2411 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
2413 struct btrfs_file_extent_item *fi;
2415 fi = btrfs_item_ptr(leaf, slot,
2416 struct btrfs_file_extent_item);
2417 fi = (struct btrfs_file_extent_item *)(
2418 (unsigned long)fi - size_diff);
2420 if (btrfs_file_extent_type(leaf, fi) ==
2421 BTRFS_FILE_EXTENT_INLINE) {
2422 ptr = btrfs_item_ptr_offset(leaf, slot);
2423 memmove_extent_buffer(leaf, ptr,
2425 offsetof(struct btrfs_file_extent_item,
2430 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2431 data_end + size_diff, btrfs_leaf_data(leaf) +
2432 data_end, old_data_start - data_end);
2434 offset = btrfs_disk_key_offset(&disk_key);
2435 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
2436 btrfs_set_item_key(leaf, &disk_key, slot);
2438 fixup_low_keys(trans, root, path, &disk_key, 1);
2441 item = btrfs_item_nr(leaf, slot);
2442 btrfs_set_item_size(leaf, item, new_size);
2443 btrfs_mark_buffer_dirty(leaf);
2446 if (btrfs_leaf_free_space(root, leaf) < 0) {
2447 btrfs_print_leaf(root, leaf);
2453 int btrfs_extend_item(struct btrfs_trans_handle *trans,
2454 struct btrfs_root *root, struct btrfs_path *path,
2459 struct extent_buffer *leaf;
2460 struct btrfs_item *item;
2462 unsigned int data_end;
2463 unsigned int old_data;
2464 unsigned int old_size;
2467 leaf = path->nodes[0];
2469 nritems = btrfs_header_nritems(leaf);
2470 data_end = leaf_data_end(root, leaf);
2472 if (btrfs_leaf_free_space(root, leaf) < data_size) {
2473 btrfs_print_leaf(root, leaf);
2476 slot = path->slots[0];
2477 old_data = btrfs_item_end_nr(leaf, slot);
2480 if (slot >= nritems) {
2481 btrfs_print_leaf(root, leaf);
2482 printk("slot %d too large, nritems %d\n", slot, nritems);
2487 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2489 /* first correct the data pointers */
2490 for (i = slot; i < nritems; i++) {
2492 item = btrfs_item_nr(leaf, i);
2493 ioff = btrfs_item_offset(leaf, item);
2494 btrfs_set_item_offset(leaf, item, ioff - data_size);
2497 /* shift the data */
2498 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2499 data_end - data_size, btrfs_leaf_data(leaf) +
2500 data_end, old_data - data_end);
2502 data_end = old_data;
2503 old_size = btrfs_item_size_nr(leaf, slot);
2504 item = btrfs_item_nr(leaf, slot);
2505 btrfs_set_item_size(leaf, item, old_size + data_size);
2506 btrfs_mark_buffer_dirty(leaf);
2509 if (btrfs_leaf_free_space(root, leaf) < 0) {
2510 btrfs_print_leaf(root, leaf);
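
/*
 * Illustrative sketch (hypothetical helper, error handling trimmed):
 * growing an existing item with btrfs_extend_item() and writing into the
 * added space.  Searching with ins_len == extra and cow == 1 cows the leaf
 * and makes sure it has room; btrfs_extend_item() then shifts the leaf
 * data so the new bytes appear at the end of the item.
 */
static int example_grow_item(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *key,
			     const void *data, u32 extra)
{
	struct extent_buffer *leaf;
	unsigned long ptr;
	u32 old_size;
	int ret;

	ret = btrfs_search_slot(trans, root, key, path, extra, 1);
	if (ret)
		return ret < 0 ? ret : -ENOENT;

	leaf = path->nodes[0];
	old_size = btrfs_item_size_nr(leaf, path->slots[0]);

	/* shift the leaf data and bump the item size by 'extra' bytes */
	btrfs_extend_item(trans, root, path, extra);

	/* the new space is the last 'extra' bytes of the item */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]) + old_size;
	write_extent_buffer(leaf, data, ptr, extra);
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
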
2517 * Given keys and data sizes, reserve empty item(s) in the tree for the caller
2518 * to fill in. This does all the path init required, making room in the tree if needed.
2520 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
2521 struct btrfs_root *root,
2522 struct btrfs_path *path,
2523 struct btrfs_key *cpu_key, u32 *data_size,
2526 struct extent_buffer *leaf;
2527 struct btrfs_item *item;
2534 unsigned int data_end;
2535 struct btrfs_disk_key disk_key;
2537 for (i = 0; i < nr; i++) {
2538 total_data += data_size[i];
2541 /* create a root if there isn't one */
2545 total_size = total_data + nr * sizeof(struct btrfs_item);
2546 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
2553 leaf = path->nodes[0];
2555 nritems = btrfs_header_nritems(leaf);
2556 data_end = leaf_data_end(root, leaf);
2558 if (btrfs_leaf_free_space(root, leaf) < total_size) {
2559 btrfs_print_leaf(root, leaf);
2560 printk("not enough free space: need %u, have %d\n",
2561 total_size, btrfs_leaf_free_space(root, leaf));
2565 slot = path->slots[0];
2568 if (slot != nritems) {
2570 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
2572 if (old_data < data_end) {
2573 btrfs_print_leaf(root, leaf);
2574 printk("slot %d old_data %d data_end %d\n",
2575 slot, old_data, data_end);
2579 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2581 /* first correct the data pointers */
2582 for (i = slot; i < nritems; i++) {
2585 item = btrfs_item_nr(leaf, i);
2586 ioff = btrfs_item_offset(leaf, item);
2587 btrfs_set_item_offset(leaf, item, ioff - total_data);
2590 /* shift the items */
2591 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
2592 btrfs_item_nr_offset(slot),
2593 (nritems - slot) * sizeof(struct btrfs_item));
2595 /* shift the data */
2596 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2597 data_end - total_data, btrfs_leaf_data(leaf) +
2598 data_end, old_data - data_end);
2599 data_end = old_data;
2602 /* setup the item for the new data */
2603 for (i = 0; i < nr; i++) {
2604 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
2605 btrfs_set_item_key(leaf, &disk_key, slot + i);
2606 item = btrfs_item_nr(leaf, slot + i);
2607 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
2608 data_end -= data_size[i];
2609 btrfs_set_item_size(leaf, item, data_size[i]);
2611 btrfs_set_header_nritems(leaf, nritems + nr);
2612 btrfs_mark_buffer_dirty(leaf);
2616 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
2617 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
2620 if (btrfs_leaf_free_space(root, leaf) < 0) {
2621 btrfs_print_leaf(root, leaf);
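
/*
 * Usage sketch (hypothetical caller, error handling trimmed): reserve room
 * for two adjacent items in one pass with btrfs_insert_empty_items() and
 * then fill in their data.  Assumes the keys are in sorted order and not
 * already present in the tree.
 */
static int example_insert_two(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_key *keys,	/* keys[0], keys[1] */
			      void *data[2], u32 sizes[2])
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* makes room in the leaf and leaves path pointing at keys[0] */
	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	for (i = 0; i < 2; i++) {
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + i);
		write_extent_buffer(leaf, data[i], ptr, sizes[i]);
	}
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
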
2630 * Given a key and some data, insert an item into the tree.
2631 * This does all the path init required, making room in the tree if needed.
2633 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
2634 *root, struct btrfs_key *cpu_key, void *data, u32
2638 struct btrfs_path *path;
2639 struct extent_buffer *leaf;
2642 path = btrfs_alloc_path();
2644 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
2646 leaf = path->nodes[0];
2647 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2648 write_extent_buffer(leaf, data, ptr, data_size);
2649 btrfs_mark_buffer_dirty(leaf);
2651 btrfs_free_path(path);
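
/*
 * Sketch of a typical call (hypothetical helper): btrfs_insert_item() is a
 * convenience wrapper that allocates its own path, reserves the space with
 * btrfs_insert_empty_item(), copies the data in and marks the leaf dirty.
 * Callers that need to keep the path, for example to read or modify the
 * new item, use btrfs_insert_empty_item() directly instead.
 */
static int example_insert_one(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_key *key,
			      void *data, u32 len)
{
	/* makes room in the tree, copies 'data' and marks the leaf dirty */
	return btrfs_insert_item(trans, root, key, data, len);
}
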
2656 * delete the pointer from a given node.
2658 * If the delete empties a node, the node is removed from the tree,
2659 * continuing all the way up to the root if required. The root is converted into
2660 * a leaf if all the nodes are emptied.
2662 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2663 struct btrfs_path *path, int level, int slot)
2665 struct extent_buffer *parent = path->nodes[level];
2670 nritems = btrfs_header_nritems(parent);
2671 if (slot != nritems - 1) {
2672 memmove_extent_buffer(parent,
2673 btrfs_node_key_ptr_offset(slot),
2674 btrfs_node_key_ptr_offset(slot + 1),
2675 sizeof(struct btrfs_key_ptr) *
2676 (nritems - slot - 1));
2679 btrfs_set_header_nritems(parent, nritems);
2680 if (nritems == 0 && parent == root->node) {
2681 BUG_ON(btrfs_header_level(root->node) != 1);
2682 /* just turn the root into a leaf and break */
2683 btrfs_set_header_level(root->node, 0);
2684 } else if (slot == 0) {
2685 struct btrfs_disk_key disk_key;
2687 btrfs_node_key(parent, &disk_key, 0);
2688 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
2692 btrfs_mark_buffer_dirty(parent);
2697 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
2700 * This deletes the pointer in path->nodes[1] and frees the leaf
2701 * block extent. zero is returned if it all worked out, < 0 otherwise.
2703 * The path must have already been setup for deleting the leaf, including
2704 * all the proper balancing. path->nodes[1] must be locked.
2706 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
2707 struct btrfs_root *root,
2708 struct btrfs_path *path,
2709 struct extent_buffer *leaf)
2713 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
2714 ret = del_ptr(trans, root, path, 1, path->slots[1]);
2718 ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
2719 0, root->root_key.objectid, 0, 0);
2724 * delete nr items starting at slot from the leaf in path. If that empties
2725 * the leaf, remove it from the tree
2727 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2728 struct btrfs_path *path, int slot, int nr)
2730 struct extent_buffer *leaf;
2731 struct btrfs_item *item;
2739 leaf = path->nodes[0];
2740 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
2742 for (i = 0; i < nr; i++)
2743 dsize += btrfs_item_size_nr(leaf, slot + i);
2745 nritems = btrfs_header_nritems(leaf);
2747 if (slot + nr != nritems) {
2749 int data_end = leaf_data_end(root, leaf);
2751 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2753 btrfs_leaf_data(leaf) + data_end,
2754 last_off - data_end);
2756 for (i = slot + nr; i < nritems; i++) {
2759 item = btrfs_item_nr(leaf, i);
2760 ioff = btrfs_item_offset(leaf, item);
2761 btrfs_set_item_offset(leaf, item, ioff + dsize);
2764 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
2765 btrfs_item_nr_offset(slot + nr),
2766 sizeof(struct btrfs_item) *
2767 (nritems - slot - nr));
2769 btrfs_set_header_nritems(leaf, nritems - nr);
2772 /* delete the leaf if we've emptied it */
2774 if (leaf == root->node) {
2775 btrfs_set_header_level(leaf, 0);
2777 clean_tree_block(trans, root, leaf);
2778 wait_on_tree_block_writeback(root, leaf);
2780 wret = btrfs_del_leaf(trans, root, path, leaf);
2786 int used = leaf_space_used(leaf, 0, nritems);
2788 struct btrfs_disk_key disk_key;
2790 btrfs_item_key(leaf, &disk_key, 0);
2791 wret = fixup_low_keys(trans, root, path,
2797 /* delete the leaf if it is mostly empty */
2798 if (used < BTRFS_LEAF_DATA_SIZE(root) / 4) {
2799 /* push_leaf_left fixes the path.
2800 * make sure the path still points to our leaf
2801 * for possible call to del_ptr below
2803 slot = path->slots[1];
2804 extent_buffer_get(leaf);
2806 wret = push_leaf_left(trans, root, path, 1, 1);
2807 if (wret < 0 && wret != -ENOSPC)
2810 if (path->nodes[0] == leaf &&
2811 btrfs_header_nritems(leaf)) {
2812 wret = push_leaf_right(trans, root, path, 1, 1);
2813 if (wret < 0 && wret != -ENOSPC)
2817 if (btrfs_header_nritems(leaf) == 0) {
2818 clean_tree_block(trans, root, leaf);
2819 wait_on_tree_block_writeback(root, leaf);
2821 path->slots[1] = slot;
2822 ret = btrfs_del_leaf(trans, root, path, leaf);
2824 free_extent_buffer(leaf);
2827 btrfs_mark_buffer_dirty(leaf);
2828 free_extent_buffer(leaf);
2831 btrfs_mark_buffer_dirty(leaf);
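
/*
 * Usage sketch (hypothetical caller): remove a single item by key.
 * Searching with ins_len == -1 and cow == 1 tells btrfs_search_slot() to
 * cow and rebalance on the way down so the delete can proceed;
 * btrfs_del_items() then handles emptying and freeing the leaf itself.
 */
static int example_delete_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);

	btrfs_free_path(path);
	return ret;
}
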
2838 * walk up the tree as far as required to find the previous leaf.
2839 * returns 0 if it found something or 1 if there are no lesser leaves.
2840 * returns < 0 on I/O errors.
2842 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
2846 struct extent_buffer *c;
2847 struct extent_buffer *next = NULL;
2849 while (level < BTRFS_MAX_LEVEL) {
2850 if (!path->nodes[level])
2853 slot = path->slots[level];
2854 c = path->nodes[level];
2857 if (level == BTRFS_MAX_LEVEL)
2864 free_extent_buffer(next);
2866 next = read_node_slot(root, c, slot);
2869 path->slots[level] = slot;
2872 c = path->nodes[level];
2873 free_extent_buffer(c);
2874 slot = btrfs_header_nritems(next);
2877 path->nodes[level] = next;
2878 path->slots[level] = slot;
2881 next = read_node_slot(root, next, slot);
2887 * walk up the tree as far as required to find the next leaf.
2888 * returns 0 if it found something or 1 if there are no greater leaves.
2889 * returns < 0 on I/O errors.
2891 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
2895 struct extent_buffer *c;
2896 struct extent_buffer *next = NULL;
2898 while (level < BTRFS_MAX_LEVEL) {
2899 if (!path->nodes[level])
2902 slot = path->slots[level] + 1;
2903 c = path->nodes[level];
2904 if (slot >= btrfs_header_nritems(c)) {
2906 if (level == BTRFS_MAX_LEVEL)
2912 free_extent_buffer(next);
2915 reada_for_search(root, path, level, slot, 0);
2917 next = read_node_slot(root, c, slot);
2920 path->slots[level] = slot;
2923 c = path->nodes[level];
2924 free_extent_buffer(c);
2925 path->nodes[level] = next;
2926 path->slots[level] = 0;
2930 reada_for_search(root, path, level, 0, 0);
2931 next = read_node_slot(root, next, 0);
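
/*
 * Usage sketch (read-only walk, hypothetical helper): visit every item with
 * a given objectid, advancing to the next leaf with btrfs_next_leaf() when
 * the current one is exhausted.  A NULL transaction and cow == 0 make the
 * search read-only.
 */
static int example_walk_items(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1 means no more leaves */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid)
			break;

		/* ... process the item at path->slots[0] here ... */

		path->slots[0]++;
	}
	ret = ret < 0 ? ret : 0;
out:
	btrfs_free_path(path);
	return ret;
}
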
2936 int btrfs_previous_item(struct btrfs_root *root,
2937 struct btrfs_path *path, u64 min_objectid,
2940 struct btrfs_key found_key;
2941 struct extent_buffer *leaf;
2945 if (path->slots[0] == 0) {
2946 ret = btrfs_prev_leaf(root, path);
2952 leaf = path->nodes[0];
2953 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2954 if (found_key.type == type)