1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/rbtree.h>
12 #include "transaction.h"
13 #include "print-tree.h"
17 #include "tree-mod-log.h"
19 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
20 *root, struct btrfs_path *path, int level);
21 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
22 const struct btrfs_key *ins_key, struct btrfs_path *path,
23 int data_size, int extend);
24 static int push_node_left(struct btrfs_trans_handle *trans,
25 struct extent_buffer *dst,
26 struct extent_buffer *src, int empty);
27 static int balance_node_right(struct btrfs_trans_handle *trans,
28 struct extent_buffer *dst_buf,
29 struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
38 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
39 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
40 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
41 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
42 .driver = "blake2b-256" },
45 int btrfs_super_csum_size(const struct btrfs_super_block *s)
47 u16 t = btrfs_super_csum_type(s);
	/* csum type is validated at mount time */
51 return btrfs_csums[t].size;
54 const char *btrfs_super_csum_name(u16 csum_type)
56 /* csum type is validated at mount time */
57 return btrfs_csums[csum_type].name;
/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name
 */
64 const char *btrfs_super_csum_driver(u16 csum_type)
66 /* csum type is validated at mount time */
67 return btrfs_csums[csum_type].driver[0] ?
68 btrfs_csums[csum_type].driver :
69 btrfs_csums[csum_type].name;
72 size_t __attribute_const__ btrfs_get_num_csums(void)
74 return ARRAY_SIZE(btrfs_csums);
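/*
 * Illustrative sketch, not part of the original file: how the csum helpers
 * above are typically combined.  The function name is hypothetical; it only
 * assumes a superblock whose csum type was already validated at mount time.
 */
static void __maybe_unused example_print_csum_info(const struct btrfs_super_block *sb)
{
	u16 csum_type = btrfs_super_csum_type(sb);

	pr_info("checksum %s: %d bytes, crypto driver \"%s\"\n",
		btrfs_super_csum_name(csum_type),
		btrfs_super_csum_size(sb),
		btrfs_super_csum_driver(csum_type));
}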
77 struct btrfs_path *btrfs_alloc_path(void)
79 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
82 /* this also releases the path */
83 void btrfs_free_path(struct btrfs_path *p)
87 btrfs_release_path(p);
88 kmem_cache_free(btrfs_path_cachep, p);
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
97 noinline void btrfs_release_path(struct btrfs_path *p)
101 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
106 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
109 free_extent_buffer(p->nodes[i]);
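/*
 * Illustrative sketch, not part of the original file: the usual lifetime of a
 * path around a read-only search.  The helper name is hypothetical.
 */
static int __maybe_unused example_lookup_key(struct btrfs_root *root,
					     const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* No transaction, ins_len == 0 and cow == 0: a plain lookup. */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0)
		ret = -ENOENT;	/* not found, path points at the insert slot */

	/* btrfs_free_path() drops the locks and extent buffer references. */
	btrfs_free_path(path);
	return ret;
}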
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node if you need the
 * root node locked.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
124 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
126 struct extent_buffer *eb;
130 eb = rcu_dereference(root->node);
133 * RCU really hurts here, we could free up the root node because
134 * it was COWed but we may not get the new root node yet so do
135 * the inc_not_zero dance and if it doesn't work then
136 * synchronize_rcu and try again.
138 if (atomic_inc_not_zero(&eb->refs)) {
 * Cowonly roots (non-shareable trees; everything that is not a subvolume or
 * reloc root) just get put onto a simple dirty list.  The transaction walks
 * this list to make sure they get properly updated on disk.
153 static void add_root_to_dirty_list(struct btrfs_root *root)
155 struct btrfs_fs_info *fs_info = root->fs_info;
157 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
158 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
161 spin_lock(&fs_info->trans_lock);
162 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
163 /* Want the extent tree to be the last on the list */
164 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
165 list_move_tail(&root->dirty_list,
166 &fs_info->dirty_cowonly_roots);
168 list_move(&root->dirty_list,
169 &fs_info->dirty_cowonly_roots);
171 spin_unlock(&fs_info->trans_lock);
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
179 int btrfs_copy_root(struct btrfs_trans_handle *trans,
180 struct btrfs_root *root,
181 struct extent_buffer *buf,
182 struct extent_buffer **cow_ret, u64 new_root_objectid)
184 struct btrfs_fs_info *fs_info = root->fs_info;
185 struct extent_buffer *cow;
188 struct btrfs_disk_key disk_key;
190 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
191 trans->transid != fs_info->running_transaction->transid);
192 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
193 trans->transid != root->last_trans);
	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);
201 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
202 &disk_key, level, buf->start, 0,
203 BTRFS_NESTING_NEW_ROOT);
207 copy_extent_buffer_full(cow, buf);
208 btrfs_set_header_bytenr(cow, cow->start);
209 btrfs_set_header_generation(cow, trans->transid);
210 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
211 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
212 BTRFS_HEADER_FLAG_RELOC);
213 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
214 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
216 btrfs_set_header_owner(cow, new_root_objectid);
218 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
220 WARN_ON(btrfs_header_generation(buf) > trans->transid);
221 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
222 ret = btrfs_inc_ref(trans, root, cow, 1);
224 ret = btrfs_inc_ref(trans, root, cow, 0);
226 btrfs_tree_unlock(cow);
227 free_extent_buffer(cow);
228 btrfs_abort_transaction(trans, ret);
232 btrfs_mark_buffer_dirty(cow);
238 * check if the tree block can be shared by multiple trees
240 int btrfs_block_can_be_shared(struct btrfs_root *root,
241 struct extent_buffer *buf)
244 * Tree blocks not in shareable trees and tree roots are never shared.
245 * If a block was allocated after the last snapshot and the block was
246 * not allocated by tree relocation, we know the block is not shared.
248 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
249 buf != root->node && buf != root->commit_root &&
250 (btrfs_header_generation(buf) <=
251 btrfs_root_last_snapshot(&root->root_item) ||
252 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
258 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
259 struct btrfs_root *root,
260 struct extent_buffer *buf,
261 struct extent_buffer *cow,
264 struct btrfs_fs_info *fs_info = root->fs_info;
272 * Backrefs update rules:
274 * Always use full backrefs for extent pointers in tree block
275 * allocated by tree relocation.
277 * If a shared tree block is no longer referenced by its owner
278 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
279 * use full backrefs for extent pointers in tree block.
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
288 if (btrfs_block_can_be_shared(root, buf)) {
289 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
290 btrfs_header_level(buf), 1,
296 btrfs_handle_fs_error(fs_info, ret, NULL);
301 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
302 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
303 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
308 owner = btrfs_header_owner(buf);
309 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
310 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
313 if ((owner == root->root_key.objectid ||
314 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
315 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
316 ret = btrfs_inc_ref(trans, root, buf, 1);
320 if (root->root_key.objectid ==
321 BTRFS_TREE_RELOC_OBJECTID) {
322 ret = btrfs_dec_ref(trans, root, buf, 0);
325 ret = btrfs_inc_ref(trans, root, cow, 1);
329 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
332 if (root->root_key.objectid ==
333 BTRFS_TREE_RELOC_OBJECTID)
334 ret = btrfs_inc_ref(trans, root, cow, 1);
336 ret = btrfs_inc_ref(trans, root, cow, 0);
340 if (new_flags != 0) {
341 int level = btrfs_header_level(buf);
343 ret = btrfs_set_disk_extent_flags(trans, buf,
344 new_flags, level, 0);
349 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
350 if (root->root_key.objectid ==
351 BTRFS_TREE_RELOC_OBJECTID)
352 ret = btrfs_inc_ref(trans, root, cow, 1);
354 ret = btrfs_inc_ref(trans, root, cow, 0);
357 ret = btrfs_dec_ref(trans, root, buf, 1);
361 btrfs_clean_tree_block(buf);
367 static struct extent_buffer *alloc_tree_block_no_bg_flush(
368 struct btrfs_trans_handle *trans,
369 struct btrfs_root *root,
371 const struct btrfs_disk_key *disk_key,
375 enum btrfs_lock_nesting nest)
377 struct btrfs_fs_info *fs_info = root->fs_info;
378 struct extent_buffer *ret;
381 * If we are COWing a node/leaf from the extent, chunk, device or free
382 * space trees, make sure that we do not finish block group creation of
383 * pending block groups. We do this to avoid a deadlock.
384 * COWing can result in allocation of a new chunk, and flushing pending
385 * block groups (btrfs_create_pending_block_groups()) can be triggered
386 * when finishing allocation of a new chunk. Creation of a pending block
387 * group modifies the extent, chunk, device and free space trees,
388 * therefore we could deadlock with ourselves since we are holding a
	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
	 * try to COW later.
	 *
391 * For similar reasons, we also need to delay flushing pending block
392 * groups when splitting a leaf or node, from one of those trees, since
393 * we are holding a write lock on it and its parent or when inserting a
394 * new root node for one of those trees.
396 if (root == fs_info->extent_root ||
397 root == fs_info->chunk_root ||
398 root == fs_info->dev_root ||
399 root == fs_info->free_space_root)
400 trans->can_flush_pending_bgs = false;
402 ret = btrfs_alloc_tree_block(trans, root, parent_start,
403 root->root_key.objectid, disk_key, level,
404 hint, empty_size, nest);
405 trans->can_flush_pending_bgs = true;
411 * does the dirty work in cow of a single block. The parent block (if
412 * supplied) is updated to point to the new cow copy. The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
416 * search_start -- an allocation hint for the new block
418 * empty_size -- a hint that you plan on doing more cow. This is the size in
419 * bytes the allocator should try to find free next to the block it returns.
420 * This is just a hint and may be ignored by the allocator.
422 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
423 struct btrfs_root *root,
424 struct extent_buffer *buf,
425 struct extent_buffer *parent, int parent_slot,
426 struct extent_buffer **cow_ret,
427 u64 search_start, u64 empty_size,
428 enum btrfs_lock_nesting nest)
430 struct btrfs_fs_info *fs_info = root->fs_info;
431 struct btrfs_disk_key disk_key;
432 struct extent_buffer *cow;
436 u64 parent_start = 0;
441 btrfs_assert_tree_locked(buf);
443 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
444 trans->transid != fs_info->running_transaction->transid);
445 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
446 trans->transid != root->last_trans);
	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);
455 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
456 parent_start = parent->start;
458 cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
459 level, search_start, empty_size, nest);
463 /* cow is set to blocking by btrfs_init_new_buffer */
465 copy_extent_buffer_full(cow, buf);
466 btrfs_set_header_bytenr(cow, cow->start);
467 btrfs_set_header_generation(cow, trans->transid);
468 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
469 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
470 BTRFS_HEADER_FLAG_RELOC);
471 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
472 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
474 btrfs_set_header_owner(cow, root->root_key.objectid);
476 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
478 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
480 btrfs_tree_unlock(cow);
481 free_extent_buffer(cow);
482 btrfs_abort_transaction(trans, ret);
486 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
487 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
489 btrfs_tree_unlock(cow);
490 free_extent_buffer(cow);
491 btrfs_abort_transaction(trans, ret);
496 if (buf == root->node) {
497 WARN_ON(parent && parent != buf);
498 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
499 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
500 parent_start = buf->start;
502 atomic_inc(&cow->refs);
503 ret = btrfs_tree_mod_log_insert_root(root->node, cow, 1);
505 rcu_assign_pointer(root->node, cow);
507 btrfs_free_tree_block(trans, root, buf, parent_start,
509 free_extent_buffer(buf);
510 add_root_to_dirty_list(root);
512 WARN_ON(trans->transid != btrfs_header_generation(parent));
513 btrfs_tree_mod_log_insert_key(parent, parent_slot,
514 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
515 btrfs_set_node_blockptr(parent, parent_slot,
517 btrfs_set_node_ptr_generation(parent, parent_slot,
519 btrfs_mark_buffer_dirty(parent);
521 ret = btrfs_tree_mod_log_free_eb(buf);
523 btrfs_tree_unlock(cow);
524 free_extent_buffer(cow);
525 btrfs_abort_transaction(trans, ret);
529 btrfs_free_tree_block(trans, root, buf, parent_start,
533 btrfs_tree_unlock(buf);
534 free_extent_buffer_stale(buf);
535 btrfs_mark_buffer_dirty(cow);
540 static inline int should_cow_block(struct btrfs_trans_handle *trans,
541 struct btrfs_root *root,
542 struct extent_buffer *buf)
544 if (btrfs_is_testing(root->fs_info))
547 /* Ensure we can see the FORCE_COW bit */
548 smp_mb__before_atomic();
551 * We do not need to cow a block if
552 * 1) this block is not created or changed in this transaction;
553 * 2) this block does not belong to TREE_RELOC tree;
554 * 3) the root is not forced COW.
556 * What is forced COW:
	 *    when we create a snapshot during a transaction commit, after
	 *    we've finished copying the source root, we must COW the shared
	 *    block to ensure metadata consistency.
	 */
561 if (btrfs_header_generation(buf) == trans->transid &&
562 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
563 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
564 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
565 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
571 * cows a single block, see __btrfs_cow_block for the real work.
572 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet.
 */
575 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
576 struct btrfs_root *root, struct extent_buffer *buf,
577 struct extent_buffer *parent, int parent_slot,
578 struct extent_buffer **cow_ret,
579 enum btrfs_lock_nesting nest)
581 struct btrfs_fs_info *fs_info = root->fs_info;
585 if (test_bit(BTRFS_ROOT_DELETING, &root->state))
587 "COW'ing blocks on a fs root that's being dropped");
589 if (trans->transaction != fs_info->running_transaction)
590 WARN(1, KERN_CRIT "trans %llu running %llu\n",
592 fs_info->running_transaction->transid);
594 if (trans->transid != fs_info->generation)
595 WARN(1, KERN_CRIT "trans %llu running %llu\n",
596 trans->transid, fs_info->generation);
598 if (!should_cow_block(trans, root, buf)) {
604 search_start = buf->start & ~((u64)SZ_1G - 1);
607 * Before CoWing this block for later modification, check if it's
608 * the subtree root and do the delayed subtree trace if needed.
	 * Also, we don't care about the error, as it's handled internally.
	 */
612 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
613 ret = __btrfs_cow_block(trans, root, buf, parent,
614 parent_slot, cow_ret, search_start, 0, nest);
616 trace_btrfs_cow_block(root, buf, *cow_ret);
620 ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
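/*
 * Illustrative sketch, not part of the original file: COWing a locked root
 * node before modifying it, similar to what btrfs_search_slot() does at the
 * top of a write search.  The helper name is hypothetical.
 */
static int __maybe_unused example_cow_root(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root)
{
	struct extent_buffer *eb = btrfs_lock_root_node(root);
	int ret;

	/* No parent, so pass NULL/0; on success 'eb' becomes the new copy. */
	ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb, BTRFS_NESTING_COW);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);
	return ret;
}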
623 * helper function for defrag to decide if two blocks pointed to by a
624 * node are actually close by
626 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
628 if (blocknr < other && other - (blocknr + blocksize) < 32768)
630 if (blocknr > other && blocknr - (other + blocksize) < 32768)
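/*
 * Illustrative example, not part of the original file: with a 16KiB nodesize,
 * blocks at 0 and 32KiB are close (32768 - (0 + 16384) = 16384 < 32768), while
 * blocks at 0 and 64KiB are not (65536 - 16384 = 49152 >= 32768).
 */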
635 #ifdef __LITTLE_ENDIAN
/*
 * Compare two keys.  On little-endian, the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
641 static int comp_keys(const struct btrfs_disk_key *disk_key,
642 const struct btrfs_key *k2)
644 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
646 return btrfs_comp_cpu_keys(k1, k2);
/*
 * compare two keys in a memcmp fashion
 */
654 static int comp_keys(const struct btrfs_disk_key *disk,
655 const struct btrfs_key *k2)
659 btrfs_disk_key_to_cpu(&k1, disk);
661 return btrfs_comp_cpu_keys(&k1, k2);
/*
 * same as comp_keys only with two btrfs_key's
 */
668 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
670 if (k1->objectid > k2->objectid)
672 if (k1->objectid < k2->objectid)
674 if (k1->type > k2->type)
676 if (k1->type < k2->type)
678 if (k1->offset > k2->offset)
680 if (k1->offset < k2->offset)
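/*
 * Illustrative sketch, not part of the original file: keys order by objectid,
 * then type, then offset.  The helper name is hypothetical.
 */
static void __maybe_unused example_key_order(void)
{
	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY, .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = BTRFS_DIR_ITEM_KEY, .offset = 0 };

	/* Same objectid, but BTRFS_INODE_ITEM_KEY (1) < BTRFS_DIR_ITEM_KEY (84). */
	WARN_ON(btrfs_comp_cpu_keys(&a, &b) >= 0);
}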
686 * this is used by the defrag code to go through all the
687 * leaves pointed to by a node and reallocate them so that
688 * disk order is close to key order
690 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
691 struct btrfs_root *root, struct extent_buffer *parent,
692 int start_slot, u64 *last_ret,
693 struct btrfs_key *progress)
695 struct btrfs_fs_info *fs_info = root->fs_info;
696 struct extent_buffer *cur;
698 u64 search_start = *last_ret;
706 int progress_passed = 0;
707 struct btrfs_disk_key disk_key;
709 WARN_ON(trans->transaction != fs_info->running_transaction);
710 WARN_ON(trans->transid != fs_info->generation);
712 parent_nritems = btrfs_header_nritems(parent);
713 blocksize = fs_info->nodesize;
714 end_slot = parent_nritems - 1;
716 if (parent_nritems <= 1)
719 for (i = start_slot; i <= end_slot; i++) {
722 btrfs_node_key(parent, &disk_key, i);
723 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
727 blocknr = btrfs_node_blockptr(parent, i);
729 last_block = blocknr;
732 other = btrfs_node_blockptr(parent, i - 1);
733 close = close_blocks(blocknr, other, blocksize);
735 if (!close && i < end_slot) {
736 other = btrfs_node_blockptr(parent, i + 1);
737 close = close_blocks(blocknr, other, blocksize);
740 last_block = blocknr;
744 cur = btrfs_read_node_slot(parent, i);
747 if (search_start == 0)
748 search_start = last_block;
750 btrfs_tree_lock(cur);
751 err = __btrfs_cow_block(trans, root, cur, parent, i,
754 (end_slot - i) * blocksize),
757 btrfs_tree_unlock(cur);
758 free_extent_buffer(cur);
761 search_start = cur->start;
762 last_block = cur->start;
763 *last_ret = search_start;
764 btrfs_tree_unlock(cur);
765 free_extent_buffer(cur);
771 * search for key in the extent_buffer. The items start at offset p,
772 * and they are item_size apart. There are 'max' items in p.
774 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
778 * slot may point to max if the key is bigger than all of the keys
780 static noinline int generic_bin_search(struct extent_buffer *eb,
781 unsigned long p, int item_size,
782 const struct btrfs_key *key,
788 const int key_size = sizeof(struct btrfs_disk_key);
791 btrfs_err(eb->fs_info,
792 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
793 __func__, low, high, eb->start,
794 btrfs_header_owner(eb), btrfs_header_level(eb));
800 unsigned long offset;
801 struct btrfs_disk_key *tmp;
802 struct btrfs_disk_key unaligned;
805 mid = (low + high) / 2;
806 offset = p + mid * item_size;
807 oip = offset_in_page(offset);
809 if (oip + key_size <= PAGE_SIZE) {
810 const unsigned long idx = get_eb_page_index(offset);
811 char *kaddr = page_address(eb->pages[idx]);
813 oip = get_eb_offset_in_page(eb, offset);
814 tmp = (struct btrfs_disk_key *)(kaddr + oip);
816 read_extent_buffer(eb, &unaligned, offset, key_size);
820 ret = comp_keys(tmp, key);
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int *slot)
{
	if (btrfs_header_level(eb) == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}
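/*
 * Illustrative sketch, not part of the original file: the return contract of
 * btrfs_bin_search() as used by callers.  The helper name is hypothetical.
 */
static int __maybe_unused example_find_slot(struct extent_buffer *eb,
					    const struct btrfs_key *key,
					    int *slot)
{
	int ret = btrfs_bin_search(eb, key, slot);

	/*
	 * ret == 0: exact match, *slot is the item's slot.
	 * ret == 1: not found, *slot is where the key would be inserted.
	 * ret < 0:  an error occurred while reading the buffer.
	 */
	return ret;
}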
856 static void root_add_used(struct btrfs_root *root, u32 size)
858 spin_lock(&root->accounting_lock);
859 btrfs_set_root_used(&root->root_item,
860 btrfs_root_used(&root->root_item) + size);
861 spin_unlock(&root->accounting_lock);
864 static void root_sub_used(struct btrfs_root *root, u32 size)
866 spin_lock(&root->accounting_lock);
867 btrfs_set_root_used(&root->root_item,
868 btrfs_root_used(&root->root_item) - size);
869 spin_unlock(&root->accounting_lock);
/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
875 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
878 int level = btrfs_header_level(parent);
879 struct extent_buffer *eb;
880 struct btrfs_key first_key;
882 if (slot < 0 || slot >= btrfs_header_nritems(parent))
883 return ERR_PTR(-ENOENT);
887 btrfs_node_key_to_cpu(parent, &first_key, slot);
888 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
889 btrfs_header_owner(parent),
890 btrfs_node_ptr_generation(parent, slot),
891 level - 1, &first_key);
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}
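/*
 * Illustrative sketch, not part of the original file: visiting a child block
 * referenced from slot 'slot' of a locked parent.  The child comes back
 * referenced but unlocked, so the caller locks and later releases it.  The
 * helper name is hypothetical.
 */
static int __maybe_unused example_visit_child(struct extent_buffer *parent, int slot)
{
	struct extent_buffer *child;

	child = btrfs_read_node_slot(parent, slot);
	if (IS_ERR(child))
		return PTR_ERR(child);

	btrfs_tree_lock(child);
	/* ... inspect or modify the child here ... */
	btrfs_tree_unlock(child);
	free_extent_buffer(child);
	return 0;
}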
901 * node level balancing, used to make sure nodes are in proper order for
902 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
905 static noinline int balance_level(struct btrfs_trans_handle *trans,
906 struct btrfs_root *root,
907 struct btrfs_path *path, int level)
909 struct btrfs_fs_info *fs_info = root->fs_info;
910 struct extent_buffer *right = NULL;
911 struct extent_buffer *mid;
912 struct extent_buffer *left = NULL;
913 struct extent_buffer *parent = NULL;
917 int orig_slot = path->slots[level];
922 mid = path->nodes[level];
924 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
925 WARN_ON(btrfs_header_generation(mid) != trans->transid);
927 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
929 if (level < BTRFS_MAX_LEVEL - 1) {
930 parent = path->nodes[level + 1];
931 pslot = path->slots[level + 1];
935 * deal with the case where there is only one pointer in the root
936 * by promoting the node below to a root
939 struct extent_buffer *child;
941 if (btrfs_header_nritems(mid) != 1)
944 /* promote the child to a root */
945 child = btrfs_read_node_slot(mid, 0);
947 ret = PTR_ERR(child);
948 btrfs_handle_fs_error(fs_info, ret, NULL);
952 btrfs_tree_lock(child);
953 ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
956 btrfs_tree_unlock(child);
957 free_extent_buffer(child);
961 ret = btrfs_tree_mod_log_insert_root(root->node, child, 1);
963 rcu_assign_pointer(root->node, child);
965 add_root_to_dirty_list(root);
966 btrfs_tree_unlock(child);
968 path->locks[level] = 0;
969 path->nodes[level] = NULL;
970 btrfs_clean_tree_block(mid);
971 btrfs_tree_unlock(mid);
972 /* once for the path */
973 free_extent_buffer(mid);
975 root_sub_used(root, mid->len);
976 btrfs_free_tree_block(trans, root, mid, 0, 1);
977 /* once for the root ptr */
978 free_extent_buffer_stale(mid);
981 if (btrfs_header_nritems(mid) >
982 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
985 left = btrfs_read_node_slot(parent, pslot - 1);
990 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
991 wret = btrfs_cow_block(trans, root, left,
992 parent, pslot - 1, &left,
993 BTRFS_NESTING_LEFT_COW);
1000 right = btrfs_read_node_slot(parent, pslot + 1);
1005 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1006 wret = btrfs_cow_block(trans, root, right,
1007 parent, pslot + 1, &right,
1008 BTRFS_NESTING_RIGHT_COW);
1015 /* first, try to make some room in the middle buffer */
1017 orig_slot += btrfs_header_nritems(left);
1018 wret = push_node_left(trans, left, mid, 1);
	 * then try to empty the rightmost buffer into the middle
1027 wret = push_node_left(trans, mid, right, 1);
1028 if (wret < 0 && wret != -ENOSPC)
1030 if (btrfs_header_nritems(right) == 0) {
1031 btrfs_clean_tree_block(right);
1032 btrfs_tree_unlock(right);
1033 del_ptr(root, path, level + 1, pslot + 1);
1034 root_sub_used(root, right->len);
1035 btrfs_free_tree_block(trans, root, right, 0, 1);
1036 free_extent_buffer_stale(right);
1039 struct btrfs_disk_key right_key;
1040 btrfs_node_key(right, &right_key, 0);
1041 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1042 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1044 btrfs_set_node_key(parent, &right_key, pslot + 1);
1045 btrfs_mark_buffer_dirty(parent);
1048 if (btrfs_header_nritems(mid) == 1) {
1050 * we're not allowed to leave a node with one item in the
1051 * tree during a delete. A deletion from lower in the tree
1052 * could try to delete the only pointer in this node.
1053 * So, pull some keys from the left.
1054 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right.
		 */
1060 btrfs_handle_fs_error(fs_info, ret, NULL);
1063 wret = balance_node_right(trans, mid, left);
1069 wret = push_node_left(trans, left, mid, 1);
1075 if (btrfs_header_nritems(mid) == 0) {
1076 btrfs_clean_tree_block(mid);
1077 btrfs_tree_unlock(mid);
1078 del_ptr(root, path, level + 1, pslot);
1079 root_sub_used(root, mid->len);
1080 btrfs_free_tree_block(trans, root, mid, 0, 1);
1081 free_extent_buffer_stale(mid);
1084 /* update the parent key to reflect our changes */
1085 struct btrfs_disk_key mid_key;
1086 btrfs_node_key(mid, &mid_key, 0);
1087 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1088 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1090 btrfs_set_node_key(parent, &mid_key, pslot);
1091 btrfs_mark_buffer_dirty(parent);
1094 /* update the path */
1096 if (btrfs_header_nritems(left) > orig_slot) {
1097 atomic_inc(&left->refs);
1098 /* left was locked after cow */
1099 path->nodes[level] = left;
1100 path->slots[level + 1] -= 1;
1101 path->slots[level] = orig_slot;
1103 btrfs_tree_unlock(mid);
1104 free_extent_buffer(mid);
1107 orig_slot -= btrfs_header_nritems(left);
1108 path->slots[level] = orig_slot;
1111 /* double check we haven't messed things up */
1113 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1117 btrfs_tree_unlock(right);
1118 free_extent_buffer(right);
1121 if (path->nodes[level] != left)
1122 btrfs_tree_unlock(left);
1123 free_extent_buffer(left);
1128 /* Node balancing for insertion. Here we only split or push nodes around
1129 * when they are completely full. This is also done top down, so we
1130 * have to be pessimistic.
1132 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1133 struct btrfs_root *root,
1134 struct btrfs_path *path, int level)
1136 struct btrfs_fs_info *fs_info = root->fs_info;
1137 struct extent_buffer *right = NULL;
1138 struct extent_buffer *mid;
1139 struct extent_buffer *left = NULL;
1140 struct extent_buffer *parent = NULL;
1144 int orig_slot = path->slots[level];
1149 mid = path->nodes[level];
1150 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1152 if (level < BTRFS_MAX_LEVEL - 1) {
1153 parent = path->nodes[level + 1];
1154 pslot = path->slots[level + 1];
1160 left = btrfs_read_node_slot(parent, pslot - 1);
1164 /* first, try to make some room in the middle buffer */
1168 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
1170 left_nr = btrfs_header_nritems(left);
1171 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1174 ret = btrfs_cow_block(trans, root, left, parent,
1176 BTRFS_NESTING_LEFT_COW);
1180 wret = push_node_left(trans, left, mid, 0);
1186 struct btrfs_disk_key disk_key;
1187 orig_slot += left_nr;
1188 btrfs_node_key(mid, &disk_key, 0);
1189 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1190 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1192 btrfs_set_node_key(parent, &disk_key, pslot);
1193 btrfs_mark_buffer_dirty(parent);
1194 if (btrfs_header_nritems(left) > orig_slot) {
1195 path->nodes[level] = left;
1196 path->slots[level + 1] -= 1;
1197 path->slots[level] = orig_slot;
1198 btrfs_tree_unlock(mid);
1199 free_extent_buffer(mid);
1202 btrfs_header_nritems(left);
1203 path->slots[level] = orig_slot;
1204 btrfs_tree_unlock(left);
1205 free_extent_buffer(left);
1209 btrfs_tree_unlock(left);
1210 free_extent_buffer(left);
1212 right = btrfs_read_node_slot(parent, pslot + 1);
	 * then try to empty the rightmost buffer into the middle
1222 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1224 right_nr = btrfs_header_nritems(right);
1225 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1228 ret = btrfs_cow_block(trans, root, right,
1230 &right, BTRFS_NESTING_RIGHT_COW);
1234 wret = balance_node_right(trans, right, mid);
1240 struct btrfs_disk_key disk_key;
1242 btrfs_node_key(right, &disk_key, 0);
1243 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1244 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1246 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1247 btrfs_mark_buffer_dirty(parent);
1249 if (btrfs_header_nritems(mid) <= orig_slot) {
1250 path->nodes[level] = right;
1251 path->slots[level + 1] += 1;
1252 path->slots[level] = orig_slot -
1253 btrfs_header_nritems(mid);
1254 btrfs_tree_unlock(mid);
1255 free_extent_buffer(mid);
1257 btrfs_tree_unlock(right);
1258 free_extent_buffer(right);
1262 btrfs_tree_unlock(right);
1263 free_extent_buffer(right);
1269 * readahead one full node of leaves, finding things that are close
1270 * to the block in 'slot', and triggering ra on them.
1272 static void reada_for_search(struct btrfs_fs_info *fs_info,
1273 struct btrfs_path *path,
1274 int level, int slot, u64 objectid)
1276 struct extent_buffer *node;
1277 struct btrfs_disk_key disk_key;
1282 struct extent_buffer *eb;
1290 if (!path->nodes[level])
1293 node = path->nodes[level];
1295 search = btrfs_node_blockptr(node, slot);
1296 blocksize = fs_info->nodesize;
1297 eb = find_extent_buffer(fs_info, search);
1299 free_extent_buffer(eb);
1305 nritems = btrfs_header_nritems(node);
1309 if (path->reada == READA_BACK) {
1313 } else if (path->reada == READA_FORWARD) {
1318 if (path->reada == READA_BACK && objectid) {
1319 btrfs_node_key(node, &disk_key, nr);
1320 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1323 search = btrfs_node_blockptr(node, nr);
1324 if ((search <= target && target - search <= 65536) ||
1325 (search > target && search - target <= 65536)) {
1326 btrfs_readahead_node_child(node, nr);
1330 if ((nread > 65536 || nscan > 32))
1335 static noinline void reada_for_balance(struct btrfs_path *path, int level)
1337 struct extent_buffer *parent;
1341 parent = path->nodes[level + 1];
1345 nritems = btrfs_header_nritems(parent);
1346 slot = path->slots[level + 1];
1349 btrfs_readahead_node_child(parent, slot - 1);
1350 if (slot + 1 < nritems)
1351 btrfs_readahead_node_child(parent, slot + 1);
1356 * when we walk down the tree, it is usually safe to unlock the higher layers
1357 * in the tree. The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
1361 * callers might also have set path->keep_locks, which tells this code to keep
1362 * the lock if the path points to the last slot in the block. This is part of
1363 * walking through the tree, and selecting the next slot in the higher block.
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
 * if lowest_unlock is 1, level 0 won't be unlocked.
 */
1368 static noinline void unlock_up(struct btrfs_path *path, int level,
1369 int lowest_unlock, int min_write_lock_level,
1370 int *write_lock_level)
1373 int skip_level = level;
1375 struct extent_buffer *t;
1377 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1378 if (!path->nodes[i])
1380 if (!path->locks[i])
1382 if (!no_skips && path->slots[i] == 0) {
1386 if (!no_skips && path->keep_locks) {
1389 nritems = btrfs_header_nritems(t);
1390 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1395 if (skip_level < i && i >= lowest_unlock)
1399 if (i >= lowest_unlock && i > skip_level) {
1400 btrfs_tree_unlock_rw(t, path->locks[i]);
1402 if (write_lock_level &&
1403 i > min_write_lock_level &&
1404 i <= *write_lock_level) {
1405 *write_lock_level = i - 1;
1412 * helper function for btrfs_search_slot. The goal is to find a block
1413 * in cache without setting the path to blocking. If we find the block
1414 * we return zero and the path is unchanged.
1416 * If we can't find the block, we set the path blocking and do some
1417 * reada. -EAGAIN is returned and the search must be repeated.
1420 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
1421 struct extent_buffer **eb_ret, int level, int slot,
1422 const struct btrfs_key *key)
1424 struct btrfs_fs_info *fs_info = root->fs_info;
1427 struct extent_buffer *tmp;
1428 struct btrfs_key first_key;
1432 blocknr = btrfs_node_blockptr(*eb_ret, slot);
1433 gen = btrfs_node_ptr_generation(*eb_ret, slot);
1434 parent_level = btrfs_header_level(*eb_ret);
1435 btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);
1437 tmp = find_extent_buffer(fs_info, blocknr);
1439 /* first we do an atomic uptodate check */
1440 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
1442 * Do extra check for first_key, eb can be stale due to
1443 * being cached, read from scrub, or have multiple
1444 * parents (shared tree blocks).
1446 if (btrfs_verify_level_key(tmp,
1447 parent_level - 1, &first_key, gen)) {
1448 free_extent_buffer(tmp);
1455 /* now we're allowed to do a blocking uptodate check */
1456 ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
1461 free_extent_buffer(tmp);
1462 btrfs_release_path(p);
1467 * reduce lock contention at high levels
1468 * of the btree by dropping locks before
1469 * we read. Don't release the lock on the current
1470 * level because we need to walk this node to figure
1471 * out which blocks to read.
1473 btrfs_unlock_up_safe(p, level + 1);
1475 if (p->reada != READA_NONE)
1476 reada_for_search(fs_info, p, level, slot, key->objectid);
1479 tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid,
1480 gen, parent_level - 1, &first_key);
1483 * If the read above didn't mark this buffer up to date,
1484 * it will never end up being up to date. Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
1488 if (!extent_buffer_uptodate(tmp))
1490 free_extent_buffer(tmp);
1495 btrfs_release_path(p);
1500 * helper function for btrfs_search_slot. This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * repeat the search.
1509 setup_nodes_for_search(struct btrfs_trans_handle *trans,
1510 struct btrfs_root *root, struct btrfs_path *p,
1511 struct extent_buffer *b, int level, int ins_len,
1512 int *write_lock_level)
1514 struct btrfs_fs_info *fs_info = root->fs_info;
1517 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1518 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
1520 if (*write_lock_level < level + 1) {
1521 *write_lock_level = level + 1;
1522 btrfs_release_path(p);
1526 reada_for_balance(p, level);
1527 ret = split_node(trans, root, p, level);
1529 b = p->nodes[level];
1530 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1531 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
1533 if (*write_lock_level < level + 1) {
1534 *write_lock_level = level + 1;
1535 btrfs_release_path(p);
1539 reada_for_balance(p, level);
1540 ret = balance_level(trans, root, p, level);
1544 b = p->nodes[level];
1546 btrfs_release_path(p);
1549 BUG_ON(btrfs_header_nritems(b) == 1);
1554 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1555 u64 iobjectid, u64 ioff, u8 key_type,
1556 struct btrfs_key *found_key)
1559 struct btrfs_key key;
1560 struct extent_buffer *eb;
1565 key.type = key_type;
1566 key.objectid = iobjectid;
1569 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1573 eb = path->nodes[0];
1574 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1575 ret = btrfs_next_leaf(fs_root, path);
1578 eb = path->nodes[0];
1581 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1582 if (found_key->type != key.type ||
1583 found_key->objectid != key.objectid)
1589 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
1590 struct btrfs_path *p,
1591 int write_lock_level)
1593 struct btrfs_fs_info *fs_info = root->fs_info;
1594 struct extent_buffer *b;
1598 /* We try very hard to do read locks on the root */
1599 root_lock = BTRFS_READ_LOCK;
1601 if (p->search_commit_root) {
1603 * The commit roots are read only so we always do read locks,
1604 * and we always must hold the commit_root_sem when doing
1605 * searches on them, the only exception is send where we don't
1606 * want to block transaction commits for a long time, so
1607 * we need to clone the commit root in order to avoid races
1608 * with transaction commits that create a snapshot of one of
1609 * the roots used by a send operation.
1611 if (p->need_commit_sem) {
1612 down_read(&fs_info->commit_root_sem);
1613 b = btrfs_clone_extent_buffer(root->commit_root);
1614 up_read(&fs_info->commit_root_sem);
1616 return ERR_PTR(-ENOMEM);
1619 b = root->commit_root;
1620 atomic_inc(&b->refs);
1622 level = btrfs_header_level(b);
1624 * Ensure that all callers have set skip_locking when
1625 * p->search_commit_root = 1.
1627 ASSERT(p->skip_locking == 1);
1632 if (p->skip_locking) {
1633 b = btrfs_root_node(root);
1634 level = btrfs_header_level(b);
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
1642 if (write_lock_level < BTRFS_MAX_LEVEL) {
1644 * We don't know the level of the root node until we actually
1645 * have it read locked
1647 b = btrfs_read_lock_root_node(root);
1648 level = btrfs_header_level(b);
1649 if (level > write_lock_level)
1652 /* Whoops, must trade for write lock */
1653 btrfs_tree_read_unlock(b);
1654 free_extent_buffer(b);
1657 b = btrfs_lock_root_node(root);
1658 root_lock = BTRFS_WRITE_LOCK;
1660 /* The level might have changed, check again */
1661 level = btrfs_header_level(b);
1664 p->nodes[level] = b;
1665 if (!p->skip_locking)
1666 p->locks[level] = root_lock;
1668 * Callers are responsible for dropping b's references.
1675 * btrfs_search_slot - look for a key in a tree and perform necessary
1676 * modifications to preserve tree invariants.
1678 * @trans: Handle of transaction, used when modifying the tree
1679 * @p: Holds all btree nodes along the search path
1680 * @root: The root node of the tree
1681 * @key: The key we are looking for
1682 * @ins_len: Indicates purpose of search:
1683 * >0 for inserts it's size of item inserted (*)
1685 * 0 for plain searches, not modifying the tree
1687 * (*) If size of item inserted doesn't include
 *              sizeof(struct btrfs_item), then p->search_for_extension must
 *              be set.
 * @cow:	boolean indicating whether CoW operations should be performed.
 *		Must always be 1 when modifying the tree.
1693 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
1694 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
1696 * If @key is found, 0 is returned and you can find the item in the leaf level
1697 * of the path (level 0)
1699 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
1700 * points to the slot where it should be inserted
1702 * If an error is encountered while searching the tree a negative error number
1705 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1706 const struct btrfs_key *key, struct btrfs_path *p,
1707 int ins_len, int cow)
1709 struct extent_buffer *b;
1714 int lowest_unlock = 1;
1715 /* everything at write_lock_level or lower must be write locked */
1716 int write_lock_level = 0;
1717 u8 lowest_level = 0;
1718 int min_write_lock_level;
1721 lowest_level = p->lowest_level;
1722 WARN_ON(lowest_level && ins_len > 0);
1723 WARN_ON(p->nodes[0] != NULL);
1724 BUG_ON(!cow && ins_len);
		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * locks for those levels as well
		 */
1733 write_lock_level = 2;
1734 } else if (ins_len > 0) {
1736 * for inserting items, make sure we have a write lock on
1737 * level 1 so we can update keys
1739 write_lock_level = 1;
1743 write_lock_level = -1;
1745 if (cow && (p->keep_locks || p->lowest_level))
1746 write_lock_level = BTRFS_MAX_LEVEL;
1748 min_write_lock_level = write_lock_level;
1752 b = btrfs_search_slot_get_root(root, p, write_lock_level);
1761 level = btrfs_header_level(b);
1764 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
1767 * if we don't really need to cow this block
1768 * then we don't want to set the path blocking,
1769 * so we test it here
1771 if (!should_cow_block(trans, root, b)) {
1772 trans->dirty = true;
			/*
			 * must have write locks on this node and the
			 * level above
			 */
1780 if (level > write_lock_level ||
1781 (level + 1 > write_lock_level &&
1782 level + 1 < BTRFS_MAX_LEVEL &&
1783 p->nodes[level + 1])) {
1784 write_lock_level = level + 1;
1785 btrfs_release_path(p);
1790 err = btrfs_cow_block(trans, root, b, NULL, 0,
1794 err = btrfs_cow_block(trans, root, b,
1795 p->nodes[level + 1],
1796 p->slots[level + 1], &b,
1804 p->nodes[level] = b;
1806 * Leave path with blocking locks to avoid massive
		 * lock context switches; this is done on purpose.
1811 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
1813 * It is safe to drop the lock on our parent before we
1814 * go through the expensive btree search on b.
1816 * If we're inserting or deleting (ins_len != 0), then we might
1817 * be changing slot zero, which may require changing the parent.
1818 * So, we can't drop the lock until after we know which slot
1819 * we're operating on.
1821 if (!ins_len && !p->keep_locks) {
1824 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
1825 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
1831 * If btrfs_bin_search returns an exact match (prev_cmp == 0)
1832 * we can safely assume the target key will always be in slot 0
1833 * on lower levels due to the invariants BTRFS' btree provides,
1834 * namely that a btrfs_key_ptr entry always points to the
1835 * lowest key in the child node, thus we can skip searching
1838 if (prev_cmp == 0) {
1842 ret = btrfs_bin_search(b, key, &slot);
1849 p->slots[level] = slot;
1851 * Item key already exists. In this case, if we are
1852 * allowed to insert the item (for example, in dir_item
1853 * case, item key collision is allowed), it will be
1854 * merged with the original item. Only the item size
1855 * grows, no new btrfs item will be added. If
1856 * search_for_extension is not set, ins_len already
			 * accounts for the size of struct btrfs_item; deduct
			 * it here so the leaf space check will be correct.
			 */
1860 if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
1861 ASSERT(ins_len >= sizeof(struct btrfs_item));
1862 ins_len -= sizeof(struct btrfs_item);
1865 btrfs_leaf_free_space(b) < ins_len) {
1866 if (write_lock_level < 1) {
1867 write_lock_level = 1;
1868 btrfs_release_path(p);
1872 err = split_leaf(trans, root, key,
1873 p, ins_len, ret == 0);
1881 if (!p->search_for_split)
1882 unlock_up(p, level, lowest_unlock,
1883 min_write_lock_level, NULL);
1886 if (ret && slot > 0) {
1890 p->slots[level] = slot;
1891 err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
1899 b = p->nodes[level];
1900 slot = p->slots[level];
1903 * Slot 0 is special, if we change the key we have to update
			 * the parent pointer which means we must have a write lock on
			 * the parent
			 */
1907 if (slot == 0 && ins_len && write_lock_level < level + 1) {
1908 write_lock_level = level + 1;
1909 btrfs_release_path(p);
1913 unlock_up(p, level, lowest_unlock, min_write_lock_level,
1916 if (level == lowest_level) {
1922 err = read_block_for_search(root, p, &b, level, slot, key);
1930 if (!p->skip_locking) {
1931 level = btrfs_header_level(b);
1932 if (level <= write_lock_level) {
1934 p->locks[level] = BTRFS_WRITE_LOCK;
1936 btrfs_tree_read_lock(b);
1937 p->locks[level] = BTRFS_READ_LOCK;
1939 p->nodes[level] = b;
1944 if (ret < 0 && !p->skip_release_on_error)
1945 btrfs_release_path(p);
1948 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
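/*
 * Illustrative sketch, not part of the original file: searching in order to
 * insert an item of 'data_size' bytes inside a transaction.  The helper name
 * is hypothetical.
 */
static int __maybe_unused example_search_for_insert(struct btrfs_trans_handle *trans,
						    struct btrfs_root *root,
						    const struct btrfs_key *key,
						    struct btrfs_path *path,
						    u32 data_size)
{
	int ret;

	/* ins_len includes the item header; cow must be 1 when modifying. */
	ret = btrfs_search_slot(trans, root, key, path,
				data_size + sizeof(struct btrfs_item), 1);
	if (ret == 0)
		return -EEXIST;		/* the key is already present */
	if (ret < 0)
		return ret;

	/* ret == 1: path->slots[0] is where the new item belongs. */
	return 0;
}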
1951 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
1952 * current state of the tree together with the operations recorded in the tree
1953 * modification log to search for the key in a previous version of this tree, as
1954 * denoted by the time_seq parameter.
1956 * Naturally, there is no support for insert, delete or cow operations.
1958 * The resulting path and return value will be set up as if we called
1959 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
1961 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
1962 struct btrfs_path *p, u64 time_seq)
1964 struct btrfs_fs_info *fs_info = root->fs_info;
1965 struct extent_buffer *b;
1970 int lowest_unlock = 1;
1971 u8 lowest_level = 0;
1973 lowest_level = p->lowest_level;
1974 WARN_ON(p->nodes[0] != NULL);
1976 if (p->search_commit_root) {
1978 return btrfs_search_slot(NULL, root, key, p, 0, 0);
1982 b = btrfs_get_old_root(root, time_seq);
1987 level = btrfs_header_level(b);
1988 p->locks[level] = BTRFS_READ_LOCK;
1993 level = btrfs_header_level(b);
1994 p->nodes[level] = b;
1997 * we have a lock on b and as long as we aren't changing
	 * the tree, there is no way for the items in b to change.
1999 * It is safe to drop the lock on our parent before we
2000 * go through the expensive btree search on b.
2002 btrfs_unlock_up_safe(p, level + 1);
2004 ret = btrfs_bin_search(b, key, &slot);
2009 p->slots[level] = slot;
2010 unlock_up(p, level, lowest_unlock, 0, NULL);
2014 if (ret && slot > 0) {
2018 p->slots[level] = slot;
2019 unlock_up(p, level, lowest_unlock, 0, NULL);
2021 if (level == lowest_level) {
2027 err = read_block_for_search(root, p, &b, level, slot, key);
2035 level = btrfs_header_level(b);
2036 btrfs_tree_read_lock(b);
2037 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
2042 p->locks[level] = BTRFS_READ_LOCK;
2043 p->nodes[level] = b;
2048 btrfs_release_path(p);
2054 * helper to use instead of search slot if no exact match is needed but
2055 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
2058 * When return_any and find_higher are both true, and no higher item is found,
2059 * return the next lower instead.
2060 * When return_any is true and find_higher is false, and no lower item is found,
2061 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error.
 */
2065 int btrfs_search_slot_for_read(struct btrfs_root *root,
2066 const struct btrfs_key *key,
2067 struct btrfs_path *p, int find_higher,
2071 struct extent_buffer *leaf;
2074 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2078 * a return value of 1 means the path is at the position where the
2079 * item should be inserted. Normally this is the next bigger item,
2080 * but in case the previous item is the last in a leaf, path points
	 * to the first free slot in the previous leaf, i.e. at an invalid
	 * slot.
	 */
2087 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2088 ret = btrfs_next_leaf(root, p);
			/*
			 * no higher item found, return the next
			 * lower instead
			 */
2099 btrfs_release_path(p);
2103 if (p->slots[0] == 0) {
2104 ret = btrfs_prev_leaf(root, p);
2109 if (p->slots[0] == btrfs_header_nritems(leaf))
			/*
			 * no lower item found, return the next
			 * higher instead
			 */
2121 btrfs_release_path(p);
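/*
 * Illustrative sketch, not part of the original file: find the first item at
 * or after 'key', falling back to the closest lower item when nothing higher
 * exists.  The helper name is hypothetical.
 */
static int __maybe_unused example_find_nearest(struct btrfs_root *root,
					       const struct btrfs_key *key,
					       struct btrfs_path *path,
					       struct btrfs_key *found)
{
	int ret;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 1);
	if (ret)	/* < 0 on error, 1 if the tree is empty */
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
	return 0;
}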
2131 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels.
 */
2138 static void fixup_low_keys(struct btrfs_path *path,
2139 struct btrfs_disk_key *key, int level)
2142 struct extent_buffer *t;
2145 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2146 int tslot = path->slots[i];
2148 if (!path->nodes[i])
2151 ret = btrfs_tree_mod_log_insert_key(t, tslot,
2152 BTRFS_MOD_LOG_KEY_REPLACE, GFP_ATOMIC);
2154 btrfs_set_node_key(t, key, tslot);
2155 btrfs_mark_buffer_dirty(path->nodes[i]);
 * This function isn't completely safe.  It's the caller's responsibility
 * that the new key won't break the order.
 */
2167 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
2168 struct btrfs_path *path,
2169 const struct btrfs_key *new_key)
2171 struct btrfs_disk_key disk_key;
2172 struct extent_buffer *eb;
2175 eb = path->nodes[0];
2176 slot = path->slots[0];
2178 btrfs_item_key(eb, &disk_key, slot - 1);
2179 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
2181 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2182 slot, btrfs_disk_key_objectid(&disk_key),
2183 btrfs_disk_key_type(&disk_key),
2184 btrfs_disk_key_offset(&disk_key),
2185 new_key->objectid, new_key->type,
2187 btrfs_print_leaf(eb);
2191 if (slot < btrfs_header_nritems(eb) - 1) {
2192 btrfs_item_key(eb, &disk_key, slot + 1);
2193 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
2195 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2196 slot, btrfs_disk_key_objectid(&disk_key),
2197 btrfs_disk_key_type(&disk_key),
2198 btrfs_disk_key_offset(&disk_key),
2199 new_key->objectid, new_key->type,
2201 btrfs_print_leaf(eb);
2206 btrfs_cpu_key_to_disk(&disk_key, new_key);
2207 btrfs_set_item_key(eb, &disk_key, slot);
2208 btrfs_mark_buffer_dirty(eb);
2210 fixup_low_keys(path, &disk_key, 1);
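/*
 * Illustrative sketch, not part of the original file: changing the offset of
 * the key at path->slots[0] while keeping the leaf ordered, as done e.g. when
 * trimming the front of an extent item.  The helper name and 'new_offset'
 * parameter are hypothetical.
 */
static void __maybe_unused example_bump_key_offset(struct btrfs_fs_info *fs_info,
						   struct btrfs_path *path,
						   u64 new_offset)
{
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	/* Only safe because the new key still sorts between its neighbours. */
	key.offset = new_offset;
	btrfs_set_item_key_safe(fs_info, path, &key);
}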
2214 * Check key order of two sibling extent buffers.
2216 * Return true if something is wrong.
2217 * Return false if everything is fine.
2219 * Tree-checker only works inside one tree block, thus the following
2220 * corruption can not be detected by tree-checker:
2222 * Leaf @left | Leaf @right
2223 * --------------------------------------------------------------
2224 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
2226 * Key f6 in leaf @left itself is valid, but not valid when the next
2227 * key in leaf @right is 7.
2228 * This can only be checked at tree block merge time.
2229 * And since tree checker has ensured all key order in each tree block
 * is correct, we only need to bother the last key of @left and the first
 * key of @right.
 */
2233 static bool check_sibling_keys(struct extent_buffer *left,
2234 struct extent_buffer *right)
2236 struct btrfs_key left_last;
2237 struct btrfs_key right_first;
2238 int level = btrfs_header_level(left);
2239 int nr_left = btrfs_header_nritems(left);
2240 int nr_right = btrfs_header_nritems(right);
2242 /* No key to check in one of the tree blocks */
2243 if (!nr_left || !nr_right)
2247 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2248 btrfs_node_key_to_cpu(right, &right_first, 0);
2250 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2251 btrfs_item_key_to_cpu(right, &right_first, 0);
2254 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
2255 btrfs_crit(left->fs_info,
2256 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2257 left_last.objectid, left_last.type,
2258 left_last.offset, right_first.objectid,
2259 right_first.type, right_first.offset);
 * try to push data from one node into the next node left in the
 * tree.
 *
2269 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2270 * error, and > 0 if there was no room in the left hand block.
2272 static int push_node_left(struct btrfs_trans_handle *trans,
2273 struct extent_buffer *dst,
2274 struct extent_buffer *src, int empty)
2276 struct btrfs_fs_info *fs_info = trans->fs_info;
2282 src_nritems = btrfs_header_nritems(src);
2283 dst_nritems = btrfs_header_nritems(dst);
2284 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2285 WARN_ON(btrfs_header_generation(src) != trans->transid);
2286 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2288 if (!empty && src_nritems <= 8)
2291 if (push_items <= 0)
2295 push_items = min(src_nritems, push_items);
2296 if (push_items < src_nritems) {
2297 /* leave at least 8 pointers in the node if
2298 * we aren't going to empty it
2300 if (src_nritems - push_items < 8) {
2301 if (push_items <= 8)
2307 push_items = min(src_nritems - 8, push_items);
2309 /* dst is the left eb, src is the middle eb */
2310 if (check_sibling_keys(dst, src)) {
2312 btrfs_abort_transaction(trans, ret);
2315 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2317 btrfs_abort_transaction(trans, ret);
2320 copy_extent_buffer(dst, src,
2321 btrfs_node_key_ptr_offset(dst_nritems),
2322 btrfs_node_key_ptr_offset(0),
2323 push_items * sizeof(struct btrfs_key_ptr));
2325 if (push_items < src_nritems) {
2327 * Don't call btrfs_tree_mod_log_insert_move() here, key removal
2328 * was already fully logged by btrfs_tree_mod_log_eb_copy() above.
2330 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2331 btrfs_node_key_ptr_offset(push_items),
2332 (src_nritems - push_items) *
2333 sizeof(struct btrfs_key_ptr));
2335 btrfs_set_header_nritems(src, src_nritems - push_items);
2336 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2337 btrfs_mark_buffer_dirty(src);
2338 btrfs_mark_buffer_dirty(dst);
 * try to push data from one node into the next node right in the
 * tree.
 *
2347 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2348 * error, and > 0 if there was no room in the right hand block.
2350 * this will only push up to 1/2 the contents of the left node over
2352 static int balance_node_right(struct btrfs_trans_handle *trans,
2353 struct extent_buffer *dst,
2354 struct extent_buffer *src)
2356 struct btrfs_fs_info *fs_info = trans->fs_info;
2363 WARN_ON(btrfs_header_generation(src) != trans->transid);
2364 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2366 src_nritems = btrfs_header_nritems(src);
2367 dst_nritems = btrfs_header_nritems(dst);
2368 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2369 if (push_items <= 0)
2372 if (src_nritems < 4)
2375 max_push = src_nritems / 2 + 1;
2376 /* don't try to empty the node */
2377 if (max_push >= src_nritems)
2380 if (max_push < push_items)
2381 push_items = max_push;
2383 /* dst is the right eb, src is the middle eb */
2384 if (check_sibling_keys(src, dst)) {
2386 btrfs_abort_transaction(trans, ret);
2389 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
2391 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2392 btrfs_node_key_ptr_offset(0),
2394 sizeof(struct btrfs_key_ptr));
2396 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2399 btrfs_abort_transaction(trans, ret);
2402 copy_extent_buffer(dst, src,
2403 btrfs_node_key_ptr_offset(0),
2404 btrfs_node_key_ptr_offset(src_nritems - push_items),
2405 push_items * sizeof(struct btrfs_key_ptr));
2407 btrfs_set_header_nritems(src, src_nritems - push_items);
2408 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2410 btrfs_mark_buffer_dirty(src);
2411 btrfs_mark_buffer_dirty(dst);
2417 * helper function to insert a new root level in the tree.
2418 * A new node is allocated, and a single item is inserted to
2419 * point to the existing root
2421 * returns zero on success or < 0 on failure.
2423 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2424 struct btrfs_root *root,
2425 struct btrfs_path *path, int level)
2427 struct btrfs_fs_info *fs_info = root->fs_info;
2429 struct extent_buffer *lower;
2430 struct extent_buffer *c;
2431 struct extent_buffer *old;
2432 struct btrfs_disk_key lower_key;
2435 BUG_ON(path->nodes[level]);
2436 BUG_ON(path->nodes[level-1] != root->node);
2438 lower = path->nodes[level-1];
2440 btrfs_item_key(lower, &lower_key, 0);
2442 btrfs_node_key(lower, &lower_key, 0);
2444 c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
2445 root->node->start, 0,
2446 BTRFS_NESTING_NEW_ROOT);
2450 root_add_used(root, fs_info->nodesize);
2452 btrfs_set_header_nritems(c, 1);
2453 btrfs_set_node_key(c, &lower_key, 0);
2454 btrfs_set_node_blockptr(c, 0, lower->start);
2455 lower_gen = btrfs_header_generation(lower);
2456 WARN_ON(lower_gen != trans->transid);
2458 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2460 btrfs_mark_buffer_dirty(c);
2463 ret = btrfs_tree_mod_log_insert_root(root->node, c, 0);
2465 rcu_assign_pointer(root->node, c);
2467 /* the super has an extra ref to root->node */
2468 free_extent_buffer(old);
2470 add_root_to_dirty_list(root);
2471 atomic_inc(&c->refs);
2472 path->nodes[level] = c;
2473 path->locks[level] = BTRFS_WRITE_LOCK;
2474 path->slots[level] = 0;
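/*
 * Shape change performed by insert_new_root(), sketched: the old root
 * becomes the single child of a freshly allocated node one level up,
 * and that new node takes over as root->node:
 *
 *	before:   [ old root ]            after:   [ new root: 1 ptr ]
 *	          /    |     \                             |
 *	        ...   ...    ...                     [ old root ]
 *	                                             /    |     \
 *	                                           ...   ...    ...
 */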
2479 * worker function to insert a single pointer in a node.
2480 * the node should have enough room for the pointer already
2482 * slot and level indicate where you want the key to go, and
2483 * blocknr is the block the key points to.
2485 static void insert_ptr(struct btrfs_trans_handle *trans,
2486 struct btrfs_path *path,
2487 struct btrfs_disk_key *key, u64 bytenr,
2488 int slot, int level)
2490 struct extent_buffer *lower;
2494 BUG_ON(!path->nodes[level]);
2495 btrfs_assert_tree_locked(path->nodes[level]);
2496 lower = path->nodes[level];
2497 nritems = btrfs_header_nritems(lower);
2498 BUG_ON(slot > nritems);
2499 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
2500 if (slot != nritems) {
2502 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
2503 slot, nritems - slot);
2506 memmove_extent_buffer(lower,
2507 btrfs_node_key_ptr_offset(slot + 1),
2508 btrfs_node_key_ptr_offset(slot),
2509 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2512 ret = btrfs_tree_mod_log_insert_key(lower, slot,
2513 BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS);
2516 btrfs_set_node_key(lower, key, slot);
2517 btrfs_set_node_blockptr(lower, slot, bytenr);
2518 WARN_ON(trans->transid == 0);
2519 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2520 btrfs_set_header_nritems(lower, nritems + 1);
2521 btrfs_mark_buffer_dirty(lower);
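/*
 * Illustration of the shift done by insert_ptr(): inserting pointer N
 * at slot 2 of a node holding [A B C D] first moves C and D one slot
 * to the right and then writes N, leaving [A B N C D] with nritems
 * bumped by one.
 */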
2525 * split the node at the specified level in path in two.
2526 * The path is corrected to point to the appropriate node after the split
2528 * Before splitting this tries to make some room in the node by pushing
2529 * left and right, if either one works, it returns right away.
2531 * returns 0 on success and < 0 on failure
2533 static noinline int split_node(struct btrfs_trans_handle *trans,
2534 struct btrfs_root *root,
2535 struct btrfs_path *path, int level)
2537 struct btrfs_fs_info *fs_info = root->fs_info;
2538 struct extent_buffer *c;
2539 struct extent_buffer *split;
2540 struct btrfs_disk_key disk_key;
2545 c = path->nodes[level];
2546 WARN_ON(btrfs_header_generation(c) != trans->transid);
2547 if (c == root->node) {
2549 * trying to split the root, let's make a new one
2551 * tree mod log: we don't log the removal of the old root in
2552 * insert_new_root, because that root buffer will be kept as a
2553 * normal node. We are going to log removal of half of the
2554 * elements below with btrfs_tree_mod_log_eb_copy(). We're
2555 * holding a tree lock on the buffer, which is why we cannot
2556 * race with other tree_mod_log users.
2558 ret = insert_new_root(trans, root, path, level + 1);
2562 ret = push_nodes_for_insert(trans, root, path, level);
2563 c = path->nodes[level];
2564 if (!ret && btrfs_header_nritems(c) <
2565 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
2571 c_nritems = btrfs_header_nritems(c);
2572 mid = (c_nritems + 1) / 2;
2573 btrfs_node_key(c, &disk_key, mid);
2575 split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
2576 c->start, 0, BTRFS_NESTING_SPLIT);
2578 return PTR_ERR(split);
2580 root_add_used(root, fs_info->nodesize);
2581 ASSERT(btrfs_header_level(c) == level);
2583 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
2585 btrfs_abort_transaction(trans, ret);
2588 copy_extent_buffer(split, c,
2589 btrfs_node_key_ptr_offset(0),
2590 btrfs_node_key_ptr_offset(mid),
2591 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2592 btrfs_set_header_nritems(split, c_nritems - mid);
2593 btrfs_set_header_nritems(c, mid);
2595 btrfs_mark_buffer_dirty(c);
2596 btrfs_mark_buffer_dirty(split);
2598 insert_ptr(trans, path, &disk_key, split->start,
2599 path->slots[level + 1] + 1, level + 1);
2601 if (path->slots[level] >= mid) {
2602 path->slots[level] -= mid;
2603 btrfs_tree_unlock(c);
2604 free_extent_buffer(c);
2605 path->nodes[level] = split;
2606 path->slots[level + 1] += 1;
2608 btrfs_tree_unlock(split);
2609 free_extent_buffer(split);
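/*
 * Example of the split point used above: with c_nritems = 9, mid is
 * (9 + 1) / 2 = 5, so slots 0-4 stay in 'c' and slots 5-8 (4 key
 * pointers) are copied into 'split'.  A path slot >= mid is moved to
 * the new node and decremented by mid to keep pointing at the same key.
 */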
2615 * how many bytes are required to store the items in a leaf. start
2616 * and nr indicate which items in the leaf to check. This totals up the
2617 * space used both by the item structs and the item data
2619 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2621 struct btrfs_item *start_item;
2622 struct btrfs_item *end_item;
2624 int nritems = btrfs_header_nritems(l);
2625 int end = min(nritems, start + nr) - 1;
2629 start_item = btrfs_item_nr(start);
2630 end_item = btrfs_item_nr(end);
2631 data_len = btrfs_item_offset(l, start_item) +
2632 btrfs_item_size(l, start_item);
2633 data_len = data_len - btrfs_item_offset(l, end_item);
2634 data_len += sizeof(struct btrfs_item) * nr;
2635 WARN_ON(data_len < 0);
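/*
 * Example of the accounting above: leaf item data is laid out back to
 * front, so for items 3..5 the data bytes used are
 *	offset(item 3) + size(item 3) - offset(item 5)
 * and each item also costs one struct btrfs_item header, hence the
 * extra nr * sizeof(struct btrfs_item).
 */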
2640 * The space between the end of the leaf items and
2641 * the start of the leaf data. IOW, how much room
2642 * the leaf has left for both items and data
2644 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
2646 struct btrfs_fs_info *fs_info = leaf->fs_info;
2647 int nritems = btrfs_header_nritems(leaf);
2650 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
2653 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
2655 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
2656 leaf_space_used(leaf, 0, nritems), nritems);
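/*
 * Typical use of the helper above (sketch): compare the free space
 * against the bytes about to be inserted and split or push only when
 * it does not fit, the way setup_leaf_for_split() below does:
 *
 *	if (btrfs_leaf_free_space(leaf) < ins_len)
 *		ret = split_leaf(trans, root, &key, path, ins_len, 1);
 */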
2662 * min slot controls the lowest index we're willing to push to the
2663 * right. We'll push up to and including min_slot, but no lower
2665 static noinline int __push_leaf_right(struct btrfs_path *path,
2666 int data_size, int empty,
2667 struct extent_buffer *right,
2668 int free_space, u32 left_nritems,
2671 struct btrfs_fs_info *fs_info = right->fs_info;
2672 struct extent_buffer *left = path->nodes[0];
2673 struct extent_buffer *upper = path->nodes[1];
2674 struct btrfs_map_token token;
2675 struct btrfs_disk_key disk_key;
2680 struct btrfs_item *item;
2689 nr = max_t(u32, 1, min_slot);
2691 if (path->slots[0] >= left_nritems)
2692 push_space += data_size;
2694 slot = path->slots[1];
2695 i = left_nritems - 1;
2697 item = btrfs_item_nr(i);
2699 if (!empty && push_items > 0) {
2700 if (path->slots[0] > i)
2702 if (path->slots[0] == i) {
2703 int space = btrfs_leaf_free_space(left);
2705 if (space + push_space * 2 > free_space)
2710 if (path->slots[0] == i)
2711 push_space += data_size;
2713 this_item_size = btrfs_item_size(left, item);
2714 if (this_item_size + sizeof(*item) + push_space > free_space)
2718 push_space += this_item_size + sizeof(*item);
2724 if (push_items == 0)
2727 WARN_ON(!empty && push_items == left_nritems);
2729 /* push left to right */
2730 right_nritems = btrfs_header_nritems(right);
2732 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2733 push_space -= leaf_data_end(left);
2735 /* make room in the right data area */
2736 data_end = leaf_data_end(right);
2737 memmove_extent_buffer(right,
2738 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
2739 BTRFS_LEAF_DATA_OFFSET + data_end,
2740 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
2742 /* copy from the left data area */
2743 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
2744 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
2745 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
2748 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2749 btrfs_item_nr_offset(0),
2750 right_nritems * sizeof(struct btrfs_item));
2752 /* copy the items from left to right */
2753 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2754 btrfs_item_nr_offset(left_nritems - push_items),
2755 push_items * sizeof(struct btrfs_item));
2757 /* update the item pointers */
2758 btrfs_init_map_token(&token, right);
2759 right_nritems += push_items;
2760 btrfs_set_header_nritems(right, right_nritems);
2761 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
2762 for (i = 0; i < right_nritems; i++) {
2763 item = btrfs_item_nr(i);
2764 push_space -= btrfs_token_item_size(&token, item);
2765 btrfs_set_token_item_offset(&token, item, push_space);
2768 left_nritems -= push_items;
2769 btrfs_set_header_nritems(left, left_nritems);
2772 btrfs_mark_buffer_dirty(left);
2774 btrfs_clean_tree_block(left);
2776 btrfs_mark_buffer_dirty(right);
2778 btrfs_item_key(right, &disk_key, 0);
2779 btrfs_set_node_key(upper, &disk_key, slot + 1);
2780 btrfs_mark_buffer_dirty(upper);
2782 /* then fixup the leaf pointer in the path */
2783 if (path->slots[0] >= left_nritems) {
2784 path->slots[0] -= left_nritems;
2785 if (btrfs_header_nritems(path->nodes[0]) == 0)
2786 btrfs_clean_tree_block(path->nodes[0]);
2787 btrfs_tree_unlock(path->nodes[0]);
2788 free_extent_buffer(path->nodes[0]);
2789 path->nodes[0] = right;
2790 path->slots[1] += 1;
2792 btrfs_tree_unlock(right);
2793 free_extent_buffer(right);
2798 btrfs_tree_unlock(right);
2799 free_extent_buffer(right);
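/*
 * Layout the copies above rely on: item headers grow forward from the
 * start of the leaf while item data grows backward from the end, so
 * pushing right appends headers after the ones already in 'right' and
 * slides its data area down to receive the bytes copied out of 'left':
 *
 *	[ header | item0 item1 .. itemN | free space | dataN .. data1 data0 ]
 */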
2804 * push some data in the path leaf to the right, trying to free up at
2805 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2807 * returns 1 if the push failed because the other node didn't have enough
2808 * room, 0 if everything worked out and < 0 if there were major errors.
2810 * this will push starting from min_slot to the end of the leaf. It won't
2811 * push any slot lower than min_slot
2813 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2814 *root, struct btrfs_path *path,
2815 int min_data_size, int data_size,
2816 int empty, u32 min_slot)
2818 struct extent_buffer *left = path->nodes[0];
2819 struct extent_buffer *right;
2820 struct extent_buffer *upper;
2826 if (!path->nodes[1])
2829 slot = path->slots[1];
2830 upper = path->nodes[1];
2831 if (slot >= btrfs_header_nritems(upper) - 1)
2834 btrfs_assert_tree_locked(path->nodes[1]);
2836 right = btrfs_read_node_slot(upper, slot + 1);
2838 * slot + 1 is not valid or we fail to read the right node,
2839 * no big deal, just return.
2844 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
2846 free_space = btrfs_leaf_free_space(right);
2847 if (free_space < data_size)
2850 /* cow and double check */
2851 ret = btrfs_cow_block(trans, root, right, upper,
2852 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
2856 free_space = btrfs_leaf_free_space(right);
2857 if (free_space < data_size)
2860 left_nritems = btrfs_header_nritems(left);
2861 if (left_nritems == 0)
2864 if (check_sibling_keys(left, right)) {
2866 btrfs_tree_unlock(right);
2867 free_extent_buffer(right);
2870 if (path->slots[0] == left_nritems && !empty) {
2871 /* Key greater than all keys in the leaf, right neighbor has
2872 * enough room for it and we're not emptying our leaf to delete
2873 * it, therefore use right neighbor to insert the new item and
2874 * no need to touch/dirty our left leaf. */
2875 btrfs_tree_unlock(left);
2876 free_extent_buffer(left);
2877 path->nodes[0] = right;
2883 return __push_leaf_right(path, min_data_size, empty,
2884 right, free_space, left_nritems, min_slot);
2886 btrfs_tree_unlock(right);
2887 free_extent_buffer(right);
2892 * push some data in the path leaf to the left, trying to free up at
2893 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2895 * max_slot can put a limit on how far into the leaf we'll push items. The
2896 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
2899 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
2900 int empty, struct extent_buffer *left,
2901 int free_space, u32 right_nritems,
2904 struct btrfs_fs_info *fs_info = left->fs_info;
2905 struct btrfs_disk_key disk_key;
2906 struct extent_buffer *right = path->nodes[0];
2910 struct btrfs_item *item;
2911 u32 old_left_nritems;
2915 u32 old_left_item_size;
2916 struct btrfs_map_token token;
2919 nr = min(right_nritems, max_slot);
2921 nr = min(right_nritems - 1, max_slot);
2923 for (i = 0; i < nr; i++) {
2924 item = btrfs_item_nr(i);
2926 if (!empty && push_items > 0) {
2927 if (path->slots[0] < i)
2929 if (path->slots[0] == i) {
2930 int space = btrfs_leaf_free_space(right);
2932 if (space + push_space * 2 > free_space)
2937 if (path->slots[0] == i)
2938 push_space += data_size;
2940 this_item_size = btrfs_item_size(right, item);
2941 if (this_item_size + sizeof(*item) + push_space > free_space)
2945 push_space += this_item_size + sizeof(*item);
2948 if (push_items == 0) {
2952 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
2954 /* push data from right to left */
2955 copy_extent_buffer(left, right,
2956 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2957 btrfs_item_nr_offset(0),
2958 push_items * sizeof(struct btrfs_item));
2960 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
2961 btrfs_item_offset_nr(right, push_items - 1);
2963 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
2964 leaf_data_end(left) - push_space,
2965 BTRFS_LEAF_DATA_OFFSET +
2966 btrfs_item_offset_nr(right, push_items - 1),
2968 old_left_nritems = btrfs_header_nritems(left);
2969 BUG_ON(old_left_nritems <= 0);
2971 btrfs_init_map_token(&token, left);
2972 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2973 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2976 item = btrfs_item_nr(i);
2978 ioff = btrfs_token_item_offset(&token, item);
2979 btrfs_set_token_item_offset(&token, item,
2980 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
2982 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2984 /* fixup right node */
2985 if (push_items > right_nritems)
2986 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
2989 if (push_items < right_nritems) {
2990 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2991 leaf_data_end(right);
2992 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
2993 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
2994 BTRFS_LEAF_DATA_OFFSET +
2995 leaf_data_end(right), push_space);
2997 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2998 btrfs_item_nr_offset(push_items),
2999 (btrfs_header_nritems(right) - push_items) *
3000 sizeof(struct btrfs_item));
3003 btrfs_init_map_token(&token, right);
3004 right_nritems -= push_items;
3005 btrfs_set_header_nritems(right, right_nritems);
3006 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3007 for (i = 0; i < right_nritems; i++) {
3008 item = btrfs_item_nr(i);
3010 push_space = push_space - btrfs_token_item_size(&token, item);
3011 btrfs_set_token_item_offset(&token, item, push_space);
3014 btrfs_mark_buffer_dirty(left);
3016 btrfs_mark_buffer_dirty(right);
3018 btrfs_clean_tree_block(right);
3020 btrfs_item_key(right, &disk_key, 0);
3021 fixup_low_keys(path, &disk_key, 1);
3023 /* then fixup the leaf pointer in the path */
3024 if (path->slots[0] < push_items) {
3025 path->slots[0] += old_left_nritems;
3026 btrfs_tree_unlock(path->nodes[0]);
3027 free_extent_buffer(path->nodes[0]);
3028 path->nodes[0] = left;
3029 path->slots[1] -= 1;
3031 btrfs_tree_unlock(left);
3032 free_extent_buffer(left);
3033 path->slots[0] -= push_items;
3035 BUG_ON(path->slots[0] < 0);
3038 btrfs_tree_unlock(left);
3039 free_extent_buffer(left);
3044 * push some data in the path leaf to the left, trying to free up at
3045 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3047 * max_slot can put a limit on how far into the leaf we'll push items. The
3048 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3051 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3052 *root, struct btrfs_path *path, int min_data_size,
3053 int data_size, int empty, u32 max_slot)
3055 struct extent_buffer *right = path->nodes[0];
3056 struct extent_buffer *left;
3062 slot = path->slots[1];
3065 if (!path->nodes[1])
3068 right_nritems = btrfs_header_nritems(right);
3069 if (right_nritems == 0)
3072 btrfs_assert_tree_locked(path->nodes[1]);
3074 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3076 * slot - 1 is not valid or we fail to read the left node,
3077 * no big deal, just return.
3082 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3084 free_space = btrfs_leaf_free_space(left);
3085 if (free_space < data_size) {
3090 /* cow and double check */
3091 ret = btrfs_cow_block(trans, root, left,
3092 path->nodes[1], slot - 1, &left,
3093 BTRFS_NESTING_LEFT_COW);
3095 /* we hit -ENOSPC, but it isn't fatal here */
3101 free_space = btrfs_leaf_free_space(left);
3102 if (free_space < data_size) {
3107 if (check_sibling_keys(left, right)) {
3111 return __push_leaf_left(path, min_data_size,
3112 empty, left, free_space, right_nritems,
3115 btrfs_tree_unlock(left);
3116 free_extent_buffer(left);
3121 * split the path's leaf in two, making sure there is at least data_size
3122 * available for the resulting leaf level of the path.
3124 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3125 struct btrfs_path *path,
3126 struct extent_buffer *l,
3127 struct extent_buffer *right,
3128 int slot, int mid, int nritems)
3130 struct btrfs_fs_info *fs_info = trans->fs_info;
3134 struct btrfs_disk_key disk_key;
3135 struct btrfs_map_token token;
3137 nritems = nritems - mid;
3138 btrfs_set_header_nritems(right, nritems);
3139 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
3141 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3142 btrfs_item_nr_offset(mid),
3143 nritems * sizeof(struct btrfs_item));
3145 copy_extent_buffer(right, l,
3146 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
3147 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
3148 leaf_data_end(l), data_copy_size);
3150 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
3152 btrfs_init_map_token(&token, right);
3153 for (i = 0; i < nritems; i++) {
3154 struct btrfs_item *item = btrfs_item_nr(i);
3157 ioff = btrfs_token_item_offset(&token, item);
3158 btrfs_set_token_item_offset(&token, item, ioff + rt_data_off);
3161 btrfs_set_header_nritems(l, mid);
3162 btrfs_item_key(right, &disk_key, 0);
3163 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3165 btrfs_mark_buffer_dirty(right);
3166 btrfs_mark_buffer_dirty(l);
3167 BUG_ON(path->slots[0] != slot);
3170 btrfs_tree_unlock(path->nodes[0]);
3171 free_extent_buffer(path->nodes[0]);
3172 path->nodes[0] = right;
3173 path->slots[0] -= mid;
3174 path->slots[1] += 1;
3176 btrfs_tree_unlock(right);
3177 free_extent_buffer(right);
3180 BUG_ON(path->slots[0] < 0);
3184 * double splits happen when we need to insert a big item in the middle
3185 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3186 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3189 * We avoid this by trying to push the items on either side of our target
3190 * into the adjacent leaves. If all goes well we can avoid the double split completely.
3193 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3194 struct btrfs_root *root,
3195 struct btrfs_path *path,
3202 int space_needed = data_size;
3204 slot = path->slots[0];
3205 if (slot < btrfs_header_nritems(path->nodes[0]))
3206 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3209 * try to push all the items after our slot into the
3212 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3219 nritems = btrfs_header_nritems(path->nodes[0]);
3221 * our goal is to get our slot at the start or end of a leaf. If
3222 * we've done so we're done
3224 if (path->slots[0] == 0 || path->slots[0] == nritems)
3227 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3230 /* try to push all the items before our slot into the next leaf */
3231 slot = path->slots[0];
3232 space_needed = data_size;
3234 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3235 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3248 * split the path's leaf in two, making sure there is at least data_size
3249 * available for the resulting leaf level of the path.
3251 * returns 0 if all went well and < 0 on failure.
3253 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3254 struct btrfs_root *root,
3255 const struct btrfs_key *ins_key,
3256 struct btrfs_path *path, int data_size,
3259 struct btrfs_disk_key disk_key;
3260 struct extent_buffer *l;
3264 struct extent_buffer *right;
3265 struct btrfs_fs_info *fs_info = root->fs_info;
3269 int num_doubles = 0;
3270 int tried_avoid_double = 0;
3273 slot = path->slots[0];
3274 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3275 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3278 /* first try to make some room by pushing left and right */
3279 if (data_size && path->nodes[1]) {
3280 int space_needed = data_size;
3282 if (slot < btrfs_header_nritems(l))
3283 space_needed -= btrfs_leaf_free_space(l);
3285 wret = push_leaf_right(trans, root, path, space_needed,
3286 space_needed, 0, 0);
3290 space_needed = data_size;
3292 space_needed -= btrfs_leaf_free_space(l);
3293 wret = push_leaf_left(trans, root, path, space_needed,
3294 space_needed, 0, (u32)-1);
3300 /* did the pushes work? */
3301 if (btrfs_leaf_free_space(l) >= data_size)
3305 if (!path->nodes[1]) {
3306 ret = insert_new_root(trans, root, path, 1);
3313 slot = path->slots[0];
3314 nritems = btrfs_header_nritems(l);
3315 mid = (nritems + 1) / 2;
3319 leaf_space_used(l, mid, nritems - mid) + data_size >
3320 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3321 if (slot >= nritems) {
3325 if (mid != nritems &&
3326 leaf_space_used(l, mid, nritems - mid) +
3327 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3328 if (data_size && !tried_avoid_double)
3329 goto push_for_double;
3335 if (leaf_space_used(l, 0, mid) + data_size >
3336 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3337 if (!extend && data_size && slot == 0) {
3339 } else if ((extend || !data_size) && slot == 0) {
3343 if (mid != nritems &&
3344 leaf_space_used(l, mid, nritems - mid) +
3345 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3346 if (data_size && !tried_avoid_double)
3347 goto push_for_double;
3355 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3357 btrfs_item_key(l, &disk_key, mid);
3360 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3361 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3362 * subclasses, which is 8 at the time of this patch, and we've maxed it
3363 * out. In the future we could add a
3364 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3365 * use BTRFS_NESTING_NEW_ROOT.
3367 right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
3368 l->start, 0, num_doubles ?
3369 BTRFS_NESTING_NEW_ROOT :
3370 BTRFS_NESTING_SPLIT);
3372 return PTR_ERR(right);
3374 root_add_used(root, fs_info->nodesize);
3378 btrfs_set_header_nritems(right, 0);
3379 insert_ptr(trans, path, &disk_key,
3380 right->start, path->slots[1] + 1, 1);
3381 btrfs_tree_unlock(path->nodes[0]);
3382 free_extent_buffer(path->nodes[0]);
3383 path->nodes[0] = right;
3385 path->slots[1] += 1;
3387 btrfs_set_header_nritems(right, 0);
3388 insert_ptr(trans, path, &disk_key,
3389 right->start, path->slots[1], 1);
3390 btrfs_tree_unlock(path->nodes[0]);
3391 free_extent_buffer(path->nodes[0]);
3392 path->nodes[0] = right;
3394 if (path->slots[1] == 0)
3395 fixup_low_keys(path, &disk_key, 1);
3398 * We create a new leaf 'right' for the required ins_len and
3399 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
3400 * the content of ins_len to 'right'.
3405 copy_for_split(trans, path, l, right, slot, mid, nritems);
3408 BUG_ON(num_doubles != 0);
3416 push_for_double_split(trans, root, path, data_size);
3417 tried_avoid_double = 1;
3418 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3423 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3424 struct btrfs_root *root,
3425 struct btrfs_path *path, int ins_len)
3427 struct btrfs_key key;
3428 struct extent_buffer *leaf;
3429 struct btrfs_file_extent_item *fi;
3434 leaf = path->nodes[0];
3435 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3437 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3438 key.type != BTRFS_EXTENT_CSUM_KEY);
3440 if (btrfs_leaf_free_space(leaf) >= ins_len)
3443 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3444 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3445 fi = btrfs_item_ptr(leaf, path->slots[0],
3446 struct btrfs_file_extent_item);
3447 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3449 btrfs_release_path(path);
3451 path->keep_locks = 1;
3452 path->search_for_split = 1;
3453 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3454 path->search_for_split = 0;
3461 leaf = path->nodes[0];
3462 /* if our item isn't there, return now */
3463 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3466 /* the leaf has changed, it now has room. return now */
3467 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
3470 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3471 fi = btrfs_item_ptr(leaf, path->slots[0],
3472 struct btrfs_file_extent_item);
3473 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3477 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3481 path->keep_locks = 0;
3482 btrfs_unlock_up_safe(path, 1);
3485 path->keep_locks = 0;
3489 static noinline int split_item(struct btrfs_path *path,
3490 const struct btrfs_key *new_key,
3491 unsigned long split_offset)
3493 struct extent_buffer *leaf;
3494 struct btrfs_item *item;
3495 struct btrfs_item *new_item;
3501 struct btrfs_disk_key disk_key;
3503 leaf = path->nodes[0];
3504 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
3506 item = btrfs_item_nr(path->slots[0]);
3507 orig_offset = btrfs_item_offset(leaf, item);
3508 item_size = btrfs_item_size(leaf, item);
3510 buf = kmalloc(item_size, GFP_NOFS);
3514 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3515 path->slots[0]), item_size);
3517 slot = path->slots[0] + 1;
3518 nritems = btrfs_header_nritems(leaf);
3519 if (slot != nritems) {
3520 /* shift the items */
3521 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3522 btrfs_item_nr_offset(slot),
3523 (nritems - slot) * sizeof(struct btrfs_item));
3526 btrfs_cpu_key_to_disk(&disk_key, new_key);
3527 btrfs_set_item_key(leaf, &disk_key, slot);
3529 new_item = btrfs_item_nr(slot);
3531 btrfs_set_item_offset(leaf, new_item, orig_offset);
3532 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3534 btrfs_set_item_offset(leaf, item,
3535 orig_offset + item_size - split_offset);
3536 btrfs_set_item_size(leaf, item, split_offset);
3538 btrfs_set_header_nritems(leaf, nritems + 1);
3540 /* write the data for the start of the original item */
3541 write_extent_buffer(leaf, buf,
3542 btrfs_item_ptr_offset(leaf, path->slots[0]),
3545 /* write the data for the new item */
3546 write_extent_buffer(leaf, buf + split_offset,
3547 btrfs_item_ptr_offset(leaf, slot),
3548 item_size - split_offset);
3549 btrfs_mark_buffer_dirty(leaf);
3551 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
3557 * This function splits a single item into two items,
3558 * giving 'new_key' to the new item and splitting the
3559 * old one at split_offset (from the start of the item).
3561 * The path may be released by this operation. After
3562 * the split, the path is pointing to the old item. The
3563 * new item is going to be in the same node as the old one.
3565 * Note: the item being split must be small enough to live alone on
3566 * a tree block with room for one extra struct btrfs_item
3568 * This allows us to split the item in place, keeping a lock on the
3569 * leaf the entire time.
3571 int btrfs_split_item(struct btrfs_trans_handle *trans,
3572 struct btrfs_root *root,
3573 struct btrfs_path *path,
3574 const struct btrfs_key *new_key,
3575 unsigned long split_offset)
3578 ret = setup_leaf_for_split(trans, root, path,
3579 sizeof(struct btrfs_item));
3583 ret = split_item(path, new_key, split_offset);
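/*
 * Usage sketch for btrfs_split_item() (illustrative, error handling
 * trimmed): with the path positioned on the item by btrfs_search_slot(),
 * the caller chooses the key for the tail piece and the byte offset at
 * which the item body is cut:
 *
 *	ret = btrfs_split_item(trans, root, path, &tail_key, split_offset);
 *
 * Afterwards path->slots[0] still points at the (now shorter) original
 * item, and the tail lives in the following slot of the same leaf.
 */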
3588 * This function duplicates an item, giving 'new_key' to the new item.
3589 * It guarantees both items live in the same tree leaf and the new item
3590 * is contiguous with the original item.
3592 * This allows us to split a file extent in place, keeping a lock on the
3593 * leaf the entire time.
3595 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3596 struct btrfs_root *root,
3597 struct btrfs_path *path,
3598 const struct btrfs_key *new_key)
3600 struct extent_buffer *leaf;
3604 leaf = path->nodes[0];
3605 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3606 ret = setup_leaf_for_split(trans, root, path,
3607 item_size + sizeof(struct btrfs_item));
3612 setup_items_for_insert(root, path, new_key, &item_size, 1);
3613 leaf = path->nodes[0];
3614 memcpy_extent_buffer(leaf,
3615 btrfs_item_ptr_offset(leaf, path->slots[0]),
3616 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
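/*
 * Usage sketch for btrfs_duplicate_item() (illustrative): splitting a
 * file extent in place, where ino and split_pos stand for whatever the
 * caller is cutting:
 *
 *	new_key.objectid = ino;
 *	new_key.type = BTRFS_EXTENT_DATA_KEY;
 *	new_key.offset = split_pos;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *
 * On success the copy sits right after the original in the same leaf
 * and both items can then be trimmed to describe their halves.
 */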
3622 * make the item pointed to by the path smaller. new_size indicates
3623 * how small to make it, and from_end tells us if we just chop bytes
3624 * off the end of the item or if we shift the item to chop bytes off the front.
3627 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
3630 struct extent_buffer *leaf;
3631 struct btrfs_item *item;
3633 unsigned int data_end;
3634 unsigned int old_data_start;
3635 unsigned int old_size;
3636 unsigned int size_diff;
3638 struct btrfs_map_token token;
3640 leaf = path->nodes[0];
3641 slot = path->slots[0];
3643 old_size = btrfs_item_size_nr(leaf, slot);
3644 if (old_size == new_size)
3647 nritems = btrfs_header_nritems(leaf);
3648 data_end = leaf_data_end(leaf);
3650 old_data_start = btrfs_item_offset_nr(leaf, slot);
3652 size_diff = old_size - new_size;
3655 BUG_ON(slot >= nritems);
3658 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3660 /* first correct the data pointers */
3661 btrfs_init_map_token(&token, leaf);
3662 for (i = slot; i < nritems; i++) {
3664 item = btrfs_item_nr(i);
3666 ioff = btrfs_token_item_offset(&token, item);
3667 btrfs_set_token_item_offset(&token, item, ioff + size_diff);
3670 /* shift the data */
3672 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3673 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3674 data_end, old_data_start + new_size - data_end);
3676 struct btrfs_disk_key disk_key;
3679 btrfs_item_key(leaf, &disk_key, slot);
3681 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3683 struct btrfs_file_extent_item *fi;
3685 fi = btrfs_item_ptr(leaf, slot,
3686 struct btrfs_file_extent_item);
3687 fi = (struct btrfs_file_extent_item *)(
3688 (unsigned long)fi - size_diff);
3690 if (btrfs_file_extent_type(leaf, fi) ==
3691 BTRFS_FILE_EXTENT_INLINE) {
3692 ptr = btrfs_item_ptr_offset(leaf, slot);
3693 memmove_extent_buffer(leaf, ptr,
3695 BTRFS_FILE_EXTENT_INLINE_DATA_START);
3699 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3700 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3701 data_end, old_data_start - data_end);
3703 offset = btrfs_disk_key_offset(&disk_key);
3704 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3705 btrfs_set_item_key(leaf, &disk_key, slot);
3707 fixup_low_keys(path, &disk_key, 1);
3710 item = btrfs_item_nr(slot);
3711 btrfs_set_item_size(leaf, item, new_size);
3712 btrfs_mark_buffer_dirty(leaf);
3714 if (btrfs_leaf_free_space(leaf) < 0) {
3715 btrfs_print_leaf(leaf);
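/*
 * Usage sketch for btrfs_truncate_item() (illustrative): shrink the
 * item under path->slots[0] to new_size bytes, dropping bytes from the
 * end:
 *
 *	btrfs_truncate_item(path, new_size, 1);
 *
 * Passing from_end == 0 keeps the tail of the item instead and shifts
 * the remaining bytes forward, which is the inline file extent case
 * handled above.
 */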
3721 * make the item pointed to by the path bigger, data_size is the added size.
3723 void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
3726 struct extent_buffer *leaf;
3727 struct btrfs_item *item;
3729 unsigned int data_end;
3730 unsigned int old_data;
3731 unsigned int old_size;
3733 struct btrfs_map_token token;
3735 leaf = path->nodes[0];
3737 nritems = btrfs_header_nritems(leaf);
3738 data_end = leaf_data_end(leaf);
3740 if (btrfs_leaf_free_space(leaf) < data_size) {
3741 btrfs_print_leaf(leaf);
3744 slot = path->slots[0];
3745 old_data = btrfs_item_end_nr(leaf, slot);
3748 if (slot >= nritems) {
3749 btrfs_print_leaf(leaf);
3750 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
3756 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3758 /* first correct the data pointers */
3759 btrfs_init_map_token(&token, leaf);
3760 for (i = slot; i < nritems; i++) {
3762 item = btrfs_item_nr(i);
3764 ioff = btrfs_token_item_offset(&token, item);
3765 btrfs_set_token_item_offset(&token, item, ioff - data_size);
3768 /* shift the data */
3769 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3770 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
3771 data_end, old_data - data_end);
3773 data_end = old_data;
3774 old_size = btrfs_item_size_nr(leaf, slot);
3775 item = btrfs_item_nr(slot);
3776 btrfs_set_item_size(leaf, item, old_size + data_size);
3777 btrfs_mark_buffer_dirty(leaf);
3779 if (btrfs_leaf_free_space(leaf) < 0) {
3780 btrfs_print_leaf(leaf);
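/*
 * Usage sketch for btrfs_extend_item() (illustrative; old_size stands
 * for the item size before the call): grow the item under
 * path->slots[0] by 'added' bytes, then fill the new tail:
 *
 *	btrfs_extend_item(path, added);
 *	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 *	write_extent_buffer(leaf, buf, ptr + old_size, added);
 *
 * The caller must already know the leaf has 'added' bytes free, e.g.
 * by having searched with ins_len set accordingly.
 */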
3786 * setup_items_for_insert - Helper called before inserting one or more items
3787 * into a leaf. Main purpose is to save stack depth by doing the bulk of the work
3788 * in a function that doesn't call btrfs_search_slot
3790 * @root: root we are inserting items to
3791 * @path: points to the leaf/slot where we are going to insert new items
3792 * @cpu_key: array of keys for items to be inserted
3793 * @data_size: size of the body of each item we are going to insert
3794 * @nr: size of @cpu_key/@data_size arrays
3796 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
3797 const struct btrfs_key *cpu_key, u32 *data_size,
3800 struct btrfs_fs_info *fs_info = root->fs_info;
3801 struct btrfs_item *item;
3804 unsigned int data_end;
3805 struct btrfs_disk_key disk_key;
3806 struct extent_buffer *leaf;
3808 struct btrfs_map_token token;
3812 for (i = 0; i < nr; i++)
3813 total_data += data_size[i];
3814 total_size = total_data + (nr * sizeof(struct btrfs_item));
3816 if (path->slots[0] == 0) {
3817 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3818 fixup_low_keys(path, &disk_key, 1);
3820 btrfs_unlock_up_safe(path, 1);
3822 leaf = path->nodes[0];
3823 slot = path->slots[0];
3825 nritems = btrfs_header_nritems(leaf);
3826 data_end = leaf_data_end(leaf);
3828 if (btrfs_leaf_free_space(leaf) < total_size) {
3829 btrfs_print_leaf(leaf);
3830 btrfs_crit(fs_info, "not enough freespace need %u have %d",
3831 total_size, btrfs_leaf_free_space(leaf));
3835 btrfs_init_map_token(&token, leaf);
3836 if (slot != nritems) {
3837 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3839 if (old_data < data_end) {
3840 btrfs_print_leaf(leaf);
3842 "item at slot %d with data offset %u beyond data end of leaf %u",
3843 slot, old_data, data_end);
3847 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3849 /* first correct the data pointers */
3850 for (i = slot; i < nritems; i++) {
3853 item = btrfs_item_nr(i);
3854 ioff = btrfs_token_item_offset(&token, item);
3855 btrfs_set_token_item_offset(&token, item,
3858 /* shift the items */
3859 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3860 btrfs_item_nr_offset(slot),
3861 (nritems - slot) * sizeof(struct btrfs_item));
3863 /* shift the data */
3864 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3865 data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
3866 data_end, old_data - data_end);
3867 data_end = old_data;
3870 /* setup the item for the new data */
3871 for (i = 0; i < nr; i++) {
3872 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3873 btrfs_set_item_key(leaf, &disk_key, slot + i);
3874 item = btrfs_item_nr(slot + i);
3875 data_end -= data_size[i];
3876 btrfs_set_token_item_offset(&token, item, data_end);
3877 btrfs_set_token_item_size(&token, item, data_size[i]);
3880 btrfs_set_header_nritems(leaf, nritems + nr);
3881 btrfs_mark_buffer_dirty(leaf);
3883 if (btrfs_leaf_free_space(leaf) < 0) {
3884 btrfs_print_leaf(leaf);
3890 * Given a key and some data, insert items into the tree.
3891 * This does all the path init required, making room in the tree if needed.
3893 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3894 struct btrfs_root *root,
3895 struct btrfs_path *path,
3896 const struct btrfs_key *cpu_key, u32 *data_size,
3905 for (i = 0; i < nr; i++)
3906 total_data += data_size[i];
3908 total_size = total_data + (nr * sizeof(struct btrfs_item));
3909 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3915 slot = path->slots[0];
3918 setup_items_for_insert(root, path, cpu_key, data_size, nr);
3923 * Given a key and some data, insert an item into the tree.
3924 * This does all the path init required, making room in the tree if needed.
3926 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3927 const struct btrfs_key *cpu_key, void *data,
3931 struct btrfs_path *path;
3932 struct extent_buffer *leaf;
3935 path = btrfs_alloc_path();
3938 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3940 leaf = path->nodes[0];
3941 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3942 write_extent_buffer(leaf, data, ptr, data_size);
3943 btrfs_mark_buffer_dirty(leaf);
3945 btrfs_free_path(path);
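/*
 * Usage sketch (illustrative; objectid/type/offset/data are caller
 * supplied): the wrapper above is for callers that insert one
 * fixed-size item and do not need the leaf afterwards; anything more
 * involved uses btrfs_insert_empty_item(s) and fills the item in place:
 *
 *	struct btrfs_key key = { .objectid = objectid, .type = type,
 *				 .offset = offset };
 *
 *	ret = btrfs_insert_item(trans, root, &key, &data, sizeof(data));
 */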
3950 * delete the pointer from a given node.
3952 * the tree should have been previously balanced so the deletion does not empty a node.
3955 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
3956 int level, int slot)
3958 struct extent_buffer *parent = path->nodes[level];
3962 nritems = btrfs_header_nritems(parent);
3963 if (slot != nritems - 1) {
3965 ret = btrfs_tree_mod_log_insert_move(parent, slot,
3966 slot + 1, nritems - slot - 1);
3969 memmove_extent_buffer(parent,
3970 btrfs_node_key_ptr_offset(slot),
3971 btrfs_node_key_ptr_offset(slot + 1),
3972 sizeof(struct btrfs_key_ptr) *
3973 (nritems - slot - 1));
3975 ret = btrfs_tree_mod_log_insert_key(parent, slot,
3976 BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS);
3981 btrfs_set_header_nritems(parent, nritems);
3982 if (nritems == 0 && parent == root->node) {
3983 BUG_ON(btrfs_header_level(root->node) != 1);
3984 /* just turn the root into a leaf and break */
3985 btrfs_set_header_level(root->node, 0);
3986 } else if (slot == 0) {
3987 struct btrfs_disk_key disk_key;
3989 btrfs_node_key(parent, &disk_key, 0);
3990 fixup_low_keys(path, &disk_key, level + 1);
3992 btrfs_mark_buffer_dirty(parent);
3996 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
3999 * This deletes the pointer in path->nodes[1] and frees the leaf
4000 * block extent.
4002 * The path must have already been setup for deleting the leaf, including
4003 * all the proper balancing. path->nodes[1] must be locked.
4005 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4006 struct btrfs_root *root,
4007 struct btrfs_path *path,
4008 struct extent_buffer *leaf)
4010 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4011 del_ptr(root, path, 1, path->slots[1]);
4014 * btrfs_free_extent is expensive, we want to make sure we
4015 * aren't holding any locks when we call it
4017 btrfs_unlock_up_safe(path, 0);
4019 root_sub_used(root, leaf->len);
4021 atomic_inc(&leaf->refs);
4022 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4023 free_extent_buffer_stale(leaf);
4026 * delete 'nr' items starting at 'slot' at the leaf level in path. If that empties
4027 * the leaf, remove it from the tree
4029 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4030 struct btrfs_path *path, int slot, int nr)
4032 struct btrfs_fs_info *fs_info = root->fs_info;
4033 struct extent_buffer *leaf;
4034 struct btrfs_item *item;
4042 leaf = path->nodes[0];
4043 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4045 for (i = 0; i < nr; i++)
4046 dsize += btrfs_item_size_nr(leaf, slot + i);
4048 nritems = btrfs_header_nritems(leaf);
4050 if (slot + nr != nritems) {
4051 int data_end = leaf_data_end(leaf);
4052 struct btrfs_map_token token;
4054 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4056 BTRFS_LEAF_DATA_OFFSET + data_end,
4057 last_off - data_end);
4059 btrfs_init_map_token(&token, leaf);
4060 for (i = slot + nr; i < nritems; i++) {
4063 item = btrfs_item_nr(i);
4064 ioff = btrfs_token_item_offset(&token, item);
4065 btrfs_set_token_item_offset(&token, item, ioff + dsize);
4068 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4069 btrfs_item_nr_offset(slot + nr),
4070 sizeof(struct btrfs_item) *
4071 (nritems - slot - nr));
4073 btrfs_set_header_nritems(leaf, nritems - nr);
4076 /* delete the leaf if we've emptied it */
4078 if (leaf == root->node) {
4079 btrfs_set_header_level(leaf, 0);
4081 btrfs_clean_tree_block(leaf);
4082 btrfs_del_leaf(trans, root, path, leaf);
4085 int used = leaf_space_used(leaf, 0, nritems);
4087 struct btrfs_disk_key disk_key;
4089 btrfs_item_key(leaf, &disk_key, 0);
4090 fixup_low_keys(path, &disk_key, 1);
4093 /* delete the leaf if it is mostly empty */
4094 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4095 /* push_leaf_left fixes the path.
4096 * make sure the path still points to our leaf
4097 * for possible call to del_ptr below
4099 slot = path->slots[1];
4100 atomic_inc(&leaf->refs);
4102 wret = push_leaf_left(trans, root, path, 1, 1,
4104 if (wret < 0 && wret != -ENOSPC)
4107 if (path->nodes[0] == leaf &&
4108 btrfs_header_nritems(leaf)) {
4109 wret = push_leaf_right(trans, root, path, 1,
4111 if (wret < 0 && wret != -ENOSPC)
4115 if (btrfs_header_nritems(leaf) == 0) {
4116 path->slots[1] = slot;
4117 btrfs_del_leaf(trans, root, path, leaf);
4118 free_extent_buffer(leaf);
4121 /* if we're still in the path, make sure
4122 * we're dirty. Otherwise, one of the
4123 * push_leaf functions must have already
4124 * dirtied this buffer
4126 if (path->nodes[0] == leaf)
4127 btrfs_mark_buffer_dirty(leaf);
4128 free_extent_buffer(leaf);
4131 btrfs_mark_buffer_dirty(leaf);
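/*
 * Usage sketch (illustrative): deleting one item that was just located
 * with btrfs_search_slot(); ins_len == -1 asks the search to prepare
 * the path for a deletion:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret == 0)
 *		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 */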
4138 * search the tree again to find a leaf with lesser keys
4139 * returns 0 if it found something or 1 if there are no lesser leaves.
4140 * returns < 0 on io errors.
4142 * This may release the path, and so you may lose any locks held at the time of the call.
4145 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4147 struct btrfs_key key;
4148 struct btrfs_disk_key found_key;
4151 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4153 if (key.offset > 0) {
4155 } else if (key.type > 0) {
4157 key.offset = (u64)-1;
4158 } else if (key.objectid > 0) {
4161 key.offset = (u64)-1;
4166 btrfs_release_path(path);
4167 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4170 btrfs_item_key(path->nodes[0], &found_key, 0);
4171 ret = comp_keys(&found_key, &key);
4173 * We might have had an item with the previous key in the tree right
4174 * before we released our path. And after we released our path, that
4175 * item might have been pushed to the first slot (0) of the leaf we
4176 * were holding due to a tree balance. Alternatively, an item with the
4177 * previous key can exist as the only element of a leaf (big fat item).
4178 * Therefore account for these 2 cases, so that our callers (like
4179 * btrfs_previous_item) don't miss an existing item with a key matching
4180 * the previous key we computed above.
4188 * A helper function to walk down the tree starting at min_key, and looking
4189 * for nodes or leaves that have a minimum transaction id.
4190 * This is used by the btree defrag code and by tree logging.
4192 * This does not cow, but it does stuff the starting key it finds back
4193 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4194 * key and get a writable path.
4196 * This honors path->lowest_level to prevent descent past a given level
4199 * min_trans indicates the oldest transaction that you are interested
4200 * in walking through. Any nodes or leaves older than min_trans are
4201 * skipped over (without reading them).
4203 * returns zero if something useful was found, < 0 on error and 1 if there
4204 * was nothing in the tree that matched the search criteria.
4206 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4207 struct btrfs_path *path,
4210 struct extent_buffer *cur;
4211 struct btrfs_key found_key;
4217 int keep_locks = path->keep_locks;
4219 path->keep_locks = 1;
4221 cur = btrfs_read_lock_root_node(root);
4222 level = btrfs_header_level(cur);
4223 WARN_ON(path->nodes[level]);
4224 path->nodes[level] = cur;
4225 path->locks[level] = BTRFS_READ_LOCK;
4227 if (btrfs_header_generation(cur) < min_trans) {
4232 nritems = btrfs_header_nritems(cur);
4233 level = btrfs_header_level(cur);
4234 sret = btrfs_bin_search(cur, min_key, &slot);
4240 /* at the lowest level, we're done, setup the path and exit */
4241 if (level == path->lowest_level) {
4242 if (slot >= nritems)
4245 path->slots[level] = slot;
4246 btrfs_item_key_to_cpu(cur, &found_key, slot);
4249 if (sret && slot > 0)
4252 * check this node pointer against the min_trans parameters.
4253 * If it is too old, skip to the next one.
4255 while (slot < nritems) {
4258 gen = btrfs_node_ptr_generation(cur, slot);
4259 if (gen < min_trans) {
4267 * we didn't find a candidate key in this node, walk forward
4268 * and find another one
4270 if (slot >= nritems) {
4271 path->slots[level] = slot;
4272 sret = btrfs_find_next_key(root, path, min_key, level,
4275 btrfs_release_path(path);
4281 /* save our key for returning to the caller */
4282 btrfs_node_key_to_cpu(cur, &found_key, slot);
4283 path->slots[level] = slot;
4284 if (level == path->lowest_level) {
4288 cur = btrfs_read_node_slot(cur, slot);
4294 btrfs_tree_read_lock(cur);
4296 path->locks[level - 1] = BTRFS_READ_LOCK;
4297 path->nodes[level - 1] = cur;
4298 unlock_up(path, level, 1, 0, NULL);
4301 path->keep_locks = keep_locks;
4303 btrfs_unlock_up_safe(path, path->lowest_level + 1);
4304 memcpy(min_key, &found_key, sizeof(found_key));
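/*
 * Usage sketch (illustrative): scanning everything newer than a given
 * transaction id, the way defrag-style callers drive this helper:
 *
 *	min_key.objectid = 0;
 *	min_key.type = 0;
 *	min_key.offset = 0;
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		(process the slot the path points at)
 *		btrfs_release_path(path);
 *		(advance min_key one past the key that was just returned)
 *	}
 */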
4310 * this is similar to btrfs_next_leaf, but does not try to preserve
4311 * and fixup the path. It looks for and returns the next key in the
4312 * tree based on the current path and the min_trans parameters.
4314 * 0 is returned if another key is found, < 0 if there are any errors
4315 * and 1 is returned if there are no higher keys in the tree
4317 * path->keep_locks should be set to 1 on the search made before
4318 * calling this function.
4320 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4321 struct btrfs_key *key, int level, u64 min_trans)
4324 struct extent_buffer *c;
4326 WARN_ON(!path->keep_locks && !path->skip_locking);
4327 while (level < BTRFS_MAX_LEVEL) {
4328 if (!path->nodes[level])
4331 slot = path->slots[level] + 1;
4332 c = path->nodes[level];
4334 if (slot >= btrfs_header_nritems(c)) {
4337 struct btrfs_key cur_key;
4338 if (level + 1 >= BTRFS_MAX_LEVEL ||
4339 !path->nodes[level + 1])
4342 if (path->locks[level + 1] || path->skip_locking) {
4347 slot = btrfs_header_nritems(c) - 1;
4349 btrfs_item_key_to_cpu(c, &cur_key, slot);
4351 btrfs_node_key_to_cpu(c, &cur_key, slot);
4353 orig_lowest = path->lowest_level;
4354 btrfs_release_path(path);
4355 path->lowest_level = level;
4356 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4358 path->lowest_level = orig_lowest;
4362 c = path->nodes[level];
4363 slot = path->slots[level];
4370 btrfs_item_key_to_cpu(c, key, slot);
4372 u64 gen = btrfs_node_ptr_generation(c, slot);
4374 if (gen < min_trans) {
4378 btrfs_node_key_to_cpu(c, key, slot);
4386 * search the tree again to find a leaf with greater keys
4387 * returns 0 if it found something or 1 if there are no greater leaves.
4388 * returns < 0 on io errors.
4390 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4392 return btrfs_next_old_leaf(root, path, 0);
4395 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
4400 struct extent_buffer *c;
4401 struct extent_buffer *next;
4402 struct btrfs_key key;
4407 nritems = btrfs_header_nritems(path->nodes[0]);
4411 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4415 btrfs_release_path(path);
4417 path->keep_locks = 1;
4420 ret = btrfs_search_old_slot(root, &key, path, time_seq);
4422 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4423 path->keep_locks = 0;
4428 nritems = btrfs_header_nritems(path->nodes[0]);
4430 * by releasing the path above we dropped all our locks. A balance
4431 * could have added more items next to the key that used to be
4432 * at the very end of the block. So, check again here and
4433 * advance the path if there are now more items available.
4435 if (nritems > 0 && path->slots[0] < nritems - 1) {
4442 * So the above check misses one case:
4443 * - after releasing the path above, someone has removed the item that
4444 * used to be at the very end of the block, and balance between leaves
4445 * gets another one with bigger key.offset to replace it.
4447 * This one should be returned as well, or we can get leaf corruption
4448 * later (especially in __btrfs_drop_extents()).
4450 * And a bit more explanation about this check,
4451 * with ret > 0, the key isn't found, the path points to the slot
4452 * where it should be inserted, so the path->slots[0] item must be the bigger one.
4455 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
4460 while (level < BTRFS_MAX_LEVEL) {
4461 if (!path->nodes[level]) {
4466 slot = path->slots[level] + 1;
4467 c = path->nodes[level];
4468 if (slot >= btrfs_header_nritems(c)) {
4470 if (level == BTRFS_MAX_LEVEL) {
4479 * Our current level is where we're going to start from, and to
4480 * make sure lockdep doesn't complain we need to drop our locks
4481 * and nodes from 0 to our current level.
4483 for (i = 0; i < level; i++) {
4484 if (path->locks[level]) {
4485 btrfs_tree_read_unlock(path->nodes[i]);
4488 free_extent_buffer(path->nodes[i]);
4489 path->nodes[i] = NULL;
4493 ret = read_block_for_search(root, path, &next, level,
4499 btrfs_release_path(path);
4503 if (!path->skip_locking) {
4504 ret = btrfs_try_tree_read_lock(next);
4505 if (!ret && time_seq) {
4507 * If we don't get the lock, we may be racing
4508 * with push_leaf_left, holding that lock while
4509 * itself waiting for the leaf we've currently
4510 * locked. To solve this situation, we give up
4511 * on our lock and cycle.
4513 free_extent_buffer(next);
4514 btrfs_release_path(path);
4519 btrfs_tree_read_lock(next);
4523 path->slots[level] = slot;
4526 path->nodes[level] = next;
4527 path->slots[level] = 0;
4528 if (!path->skip_locking)
4529 path->locks[level] = BTRFS_READ_LOCK;
4533 ret = read_block_for_search(root, path, &next, level,
4539 btrfs_release_path(path);
4543 if (!path->skip_locking)
4544 btrfs_tree_read_lock(next);
4548 unlock_up(path, 0, 1, 0, NULL);
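/*
 * Usage sketch (illustrative): the common full-tree scan built on
 * btrfs_search_slot() plus btrfs_next_leaf():
 *
 *	key.objectid = 0; key.type = 0; key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	while (ret >= 0) {
 *		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;	(1 means no more leaves, < 0 error)
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *		(handle the item, then)
 *		path->slots[0]++;
 *	}
 */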
4554 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4555 * searching until it gets past min_objectid or finds an item of 'type'
4557 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4559 int btrfs_previous_item(struct btrfs_root *root,
4560 struct btrfs_path *path, u64 min_objectid,
4563 struct btrfs_key found_key;
4564 struct extent_buffer *leaf;
4569 if (path->slots[0] == 0) {
4570 ret = btrfs_prev_leaf(root, path);
4576 leaf = path->nodes[0];
4577 nritems = btrfs_header_nritems(leaf);
4580 if (path->slots[0] == nritems)
4583 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4584 if (found_key.objectid < min_objectid)
4586 if (found_key.type == type)
4588 if (found_key.objectid == min_objectid &&
4589 found_key.type < type)
4596 * search in extent tree to find a previous Metadata/Data extent item with min objectid.
4599 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4601 int btrfs_previous_extent_item(struct btrfs_root *root,
4602 struct btrfs_path *path, u64 min_objectid)
4604 struct btrfs_key found_key;
4605 struct extent_buffer *leaf;
4610 if (path->slots[0] == 0) {
4611 ret = btrfs_prev_leaf(root, path);
4617 leaf = path->nodes[0];
4618 nritems = btrfs_header_nritems(leaf);
4621 if (path->slots[0] == nritems)
4624 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4625 if (found_key.objectid < min_objectid)
4627 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
4628 found_key.type == BTRFS_METADATA_ITEM_KEY)
4630 if (found_key.objectid == min_objectid &&
4631 found_key.type < BTRFS_EXTENT_ITEM_KEY)