/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

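/*
 * Illustrative call flow (a sketch based on callers elsewhere in
 * btrfs, not code in this file): a caller dropping one reference on
 * a tree block queues a delayed ref instead of editing the extent
 * tree inline, roughly:
 *
 *	btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *				   parent, root_objectid, level,
 *				   BTRFS_DROP_DELAYED_REF, NULL, for_cow);
 *
 * The queued modifications are batched up and applied when the
 * delayed refs are run, typically around transaction commit.
 */
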
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was
 * properly inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      struct btrfs_delayed_ref_head **last, int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
		if (last)
			*last = entry;

		if (bytenr < entry->node.bytenr)
			cmp = -1;
		else if (bytenr > entry->node.bytenr)
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			bytenr = entry->node.bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}

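/*
 * Acquire the mutex that serializes running the refs of a head.  On
 * trylock failure we drop delayed_refs->lock, sleep on the mutex and
 * revalidate; -EAGAIN means the head was run and removed from the
 * tree while we waited, so the caller must start over.
 */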
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

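/*
 * Unlink a single ref (or a whole head) from its rbtree and drop the
 * reference the tree held on it; the appropriate lock
 * (delayed_refs->lock for heads, head->lock for refs) must be held.
 */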
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		rb_erase(&ref->rb_node, &head->ref_root);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

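/*
 * Try to fold @ref together with the refs that follow it in the same
 * head and compare equal (same parent/root tuple).  Matching actions
 * accumulate into ref_mod, opposing actions cancel; returns non-zero
 * once @ref itself has been consumed by the merge.
 */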
static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_head *head,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int mod = 0;
	int done = 0;

	node = rb_next(&ref->rb_node);
	while (!done && node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = 1;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}
	return done;
}

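/*
 * Merge whatever refs in this head can be merged.  Refs at or beyond
 * the lowest outstanding tree mod log sequence number are left
 * alone, since tree mod log users may still need to see them
 * individually.
 */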
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	assert_spin_locked(&head->lock);
	/*
	 * We don't have too many refs to merge in the case of delayed data
	 * refs.
	 */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_first(&head->ref_root);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			node = rb_first(&head->ref_root);
		else
			node = rb_next(&ref->rb_node);
	}
}

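/*
 * Return 1 if processing of the ref with sequence number @seq must be
 * held back because a tree mod log user still holds a lower (or
 * equal) sequence number, 0 otherwise.
 */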
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

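/*
 * Pick the next head to process, starting at run_delayed_start and
 * wrapping around at most once, skipping heads that another worker
 * has already claimed.  The returned head is marked as processing and
 * run_delayed_start is advanced past it.
 */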
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, NULL, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, NULL, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

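/*
 * (In this kernel the expected caller is __btrfs_run_delayed_refs()
 * in extent-tree.c, which runs the refs hanging off the selected head
 * and is responsible for clearing head->processing again.)
 */
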
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_head *head,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, head, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	spin_lock(&existing_ref->lock);
	existing->ref_mod += update->ref_mod;
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->ref_root = RB_ROOT;
	head_ref->processing = 0;

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(&existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

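/*
 * record a flags/key update for an extent as a head-only delayed ref;
 * count_mod stays 0 and the extent_op is applied when the head is
 * run, without changing the reference count itself.
 */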
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, NULL, 0);
}

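/*
 * tear down the slab caches created by btrfs_delayed_ref_init (used on
 * module exit and on init failure).
 */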
void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

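/*
 * called once from the btrfs module init path to create the slab
 * caches used for delayed ref tracking; on any failure, everything
 * already created is unwound via btrfs_delayed_ref_exit().
 */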
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}