// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
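
/*
 * The bookkeeping below is two-level: delayed ref heads live in an rbtree
 * keyed by extent bytenr (one head per extent), and each head carries its
 * own rbtree of individual ref modifications plus a running ref_mod sum.
 * The helpers in this file maintain both levels.
 */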

bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        bool ret = false;
        u64 reserved;

        spin_lock(&global_rsv->lock);
        reserved = global_rsv->reserved;
        spin_unlock(&global_rsv->lock);

        /*
         * Since the global reserve is just kind of magic we don't really want
         * to rely on it to save our bacon, so if our size is more than the
         * delayed_refs_rsv and the global rsv then it's time to think about
         * bailing.
         */
        spin_lock(&delayed_refs_rsv->lock);
        reserved += delayed_refs_rsv->reserved;
        if (delayed_refs_rsv->size >= reserved)
                ret = true;
        spin_unlock(&delayed_refs_rsv->lock);
        return ret;
}

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
        u64 num_entries =
                atomic_read(&trans->transaction->delayed_refs.num_entries);
        u64 avg_runtime;
        u64 val;

        smp_mb();
        avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
        val = num_entries * avg_runtime;
        if (val >= NSEC_PER_SEC)
                return 1;
        if (val >= NSEC_PER_SEC / 2)
                return 2;

        return btrfs_check_space_for_delayed_refs(trans->fs_info);
}
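
/*
 * Worked example (illustrative numbers, not from this file): with an
 * average runtime of 1,000,000 ns per ref and 600 queued entries, val is
 * 600,000,000 ns, which clears NSEC_PER_SEC / 2 but not NSEC_PER_SEC, so
 * we return 2; at 1,500 entries we return 1.  Either nonzero value
 * suggests to the caller that it is time to throttle and run some
 * delayed refs.
 */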

/*
 * Release a ref head's reservation
 *
 * @fs_info:  the filesystem
 * @nr:       number of items to drop
 *
 * This drops the delayed ref head's count from the delayed refs rsv and frees
 * any excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
        u64 released = 0;

        released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
        if (released)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, released, 0);
}

/*
 * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
 * @trans: the trans that may have generated delayed refs
 *
 * This is to be called anytime we may have adjusted trans->delayed_ref_updates;
 * it'll calculate the additional size and add it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes;

        if (!trans->delayed_ref_updates)
                return;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info,
                                                    trans->delayed_ref_updates);
        spin_lock(&delayed_rsv->lock);
        delayed_rsv->size += num_bytes;
        delayed_rsv->full = 0;
        spin_unlock(&delayed_rsv->lock);
        trans->delayed_ref_updates = 0;
}

/*
 * Transfer bytes to our delayed refs rsv
 *
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
                                       struct btrfs_block_rsv *src,
                                       u64 num_bytes)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        u64 to_free = 0;

        spin_lock(&src->lock);
        src->reserved -= num_bytes;
        src->size -= num_bytes;
        spin_unlock(&src->lock);

        spin_lock(&delayed_refs_rsv->lock);
        if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
                u64 delta = delayed_refs_rsv->size -
                        delayed_refs_rsv->reserved;

                if (num_bytes > delta) {
                        to_free = num_bytes - delta;
                        num_bytes = delta;
                }
        } else {
                to_free = num_bytes;
                num_bytes = 0;
        }

        if (num_bytes)
                delayed_refs_rsv->reserved += num_bytes;
        if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
                delayed_refs_rsv->full = 1;
        spin_unlock(&delayed_refs_rsv->lock);

        if (num_bytes)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, num_bytes, 1);
        if (to_free)
                btrfs_space_info_free_bytes_may_use(fs_info,
                                delayed_refs_rsv->space_info, to_free);
}
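
/*
 * For example (illustrative numbers): if the rsv is 1M short of its size
 * and we migrate 4M from src, 1M tops up ->reserved and the remaining 3M
 * goes back to the space info as to_free; if the rsv was already full,
 * the whole 4M is returned.
 */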

/*
 * Refill based on our delayed refs usage
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv up to one item's worth of space and
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
                                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
        u64 num_bytes = 0;
        int ret = -ENOSPC;

        spin_lock(&block_rsv->lock);
        if (block_rsv->reserved < block_rsv->size) {
                num_bytes = block_rsv->size - block_rsv->reserved;
                num_bytes = min(num_bytes, limit);
        }
        spin_unlock(&block_rsv->lock);

        if (!num_bytes)
                return 0;

        ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
                                           num_bytes, flush);
        if (ret)
                return ret;
        btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
        trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                      0, num_bytes, 1);
        return 0;
}

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
                          struct btrfs_delayed_tree_ref *ref2)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
                          struct btrfs_delayed_data_ref *ref2)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
                     struct btrfs_delayed_ref_node *ref2,
                     bool check_seq)
{
        int ret = 0;

        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
                ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
                                     btrfs_delayed_node_to_tree_ref(ref2));
        else
                ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
                                     btrfs_delayed_node_to_data_ref(ref2));
        if (ret)
                return ret;
        if (check_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        return 0;
}
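
/*
 * Note the total order this defines: refs sort by type, then by the
 * per-type fields, and only then by seq (when check_seq is set).
 * tree_insert() passes check_seq == true so otherwise-identical refs from
 * different tree mod log sequence points stay distinct, while merge_ref()
 * passes false so such refs compare equal and can be merged.
 */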

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;
        bool leftmost = true;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->bytenr) {
                        p = &(*p)->rb_left;
                } else if (bytenr > entry->bytenr) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
                struct btrfs_delayed_ref_node *ins)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *node = &ins->ref_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        bool leftmost = true;

        while (*p) {
                int comp;

                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 ref_node);
                comp = comp_refs(ins, entry, true);
                if (comp < 0) {
                        p = &(*p)->rb_left;
                } else if (comp > 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

static struct btrfs_delayed_ref_head *find_first_ref_head(
                struct btrfs_delayed_ref_root *dr)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = rb_first_cached(&dr->href_root);
        if (!n)
                return NULL;

        entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

        return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
                struct btrfs_delayed_ref_root *dr, u64 bytenr,
                bool return_bigger)
{
        struct rb_root *root = &dr->href_root.rb_root;
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                return NULL;
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                }
                return entry;
        }
        return NULL;
}
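
/*
 * For example (hypothetical bytenrs): with heads at 4096 and 16384, a
 * lookup for 8192 with return_bigger set ends on one of the two heads and
 * steps forward if it landed on the smaller one, returning the head at
 * 16384; without return_bigger the same lookup returns NULL.
 */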

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (RB_EMPTY_NODE(&head->href_node)) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref_head(head);
        return 0;
}
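
/*
 * The -EAGAIN case means the head was removed from the rbtree while we
 * slept on the mutex, so callers are expected to retry the lookup.  A
 * caller-side sketch (illustrative, not a function from this file):
 *
 *	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 *	if (head && btrfs_delayed_ref_lock(delayed_refs, head) == -EAGAIN)
 *		goto again;	/* head went away, look it up again */
 */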

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        lockdep_assert_held(&head->lock);
        rb_erase_cached(&ref->ref_node, &head->ref_tree);
        RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        struct rb_node *node = rb_next(&ref->ref_node);
        bool done = false;

        while (!done && node) {
                int mod;

                next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                node = rb_next(node);
                if (seq && next->seq >= seq)
                        break;
                if (comp_refs(ref, next, false))
                        break;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        }

        return done;
}
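
/*
 * Worked example: an ADD ref with ref_mod 2 followed by an otherwise equal
 * DROP ref with ref_mod 1 merges to a single ADD with ref_mod 1 (mod is -1
 * and the DROP node is dropped); if both had ref_mod 1 the sum reaches
 * zero and both nodes are removed.
 */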

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_ref_node *ref;
        struct rb_node *node;
        u64 seq = 0;

        lockdep_assert_held(&head->lock);

        if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
        for (node = rb_first_cached(&head->ref_tree); node;
             node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                if (seq && ref->seq >= seq)
                        continue;
                if (merge_ref(trans, delayed_refs, head, ref, seq))
                        goto again;
        }
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
        int ret = 0;
        u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

        if (min_seq != 0 && seq >= min_seq) {
                btrfs_debug(fs_info,
                            "holding back delayed_ref %llu, lowest is %llu",
                            seq, min_seq);
                ret = 1;
        }

        return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs)
{
        struct btrfs_delayed_ref_head *head;

again:
        head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
                             true);
        if (!head && delayed_refs->run_delayed_start != 0) {
                delayed_refs->run_delayed_start = 0;
                head = find_first_ref_head(delayed_refs);
        }
        if (!head)
                return NULL;

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (delayed_refs->run_delayed_start == 0)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr +
                head->num_bytes;
        return head;
}
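
/*
 * Selection is effectively round-robin across the transaction: the cursor
 * run_delayed_start advances past each chosen head (bytenr + num_bytes)
 * and wraps back to 0 at the end of the rbtree, so concurrent runners
 * tend to pick different heads instead of contending on the first one.
 */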

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        lockdep_assert_held(&head->lock);

        rb_erase_cached(&head->href_node, &delayed_refs->href_root);
        RB_CLEAR_NODE(&head->href_node);
        atomic_dec(&delayed_refs->num_entries);
        delayed_refs->num_heads--;
        if (head->processing == 0)
                delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *root,
                              struct btrfs_delayed_ref_head *href,
                              struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        exist = tree_insert(&href->ref_tree, ref);
        if (!exist)
                goto inserted;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del(&exist->add_list);
                        } else {
                                ASSERT(0);
                        }
                } else {
                        mod = -ref->ref_mod;
                }
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;
inserted:
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        spin_unlock(&href->lock);
        return ret;
}
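
/*
 * Example of the merge path: if the tree already holds a DROP ref with
 * ref_mod 1 and an otherwise equal ADD ref with ref_mod 2 arrives, the
 * existing node flips to ADD with ref_mod 2 and then mod (-1) is applied,
 * leaving a single ADD ref with ref_mod 1, the same net effect as +2 - 1.
 */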

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_delayed_ref_head *existing,
                         struct btrfs_delayed_ref_head *update)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        u64 flags = btrfs_ref_head_to_space_flags(existing);
        int old_ref_mod;

        BUG_ON(existing->is_data != update->is_data);

        spin_lock(&existing->lock);
        if (update->must_insert_reserved) {
                /*
                 * If the extent was freed and then reallocated before the
                 * delayed ref entries were processed, we can end up with an
                 * existing head ref without the must_insert_reserved flag
                 * set.  Set it again here.
                 */
                existing->must_insert_reserved = update->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (update->extent_op) {
                if (!existing->extent_op) {
                        existing->extent_op = update->extent_op;
                } else {
                        if (update->extent_op->update_key) {
                                memcpy(&existing->extent_op->key,
                                       &update->extent_op->key,
                                       sizeof(update->extent_op->key));
                                existing->extent_op->update_key = true;
                        }
                        if (update->extent_op->update_flags) {
                                existing->extent_op->flags_to_set |=
                                        update->extent_op->flags_to_set;
                                existing->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(update->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation,
         * only need the lock for this case cause we could be processing it
         * currently, for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing->is_data) {
                u64 csum_leaves =
                        btrfs_csum_bytes_to_leaves(fs_info,
                                                   existing->num_bytes);

                if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
                        delayed_refs->pending_csums -= existing->num_bytes;
                        btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
                }
                if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
                        delayed_refs->pending_csums += existing->num_bytes;
                        trans->delayed_ref_updates += csum_leaves;
                }
        }

        /*
         * This handles the following conditions:
         *
         * 1. We had a ref mod of 0 or more and went negative, indicating that
         *    we may be freeing space, so add our space to the
         *    total_bytes_pinned counter.
         * 2. We were negative and went to 0 or positive, so no longer can say
         *    that the space would be pinned, decrement our counter from the
         *    total_bytes_pinned counter.
         * 3. We are now at 0 and have ->must_insert_reserved set, which means
         *    this was a new allocation and then we dropped it, and thus must
         *    add our space to the total_bytes_pinned counter.
         */
        if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
                btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
        else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
                btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
        else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
                btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);

        spin_unlock(&existing->lock);
}
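
/*
 * Sign-transition example (illustrative): a data head with total_ref_mod 1
 * that receives an update with ref_mod -2 goes to -1, so pending_csums
 * grows by num_bytes, the trans reserves csum_leaves more items, and the
 * extent's bytes are added to total_bytes_pinned (case 1 above).
 */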

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
                                  struct btrfs_qgroup_extent_record *qrecord,
                                  u64 bytenr, u64 num_bytes, u64 ref_root,
                                  u64 reserved, int action, bool is_data,
                                  bool is_system)
{
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * The head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
         * accounting when the extent is finally added, or if a later
         * modification deletes the delayed ref without ever inserting the
         * extent into the extent allocation tree.  ref->must_insert_reserved
         * is the flag used to record that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        refcount_set(&head_ref->refs, 1);
        head_ref->bytenr = bytenr;
        head_ref->num_bytes = num_bytes;
        head_ref->ref_mod = count_mod;
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        head_ref->is_system = is_system;
        head_ref->ref_tree = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        RB_CLEAR_NODE(&head_ref->href_node);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        if (qrecord) {
                if (ref_root && reserved) {
                        qrecord->data_rsv = reserved;
                        qrecord->data_rsv_refroot = ref_root;
                }
                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;
        }
}
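
/*
 * The resulting initial ref_mod/total_ref_mod per action: +1 for
 * BTRFS_ADD_DELAYED_REF and BTRFS_ADD_DELAYED_EXTENT, -1 for
 * BTRFS_DROP_DELAYED_REF, and 0 for BTRFS_UPDATE_DELAYED_HEAD (a pure
 * extent_op update that doesn't change the reference count).
 */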

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     int action, int *qrecord_inserted_ret)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_root *delayed_refs;
        int qrecord_inserted = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
                else
                        qrecord_inserted = 1;
        }

        trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                update_existing_head_ref(trans, existing, head_ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                u64 flags = btrfs_ref_head_to_space_flags(head_ref);

                if (head_ref->is_data && head_ref->ref_mod < 0) {
                        delayed_refs->pending_csums += head_ref->num_bytes;
                        trans->delayed_ref_updates +=
                                btrfs_csum_bytes_to_leaves(trans->fs_info,
                                                           head_ref->num_bytes);
                }
                if (head_ref->ref_mod < 0)
                        btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
                                                     head_ref->num_bytes);
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        if (qrecord_inserted_ret)
                *qrecord_inserted_ret = qrecord_inserted;

        return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *                           modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:        The structure which is going to be initialized.
 *
 * @bytenr:     The logical address of the extent for which a modification is
 *              going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:   The id of the root where this modification has originated;
 *              this can be either one of the well-known metadata trees or
 *              the subvolume id which references this extent.
 *
 * @action:     Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *              BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:   Holds the type of the extent which is being recorded, can be
 *              one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *              when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *              BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
                                    struct btrfs_delayed_ref_node *ref,
                                    u64 bytenr, u64 num_bytes, u64 ref_root,
                                    int action, u8 ref_type)
{
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        ref->type = ref_type;
        RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        bool is_system;
        int action = generic_ref->action;
        int level = generic_ref->tree_ref.level;
        int ret;
        u64 bytenr = generic_ref->bytenr;
        u64 num_bytes = generic_ref->len;
        u64 parent = generic_ref->parent;
        u8 ref_type;

        is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID);

        ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(generic_ref->real_root) &&
            is_fstree(generic_ref->tree_ref.root) &&
            !generic_ref->skip_qgroup) {
                record = kzalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                        return -ENOMEM;
                }
        }

        if (parent)
                ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref_type = BTRFS_TREE_BLOCK_REF_KEY;

        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                generic_ref->tree_ref.root, action, ref_type);
        ref->root = generic_ref->tree_ref.root;
        ref->parent = parent;
        ref->level = level;

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
                              generic_ref->tree_ref.root, 0, action, false,
                              is_system);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);

        trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        if (qrecord_inserted)
                btrfs_qgroup_trace_extent_post(fs_info, record);

        return 0;
}
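
/*
 * Caller-side sketch (illustrative; see btrfs_init_generic_ref() and
 * btrfs_init_tree_ref() in delayed-ref.h for the real initializers):
 *
 *	struct btrfs_ref generic_ref = { 0 };
 *
 *	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF,
 *			       buf->start, buf->len, parent);
 *	btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), root_id);
 *	ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
 */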

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               u64 reserved)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        int action = generic_ref->action;
        int ret;
        u64 bytenr = generic_ref->bytenr;
        u64 num_bytes = generic_ref->len;
        u64 parent = generic_ref->parent;
        u64 ref_root = generic_ref->data_ref.ref_root;
        u64 owner = generic_ref->data_ref.ino;
        u64 offset = generic_ref->data_ref.offset;
        u8 ref_type;

        ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        if (parent)
                ref_type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref_type = BTRFS_EXTENT_DATA_REF_KEY;
        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                ref_root, action, ref_type);
        ref->root = ref_root;
        ref->parent = parent;
        ref->objectid = owner;
        ref->offset = offset;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root) &&
            is_fstree(generic_ref->real_root) &&
            !generic_ref->skip_qgroup) {
                record = kzalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
                              reserved, action, true, false);
        head_ref->extent_op = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);

        trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(fs_info, record);
        return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
                              BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
                              false);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
                             NULL);

        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);
        return 0;
}

/*
 * This does a simple search for the head node for a given extent.  Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
        lockdep_assert_held(&delayed_refs->lock);

        return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}