// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"

#undef SCRAMBLE_DELAYED_REFS
/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
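/*
 * Illustrative sketch (not from this file): how a caller might pick a
 * force level. The helper name and the heuristic are hypothetical; the
 * real policy lives in the callers of do_chunk_alloc() declared below.
 */
#if 0
static int pick_chunk_alloc_force(bool clustering, bool low_on_space)
{
	if (low_on_space)
		return CHUNK_ALLOC_FORCE;	/* must try to allocate */
	if (clustering)
		return CHUNK_ALLOC_LIMITED;	/* keep a small pool around */
	return CHUNK_ALLOC_NO_FORCE;		/* only if really needed */
}
#endif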
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes);
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes);
static int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * If not empty, someone is still holding mutex of
		 * full_stripe_lock, which can only be released by caller.
		 * And it will definitely cause use-after-free when caller
		 * tries to release full stripe lock.
		 *
		 * No better way to resolve, but only to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
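/*
 * Illustrative usage sketch (hypothetical caller): block groups are keyed
 * by key.objectid (their start bytenr), so an -EEXIST from the helper
 * above means another block group already starts at that logical address.
 */
#if 0
ret = btrfs_add_block_group_cache(fs_info, block_group);
if (ret == -EEXIST)
	btrfs_err(fs_info, "duplicate block group %llu",
		  block_group->key.objectid);
#endif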
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
static int add_excluded_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE);
	set_extent_bits(&fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE);
	return 0;
}
static void free_excluded_extents(struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE);
	clear_extent_bits(&fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE);
}
static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(fs_info, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(fs_info, cache->key.objectid,
				       bytenr, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(fs_info, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}
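/*
 * For reference (illustration, not code from this file): the superblock
 * copies excluded above live at fixed logical offsets, as returned by
 * btrfs_sb_offset().
 */
#if 0
btrfs_sb_offset(0);	/* 0x10000       == 64KiB, BTRFS_SUPER_INFO_OFFSET */
btrfs_sb_offset(1);	/* 0x4000000     == 64MiB  */
btrfs_sb_offset(2);	/* 0x4000000000  == 256GiB */
#endif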
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->key.objectid;
	u64 len = block_group->key.offset;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif
/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
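/*
 * Worked example (hypothetical numbers): with a pinned extent spanning
 * [40, 59] inside the range [0, 100), the loop above adds [0, 40) as free
 * space, skips past the pinned bytes, and the tail [60, 100) is added by
 * the final btrfs_add_free_space() call, so total_added == 80.
 */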
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since its read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}
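/*
 * Note on the two item types handled above (illustration): an EXTENT_ITEM
 * key is (bytenr, EXTENT_ITEM, num_bytes), so the next free-space
 * candidate starts at objectid + offset. A skinny METADATA_ITEM key is
 * (bytenr, METADATA_ITEM, level) and carries no length, so the block's
 * size is implied by fs_info->nodesize instead.
 */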
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info. The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
		struct btrfs_fs_info *info,
		u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}
static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
			     bool metadata, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags;

	if (metadata) {
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	} else {
		flags = BTRFS_BLOCK_GROUP_DATA;
	}

	space_info = __find_space_info(fs_info, flags);
	ASSERT(space_info);
	percpu_counter_add_batch(&space_info->total_bytes_pinned, num_bytes,
				 BTRFS_TOTAL_BYTES_PINNED_BATCH);
}
/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}
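/*
 * Illustrative usage (hypothetical caller): btrfs_search_slot() returns 0
 * on an exact (start, EXTENT_ITEM, len) match, so 0 here means the data
 * extent item exists and > 0 means it does not.
 */
#if 0
ret = btrfs_lookup_data_extent(fs_info, bytenr, num_bytes);
if (ret == 0)
	; /* extent item found */
else if (ret > 0)
	; /* no such extent */
else
	; /* -ENOMEM or other error */
#endif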
/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
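/*
 * Worked example (hypothetical numbers): if the committed extent item
 * holds 2 references and the delayed ref head carries ref_mod == -1, the
 * function above reports *refs == 1, i.e. the value the extent tree would
 * hold once all queued delayed refs are processed.
 */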
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs is for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs is generic, and
 * can be used in all cases the implicit back refs is used. The major
 * shortcoming of the full back refs is its overhead. Every time a tree
 * block gets COWed, we have to update back refs entries for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs is used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs is used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs is used.
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs is used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */
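/*
 * Illustrative example (hypothetical values): one data extent at bytenr
 * 12582912, referenced by inode 257 at file offset 0 in subvolume 5, can
 * be described either way:
 *
 *   implicit: (12582912, EXTENT_DATA_REF, hash(root=5, ino=257, off=0))
 *   full:     (12582912, SHARED_DATA_REF, <bytenr of the referencing leaf>)
 *
 * The implicit form survives the referencing leaf being COWed; the full
 * form records the physical location of the referencing block and has to
 * be updated whenever that block moves.
 */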
/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
		  eb->start, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
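/*
 * Note (illustration): the root crc and the owner/offset crc are combined
 * with a 31-bit shift, so the root hash lands mostly in the upper bits of
 * the result. The same three inputs always yield the same key offset,
 * which is what lookup_extent_data_ref() below relies on to find an
 * implicit data backref again.
 */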
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}
static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
		btrfs_print_v0_err(trans->fs_info);
		btrfs_abort_transaction(trans, -EINVAL);
		return -EINVAL;
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root,
				      path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
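/*
 * Summary of the mapping above (illustration):
 *
 *   tree block (owner < BTRFS_FIRST_FREE_OBJECTID), parent set:
 *						BTRFS_SHARED_BLOCK_REF_KEY
 *   tree block, no parent:			BTRFS_TREE_BLOCK_REF_KEY
 *   data extent, parent set:			BTRFS_SHARED_DATA_REF_KEY
 *   data extent, no parent:			BTRFS_EXTENT_DATA_REF_KEY
 */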
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(fs_info, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(fs_info, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(path, iref, refs_to_add,
					     extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
					     last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
	}
	return ret;
}
#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}
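/*
 * Worked example (hypothetical range): discarding [0, 128MiB) on a device
 * holding the primary superblock at 64KiB and mirror 1 at 64MiB issues
 * three discards, [0, 64KiB), [64KiB + 4KiB, 64MiB) and
 * [64MiB + 4KiB, 128MiB), leaving both 4KiB superblock areas untouched.
 */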
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;


	/*
	 * Avoid races with device replace and make sure our bbio has devices
	 * associated to its stripes that don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
			      &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;


		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			u64 bytes;
			struct request_queue *req_q;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}
			req_q = bdev_get_queue(stripe->dev->bdev);
			if (!blk_queue_discard(req_q))
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length,
						  &bytes);
			if (!ret)
				discarded_bytes += bytes;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bbio(bbio);
	}
	btrfs_bio_counter_dec(fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;


	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}
2027 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2028 struct btrfs_root *root,
2029 u64 bytenr, u64 num_bytes, u64 parent,
2030 u64 root_objectid, u64 owner, u64 offset)
2032 struct btrfs_fs_info *fs_info = root->fs_info;
2033 int old_ref_mod, new_ref_mod;
2036 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2037 root_objectid == BTRFS_TREE_LOG_OBJECTID);
2039 btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
2040 owner, offset, BTRFS_ADD_DELAYED_REF);
2042 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2043 ret = btrfs_add_delayed_tree_ref(trans, bytenr,
2045 root_objectid, (int)owner,
2046 BTRFS_ADD_DELAYED_REF, NULL,
2047 &old_ref_mod, &new_ref_mod);
2049 ret = btrfs_add_delayed_data_ref(trans, bytenr,
2051 root_objectid, owner, offset,
2052 0, BTRFS_ADD_DELAYED_REF,
2053 &old_ref_mod, &new_ref_mod);
2056 if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0) {
2057 bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
2059 add_pinned_bytes(fs_info, -num_bytes, metadata, root_objectid);
/*
 * __btrfs_inc_extent_ref - insert backreference for a given extent
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block. Since new extents are always
 *		    created with indirect references, this will only be the case
 *		    when relocating a shared extent. In that case, root_objectid
 *		    will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
 *		    be 0
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is currently
 *		    always passed as 0. For data extents it is the fileoffset
 *		    this extent belongs to.
 *
 * @refs_to_add     Number of references to add
 *
 * @extent_op       Pointer to a structure, holding information necessary when
 *                  updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	/* now insert the actual backref */
	ret = insert_extent_backref(trans, path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (trans->aborted)
		return 0;

	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

again:
	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	u64 parent = 0;
	u64 ref_root = 0;

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->ref_mod != 1) {
		btrfs_err(trans->fs_info,
	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EIO;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, node, extent_op);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
2345 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2346 struct btrfs_delayed_ref_node *node,
2347 struct btrfs_delayed_extent_op *extent_op,
2348 int insert_reserved)
2352 if (trans->aborted) {
2353 if (insert_reserved)
2354 btrfs_pin_extent(trans->fs_info, node->bytenr,
2355 node->num_bytes, 1);
2359 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2360 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2361 ret = run_delayed_tree_ref(trans, node, extent_op,
2363 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2364 node->type == BTRFS_SHARED_DATA_REF_KEY)
2365 ret = run_delayed_data_ref(trans, node, extent_op,
2369 if (ret && insert_reserved)
2370 btrfs_pin_extent(trans->fs_info, node->bytenr,
2371 node->num_bytes, 1);
2375 static inline struct btrfs_delayed_ref_node *
2376 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2378 struct btrfs_delayed_ref_node *ref;
2380 if (RB_EMPTY_ROOT(&head->ref_tree))
2384 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2385 * This is to prevent a ref count from going down to zero, which deletes
2386 * the extent item from the extent tree, when there still are references
2387 * to add, which would fail because they would not find the extent item.
2389 if (!list_empty(&head->ref_add_list))
2390 return list_first_entry(&head->ref_add_list,
2391 struct btrfs_delayed_ref_node, add_list);
2393 ref = rb_entry(rb_first(&head->ref_tree),
2394 struct btrfs_delayed_ref_node, ref_node);
2395 ASSERT(list_empty(&ref->add_list));
2399 static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
2400 struct btrfs_delayed_ref_head *head)
2402 spin_lock(&delayed_refs->lock);
2403 head->processing = 0;
2404 delayed_refs->num_heads_ready++;
2405 spin_unlock(&delayed_refs->lock);
2406 btrfs_delayed_ref_unlock(head);
2409 static int cleanup_extent_op(struct btrfs_trans_handle *trans,
2410 struct btrfs_delayed_ref_head *head)
2412 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
2417 head->extent_op = NULL;
2418 if (head->must_insert_reserved) {
2419 btrfs_free_delayed_extent_op(extent_op);
2422 spin_unlock(&head->lock);
2423 ret = run_delayed_extent_op(trans, head, extent_op);
2424 btrfs_free_delayed_extent_op(extent_op);
2425 return ret ? ret : 1;
2428 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
2429 struct btrfs_delayed_ref_head *head)
2432 struct btrfs_fs_info *fs_info = trans->fs_info;
2433 struct btrfs_delayed_ref_root *delayed_refs;
2436 delayed_refs = &trans->transaction->delayed_refs;
2438 ret = cleanup_extent_op(trans, head);
2440 unselect_delayed_ref_head(delayed_refs, head);
2441 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2448 * Need to drop our head ref lock and re-acquire the delayed ref lock
2449 * and then re-check to make sure nobody got added.
2451 spin_unlock(&head->lock);
2452 spin_lock(&delayed_refs->lock);
2453 spin_lock(&head->lock);
2454 if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
2455 spin_unlock(&head->lock);
2456 spin_unlock(&delayed_refs->lock);
2459 delayed_refs->num_heads--;
2460 rb_erase(&head->href_node, &delayed_refs->href_root);
2461 RB_CLEAR_NODE(&head->href_node);
2462 spin_unlock(&head->lock);
2463 spin_unlock(&delayed_refs->lock);
2464 atomic_dec(&delayed_refs->num_entries);
2466 trace_run_delayed_ref_head(fs_info, head, 0);
2468 if (head->total_ref_mod < 0) {
2469 struct btrfs_space_info *space_info;
2473 flags = BTRFS_BLOCK_GROUP_DATA;
2474 else if (head->is_system)
2475 flags = BTRFS_BLOCK_GROUP_SYSTEM;
2477 flags = BTRFS_BLOCK_GROUP_METADATA;
2478 space_info = __find_space_info(fs_info, flags);
2480 percpu_counter_add_batch(&space_info->total_bytes_pinned,
2482 BTRFS_TOTAL_BYTES_PINNED_BATCH);
2484 if (head->is_data) {
2485 spin_lock(&delayed_refs->lock);
2486 delayed_refs->pending_csums -= head->num_bytes;
2487 spin_unlock(&delayed_refs->lock);
2491 if (head->must_insert_reserved) {
2492 btrfs_pin_extent(fs_info, head->bytenr,
2493 head->num_bytes, 1);
2494 if (head->is_data) {
2495 ret = btrfs_del_csums(trans, fs_info, head->bytenr,
2500 /* Also free its reserved qgroup space */
2501 btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
2502 head->qgroup_reserved);
2503 btrfs_delayed_ref_unlock(head);
2504 btrfs_put_delayed_ref_head(head);
2509 * Returns 0 on success or if called with an already aborted transaction.
2510 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2512 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2515 struct btrfs_fs_info *fs_info = trans->fs_info;
2516 struct btrfs_delayed_ref_root *delayed_refs;
2517 struct btrfs_delayed_ref_node *ref;
2518 struct btrfs_delayed_ref_head *locked_ref = NULL;
2519 struct btrfs_delayed_extent_op *extent_op;
2520 ktime_t start = ktime_get();
2522 unsigned long count = 0;
2523 unsigned long actual_count = 0;
2524 int must_insert_reserved = 0;
2526 delayed_refs = &trans->transaction->delayed_refs;
2532 spin_lock(&delayed_refs->lock);
2533 locked_ref = btrfs_select_ref_head(trans);
2535 spin_unlock(&delayed_refs->lock);
2539 /* grab the lock that says we are going to process
2540 * all the refs for this head */
2541 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2542 spin_unlock(&delayed_refs->lock);
2544 * we may have dropped the spin lock to get the head
2545 * mutex lock, and that might have given someone else
2546 * time to free the head. If that's true, it has been
2547 * removed from our list and we can move on.
2549 if (ret == -EAGAIN) {
2557 * We need to try and merge add/drops of the same ref since we
2558 * can run into issues with relocate dropping the implicit ref
2559 * and then it being added back again before the drop can
2560 * finish. If we merged anything we need to re-loop so we can
2561 * get a good ref.
2562 * Or we can get node references of the same type that weren't
2563 * merged when created due to bumps in the tree mod seq, and
2564 * we need to merge them to prevent adding an inline extent
2565 * backref before dropping it (triggering a BUG_ON at
2566 * insert_inline_extent_backref()).
2568 spin_lock(&locked_ref->lock);
2569 btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
2571 ref = select_delayed_ref(locked_ref);
2573 if (ref && ref->seq &&
2574 btrfs_check_delayed_seq(fs_info, ref->seq)) {
2575 spin_unlock(&locked_ref->lock);
2576 unselect_delayed_ref_head(delayed_refs, locked_ref);
2584 * We're done processing refs in this ref_head, clean everything
2585 * up and move on to the next ref_head.
2588 ret = cleanup_ref_head(trans, locked_ref);
2590 /* We dropped our lock, we need to loop. */
2603 rb_erase(&ref->ref_node, &locked_ref->ref_tree);
2604 RB_CLEAR_NODE(&ref->ref_node);
2605 if (!list_empty(&ref->add_list))
2606 list_del(&ref->add_list);
2608 * When we play the delayed ref, also correct the ref_mod on the head.
2611 switch (ref->action) {
2612 case BTRFS_ADD_DELAYED_REF:
2613 case BTRFS_ADD_DELAYED_EXTENT:
2614 locked_ref->ref_mod -= ref->ref_mod;
2616 case BTRFS_DROP_DELAYED_REF:
2617 locked_ref->ref_mod += ref->ref_mod;
2622 atomic_dec(&delayed_refs->num_entries);
2625 * Record the must_insert_reserved flag before we drop the spin
2626 * lock.
2628 must_insert_reserved = locked_ref->must_insert_reserved;
2629 locked_ref->must_insert_reserved = 0;
2631 extent_op = locked_ref->extent_op;
2632 locked_ref->extent_op = NULL;
2633 spin_unlock(&locked_ref->lock);
2635 ret = run_one_delayed_ref(trans, ref, extent_op,
2636 must_insert_reserved);
2638 btrfs_free_delayed_extent_op(extent_op);
2640 unselect_delayed_ref_head(delayed_refs, locked_ref);
2641 btrfs_put_delayed_ref(ref);
2642 btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
2647 btrfs_put_delayed_ref(ref);
2653 * We don't want to include ref heads since we can have empty ref heads
2654 * and those will drastically skew our runtime down since we just do
2655 * accounting, no actual extent tree updates.
2657 if (actual_count > 0) {
2658 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2662 * We weigh the current average higher than our current runtime
2663 * to avoid large swings in the average.
2665 spin_lock(&delayed_refs->lock);
2666 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2667 fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
2668 spin_unlock(&delayed_refs->lock);
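/*
 * Worked example of the weighted average above (illustrative, not from
 * the original source): with a previous average of 100us and a new
 * runtime of 20us, avg = (100 * 3 + 20) / 4 = 80us, so one unusually
 * fast or slow batch only moves the average by a quarter of the
 * difference.
 */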
2673 #ifdef SCRAMBLE_DELAYED_REFS
2675 * Normally delayed refs get processed in ascending bytenr order. This
2676 * correlates in most cases to the order added. To expose dependencies on this
2677 * order, we start to process the tree in the middle instead of the beginning
2679 static u64 find_middle(struct rb_root *root)
2681 struct rb_node *n = root->rb_node;
2682 struct btrfs_delayed_ref_node *entry;
2685 u64 first = 0, last = 0;
2689 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2690 first = entry->bytenr;
2694 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2695 last = entry->bytenr;
2700 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2701 WARN_ON(!entry->in_tree);
2703 middle = entry->bytenr;
2716 static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
2720 num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2721 sizeof(struct btrfs_extent_inline_ref));
2722 if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2723 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2726 * We don't ever fill up leaves all the way so multiply by 2 just to be
2727 * closer to what we're really going to want to use.
2729 return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
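/*
 * Worked example (illustrative, assuming SKINNY_METADATA and a 16K
 * nodesize): each head costs sizeof(btrfs_extent_item) +
 * sizeof(btrfs_extent_inline_ref) = 24 + 9 = 33 bytes, so 1000 heads
 * need ~33000 bytes of leaf data, which divides into 2 leaves of ~16K
 * usable space each.
 */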
2733 * Takes the number of bytes to be checksummed and figures out how many leaves it
2734 * would require to store the csums for that many bytes.
2736 u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
2739 u64 num_csums_per_leaf;
2742 csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
2743 num_csums_per_leaf = div64_u64(csum_size,
2744 (u64)btrfs_super_csum_size(fs_info->super_copy));
2745 num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
2746 num_csums += num_csums_per_leaf - 1;
2747 num_csums = div64_u64(num_csums, num_csums_per_leaf);
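/*
 * Worked example (illustrative, assuming a 4K sectorsize, 4-byte crc32c
 * csums and ~16K of item space per leaf): 1GiB of data has 262144
 * sectors to csum, a leaf holds roughly 4000 csums, so the result is
 * about 65 leaves.
 */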
2751 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2752 struct btrfs_fs_info *fs_info)
2754 struct btrfs_block_rsv *global_rsv;
2755 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2756 u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2757 unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
2758 u64 num_bytes, num_dirty_bgs_bytes;
2761 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
2762 num_heads = heads_to_leaves(fs_info, num_heads);
2764 num_bytes += (num_heads - 1) * fs_info->nodesize;
2766 num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
2768 num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
2770 global_rsv = &fs_info->global_block_rsv;
2773 * If we can't allocate any more chunks let's make sure we have _lots_ of
2774 * wiggle room since running delayed refs can create more delayed refs.
2776 if (global_rsv->space_info->full) {
2777 num_dirty_bgs_bytes <<= 1;
2781 spin_lock(&global_rsv->lock);
2782 if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2784 spin_unlock(&global_rsv->lock);
2788 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2789 struct btrfs_fs_info *fs_info)
2792 atomic_read(&trans->transaction->delayed_refs.num_entries);
2797 avg_runtime = fs_info->avg_delayed_ref_runtime;
2798 val = num_entries * avg_runtime;
2799 if (val >= NSEC_PER_SEC)
2801 if (val >= NSEC_PER_SEC / 2)
2804 return btrfs_check_space_for_delayed_refs(trans, fs_info);
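/*
 * Illustrative numbers for the heuristic above (not from the original
 * source): with an average runtime of 100us per delayed ref entry,
 * ~10000 pending entries estimate to a full second of work and ~5000 to
 * half a second; below the half-second threshold the decision falls
 * back to the space check in btrfs_check_space_for_delayed_refs().
 */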
2807 struct async_delayed_refs {
2808 struct btrfs_root *root;
2813 struct completion wait;
2814 struct btrfs_work work;
2817 static inline struct async_delayed_refs *
2818 to_async_delayed_refs(struct btrfs_work *work)
2820 return container_of(work, struct async_delayed_refs, work);
2823 static void delayed_ref_async_start(struct btrfs_work *work)
2825 struct async_delayed_refs *async = to_async_delayed_refs(work);
2826 struct btrfs_trans_handle *trans;
2827 struct btrfs_fs_info *fs_info = async->root->fs_info;
2830 /* if the commit is already started, we don't need to wait here */
2831 if (btrfs_transaction_blocked(fs_info))
2834 trans = btrfs_join_transaction(async->root);
2835 if (IS_ERR(trans)) {
2836 async->error = PTR_ERR(trans);
2841 * trans->sync means that when we call end_transaction, we won't
2842 * wait on delayed refs
2846 /* Don't bother flushing if we got into a different transaction */
2847 if (trans->transid > async->transid)
2850 ret = btrfs_run_delayed_refs(trans, async->count);
2854 ret = btrfs_end_transaction(trans);
2855 if (ret && !async->error)
2859 complete(&async->wait);
2864 int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
2865 unsigned long count, u64 transid, int wait)
2867 struct async_delayed_refs *async;
2870 async = kmalloc(sizeof(*async), GFP_NOFS);
2874 async->root = fs_info->tree_root;
2875 async->count = count;
2877 async->transid = transid;
2882 init_completion(&async->wait);
2884 btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2885 delayed_ref_async_start, NULL, NULL);
2887 btrfs_queue_work(fs_info->extent_workers, &async->work);
2890 wait_for_completion(&async->wait);
2899 * this starts processing the delayed reference count updates and
2900 * extent insertions we have queued up so far. count can be
2901 * 0, which means to process everything in the tree at the start
2902 * of the run (but not newly added entries), or it can be some target
2903 * number you'd like to process.
2905 * Returns 0 on success or if called with an aborted transaction
2906 * Returns <0 on error and aborts the transaction
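 *
 * Example usage (illustrative): btrfs_run_delayed_refs(trans, 0)
 * processes everything queued at the start of the run, while
 * btrfs_run_delayed_refs(trans, (unsigned long)-1) keeps going until
 * the whole href tree is empty, as done at transaction commit time.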
2908 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2909 unsigned long count)
2911 struct btrfs_fs_info *fs_info = trans->fs_info;
2912 struct rb_node *node;
2913 struct btrfs_delayed_ref_root *delayed_refs;
2914 struct btrfs_delayed_ref_head *head;
2916 int run_all = count == (unsigned long)-1;
2918 /* We'll clean this up in btrfs_cleanup_transaction */
2922 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
2925 delayed_refs = &trans->transaction->delayed_refs;
2927 count = atomic_read(&delayed_refs->num_entries) * 2;
2930 #ifdef SCRAMBLE_DELAYED_REFS
2931 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2933 ret = __btrfs_run_delayed_refs(trans, count);
2935 btrfs_abort_transaction(trans, ret);
2940 if (!list_empty(&trans->new_bgs))
2941 btrfs_create_pending_block_groups(trans);
2943 spin_lock(&delayed_refs->lock);
2944 node = rb_first(&delayed_refs->href_root);
2946 spin_unlock(&delayed_refs->lock);
2949 head = rb_entry(node, struct btrfs_delayed_ref_head,
2951 refcount_inc(&head->refs);
2952 spin_unlock(&delayed_refs->lock);
2954 /* Mutex was contended, block until it's released and retry. */
2955 mutex_lock(&head->mutex);
2956 mutex_unlock(&head->mutex);
2958 btrfs_put_delayed_ref_head(head);
2966 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2967 struct btrfs_fs_info *fs_info,
2968 u64 bytenr, u64 num_bytes, u64 flags,
2969 int level, int is_data)
2971 struct btrfs_delayed_extent_op *extent_op;
2974 extent_op = btrfs_alloc_delayed_extent_op();
2978 extent_op->flags_to_set = flags;
2979 extent_op->update_flags = true;
2980 extent_op->update_key = false;
2981 extent_op->is_data = is_data ? true : false;
2982 extent_op->level = level;
2984 ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
2985 num_bytes, extent_op);
2987 btrfs_free_delayed_extent_op(extent_op);
2991 static noinline int check_delayed_ref(struct btrfs_root *root,
2992 struct btrfs_path *path,
2993 u64 objectid, u64 offset, u64 bytenr)
2995 struct btrfs_delayed_ref_head *head;
2996 struct btrfs_delayed_ref_node *ref;
2997 struct btrfs_delayed_data_ref *data_ref;
2998 struct btrfs_delayed_ref_root *delayed_refs;
2999 struct btrfs_transaction *cur_trans;
3000 struct rb_node *node;
3003 spin_lock(&root->fs_info->trans_lock);
3004 cur_trans = root->fs_info->running_transaction;
3006 refcount_inc(&cur_trans->use_count);
3007 spin_unlock(&root->fs_info->trans_lock);
3011 delayed_refs = &cur_trans->delayed_refs;
3012 spin_lock(&delayed_refs->lock);
3013 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
3015 spin_unlock(&delayed_refs->lock);
3016 btrfs_put_transaction(cur_trans);
3020 if (!mutex_trylock(&head->mutex)) {
3021 refcount_inc(&head->refs);
3022 spin_unlock(&delayed_refs->lock);
3024 btrfs_release_path(path);
3027 * Mutex was contended, block until it's released and let
3028 * the caller try again.
3030 mutex_lock(&head->mutex);
3031 mutex_unlock(&head->mutex);
3032 btrfs_put_delayed_ref_head(head);
3033 btrfs_put_transaction(cur_trans);
3036 spin_unlock(&delayed_refs->lock);
3038 spin_lock(&head->lock);
3040 * XXX: We should replace this with a proper search function in the
3041 * future.
3043 for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
3044 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
3045 /* If it's a shared ref we know a cross reference exists */
3046 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3051 data_ref = btrfs_delayed_node_to_data_ref(ref);
3054 * If our ref doesn't match the one we're currently looking at
3055 * then we have a cross reference.
3057 if (data_ref->root != root->root_key.objectid ||
3058 data_ref->objectid != objectid ||
3059 data_ref->offset != offset) {
3064 spin_unlock(&head->lock);
3065 mutex_unlock(&head->mutex);
3066 btrfs_put_transaction(cur_trans);
3070 static noinline int check_committed_ref(struct btrfs_root *root,
3071 struct btrfs_path *path,
3072 u64 objectid, u64 offset, u64 bytenr)
3074 struct btrfs_fs_info *fs_info = root->fs_info;
3075 struct btrfs_root *extent_root = fs_info->extent_root;
3076 struct extent_buffer *leaf;
3077 struct btrfs_extent_data_ref *ref;
3078 struct btrfs_extent_inline_ref *iref;
3079 struct btrfs_extent_item *ei;
3080 struct btrfs_key key;
3085 key.objectid = bytenr;
3086 key.offset = (u64)-1;
3087 key.type = BTRFS_EXTENT_ITEM_KEY;
3089 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3092 BUG_ON(ret == 0); /* Corruption */
3095 if (path->slots[0] == 0)
3099 leaf = path->nodes[0];
3100 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3102 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3106 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3107 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3109 if (item_size != sizeof(*ei) +
3110 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3113 if (btrfs_extent_generation(leaf, ei) <=
3114 btrfs_root_last_snapshot(&root->root_item))
3117 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3119 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
3120 if (type != BTRFS_EXTENT_DATA_REF_KEY)
3123 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3124 if (btrfs_extent_refs(leaf, ei) !=
3125 btrfs_extent_data_ref_count(leaf, ref) ||
3126 btrfs_extent_data_ref_root(leaf, ref) !=
3127 root->root_key.objectid ||
3128 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3129 btrfs_extent_data_ref_offset(leaf, ref) != offset)
3137 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
3140 struct btrfs_path *path;
3144 path = btrfs_alloc_path();
3149 ret = check_committed_ref(root, path, objectid,
3151 if (ret && ret != -ENOENT)
3154 ret2 = check_delayed_ref(root, path, objectid,
3156 } while (ret2 == -EAGAIN);
3158 if (ret2 && ret2 != -ENOENT) {
3163 if (ret != -ENOENT || ret2 != -ENOENT)
3166 btrfs_free_path(path);
3167 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3172 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3173 struct btrfs_root *root,
3174 struct extent_buffer *buf,
3175 int full_backref, int inc)
3177 struct btrfs_fs_info *fs_info = root->fs_info;
3183 struct btrfs_key key;
3184 struct btrfs_file_extent_item *fi;
3188 int (*process_func)(struct btrfs_trans_handle *,
3189 struct btrfs_root *,
3190 u64, u64, u64, u64, u64, u64);
3193 if (btrfs_is_testing(fs_info))
3196 ref_root = btrfs_header_owner(buf);
3197 nritems = btrfs_header_nritems(buf);
3198 level = btrfs_header_level(buf);
3200 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3204 process_func = btrfs_inc_extent_ref;
3206 process_func = btrfs_free_extent;
3209 parent = buf->start;
3213 for (i = 0; i < nritems; i++) {
3215 btrfs_item_key_to_cpu(buf, &key, i);
3216 if (key.type != BTRFS_EXTENT_DATA_KEY)
3218 fi = btrfs_item_ptr(buf, i,
3219 struct btrfs_file_extent_item);
3220 if (btrfs_file_extent_type(buf, fi) ==
3221 BTRFS_FILE_EXTENT_INLINE)
3223 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3227 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3228 key.offset -= btrfs_file_extent_offset(buf, fi);
3229 ret = process_func(trans, root, bytenr, num_bytes,
3230 parent, ref_root, key.objectid,
3235 bytenr = btrfs_node_blockptr(buf, i);
3236 num_bytes = fs_info->nodesize;
3237 ret = process_func(trans, root, bytenr, num_bytes,
3238 parent, ref_root, level - 1, 0);
3248 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3249 struct extent_buffer *buf, int full_backref)
3251 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3254 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3255 struct extent_buffer *buf, int full_backref)
3257 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3260 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3261 struct btrfs_fs_info *fs_info,
3262 struct btrfs_path *path,
3263 struct btrfs_block_group_cache *cache)
3266 struct btrfs_root *extent_root = fs_info->extent_root;
3268 struct extent_buffer *leaf;
3270 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3277 leaf = path->nodes[0];
3278 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3279 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3280 btrfs_mark_buffer_dirty(leaf);
3282 btrfs_release_path(path);
3287 static struct btrfs_block_group_cache *
3288 next_block_group(struct btrfs_fs_info *fs_info,
3289 struct btrfs_block_group_cache *cache)
3291 struct rb_node *node;
3293 spin_lock(&fs_info->block_group_cache_lock);
3295 /* If our block group was removed, we need a full search. */
3296 if (RB_EMPTY_NODE(&cache->cache_node)) {
3297 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3299 spin_unlock(&fs_info->block_group_cache_lock);
3300 btrfs_put_block_group(cache);
3301 cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
3302 return cache;
3303 node = rb_next(&cache->cache_node);
3304 btrfs_put_block_group(cache);
3306 cache = rb_entry(node, struct btrfs_block_group_cache,
3308 btrfs_get_block_group(cache);
3311 spin_unlock(&fs_info->block_group_cache_lock);
3315 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3316 struct btrfs_trans_handle *trans,
3317 struct btrfs_path *path)
3319 struct btrfs_fs_info *fs_info = block_group->fs_info;
3320 struct btrfs_root *root = fs_info->tree_root;
3321 struct inode *inode = NULL;
3322 struct extent_changeset *data_reserved = NULL;
3324 int dcs = BTRFS_DC_ERROR;
3330 * If this block group is smaller than 100 megs don't bother caching the
3331 * block group.
3333 if (block_group->key.offset < (100 * SZ_1M)) {
3334 spin_lock(&block_group->lock);
3335 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3336 spin_unlock(&block_group->lock);
3343 inode = lookup_free_space_inode(fs_info, block_group, path);
3344 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3345 ret = PTR_ERR(inode);
3346 btrfs_release_path(path);
3350 if (IS_ERR(inode)) {
3354 if (block_group->ro)
3357 ret = create_free_space_inode(fs_info, trans, block_group,
3365 * We want to set the generation to 0, that way if anything goes wrong
3366 * from here on out we know not to trust this cache when we load up next
3367 * time.
3369 BTRFS_I(inode)->generation = 0;
3370 ret = btrfs_update_inode(trans, root, inode);
3373 * So theoretically we could recover from this, simply set the
3374 * super cache generation to 0 so we know to invalidate the
3375 * cache, but then we'd have to keep track of the block groups
3376 * that fail this way so we know we _have_ to reset this cache
3377 * before the next commit or risk reading stale cache. So to
3378 * limit our exposure to horrible edge cases let's just abort the
3379 * transaction, this only happens in really bad situations anyway.
3382 btrfs_abort_transaction(trans, ret);
3387 /* We've already setup this transaction, go ahead and exit */
3388 if (block_group->cache_generation == trans->transid &&
3389 i_size_read(inode)) {
3390 dcs = BTRFS_DC_SETUP;
3394 if (i_size_read(inode) > 0) {
3395 ret = btrfs_check_trunc_cache_free_space(fs_info,
3396 &fs_info->global_block_rsv);
3400 ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
3405 spin_lock(&block_group->lock);
3406 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3407 !btrfs_test_opt(fs_info, SPACE_CACHE)) {
3409 * don't bother trying to write stuff out _if_
3410 * a) we're not cached,
3411 * b) we're using the nospace_cache mount option,
3412 * c) we're using the v2 space_cache (FREE_SPACE_TREE).
3414 dcs = BTRFS_DC_WRITTEN;
3415 spin_unlock(&block_group->lock);
3418 spin_unlock(&block_group->lock);
3421 * We hit an ENOSPC when setting up the cache in this transaction, just
3422 * skip doing the setup, we've already cleared the cache so we're safe.
3424 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3430 * Try to preallocate enough space based on how big the block group is.
3431 * Keep in mind this has to include any pinned space which could end up
3432 * taking up quite a bit since it's not folded into the other space
3433 * counters.
3435 num_pages = div_u64(block_group->key.offset, SZ_256M);
3436 if (!num_pages)
3437 num_pages = 1;
3439 num_pages *= 16;
3440 num_pages *= PAGE_SIZE;
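/*
 * Illustrative sizing for the preallocation above (not from the
 * original source): a 1GiB block group gives div_u64(1G, 256M) = 4,
 * scaled to 4 * 16 = 64 pages, i.e. 256K of cache space with 4K pages.
 */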
3442 ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
3446 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3447 num_pages, num_pages,
3450 * Our cache requires contiguous chunks so that we don't modify a bunch
3451 * of metadata or split extents when writing the cache out, which means
3452 * we can enospc if we are heavily fragmented in addition to just normal
3453 * out of space conditions. So if we hit this just skip setting up any
3454 * other block groups for this transaction, maybe we'll unpin enough
3455 * space the next time around.
3458 dcs = BTRFS_DC_SETUP;
3459 else if (ret == -ENOSPC)
3460 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3465 btrfs_release_path(path);
3467 spin_lock(&block_group->lock);
3468 if (!ret && dcs == BTRFS_DC_SETUP)
3469 block_group->cache_generation = trans->transid;
3470 block_group->disk_cache_state = dcs;
3471 spin_unlock(&block_group->lock);
3473 extent_changeset_free(data_reserved);
3477 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3478 struct btrfs_fs_info *fs_info)
3480 struct btrfs_block_group_cache *cache, *tmp;
3481 struct btrfs_transaction *cur_trans = trans->transaction;
3482 struct btrfs_path *path;
3484 if (list_empty(&cur_trans->dirty_bgs) ||
3485 !btrfs_test_opt(fs_info, SPACE_CACHE))
3488 path = btrfs_alloc_path();
3492 /* Could add new block groups, use _safe just in case */
3493 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3495 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3496 cache_save_setup(cache, trans, path);
3499 btrfs_free_path(path);
3504 * transaction commit does final block group cache writeback during a
3505 * critical section where nothing is allowed to change the FS. This is
3506 * required in order for the cache to actually match the block group,
3507 * but can introduce a lot of latency into the commit.
3509 * So, btrfs_start_dirty_block_groups is here to kick off block group
3510 * cache IO. There's a chance we'll have to redo some of it if the
3511 * block group changes again during the commit, but it greatly reduces
3512 * the commit latency by getting rid of the easy block groups while
3513 * we're still allowing others to join the commit.
3515 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
3517 struct btrfs_fs_info *fs_info = trans->fs_info;
3518 struct btrfs_block_group_cache *cache;
3519 struct btrfs_transaction *cur_trans = trans->transaction;
3522 struct btrfs_path *path = NULL;
3524 struct list_head *io = &cur_trans->io_bgs;
3525 int num_started = 0;
3528 spin_lock(&cur_trans->dirty_bgs_lock);
3529 if (list_empty(&cur_trans->dirty_bgs)) {
3530 spin_unlock(&cur_trans->dirty_bgs_lock);
3533 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3534 spin_unlock(&cur_trans->dirty_bgs_lock);
3538 * make sure all the block groups on our dirty list actually
3539 * exist.
3541 btrfs_create_pending_block_groups(trans);
3544 path = btrfs_alloc_path();
3550 * cache_write_mutex is here only to save us from balance or automatic
3551 * removal of empty block groups deleting this block group while we are
3552 * writing out the cache
3554 mutex_lock(&trans->transaction->cache_write_mutex);
3555 while (!list_empty(&dirty)) {
3556 cache = list_first_entry(&dirty,
3557 struct btrfs_block_group_cache,
3560 * this can happen if something re-dirties a block
3561 * group that is already under IO. Just wait for it to
3562 * finish and then do it all again
3564 if (!list_empty(&cache->io_list)) {
3565 list_del_init(&cache->io_list);
3566 btrfs_wait_cache_io(trans, cache, path);
3567 btrfs_put_block_group(cache);
3572 * btrfs_wait_cache_io uses the cache->dirty_list to decide
3573 * if it should update the cache_state. Don't delete
3574 * until after we wait.
3576 * Since we're not running in the commit critical section
3577 * we need the dirty_bgs_lock to protect from update_block_group
3579 spin_lock(&cur_trans->dirty_bgs_lock);
3580 list_del_init(&cache->dirty_list);
3581 spin_unlock(&cur_trans->dirty_bgs_lock);
3585 cache_save_setup(cache, trans, path);
3587 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3588 cache->io_ctl.inode = NULL;
3589 ret = btrfs_write_out_cache(fs_info, trans,
3591 if (ret == 0 && cache->io_ctl.inode) {
3596 * The cache_write_mutex is protecting the
3597 * io_list, also refer to the definition of
3598 * btrfs_transaction::io_bgs for more details
3600 list_add_tail(&cache->io_list, io);
3603 * if we failed to write the cache, the
3604 * generation will be bad and life goes on
3610 ret = write_one_cache_group(trans, fs_info,
3613 * Our block group might still be attached to the list
3614 * of new block groups in the transaction handle of some
3615 * other task (struct btrfs_trans_handle->new_bgs). This
3616 * means its block group item isn't yet in the extent
3617 * tree. If this happens ignore the error, as we will
3618 * try again later in the critical section of the
3619 * transaction commit.
3621 if (ret == -ENOENT) {
3623 spin_lock(&cur_trans->dirty_bgs_lock);
3624 if (list_empty(&cache->dirty_list)) {
3625 list_add_tail(&cache->dirty_list,
3626 &cur_trans->dirty_bgs);
3627 btrfs_get_block_group(cache);
3629 spin_unlock(&cur_trans->dirty_bgs_lock);
3631 btrfs_abort_transaction(trans, ret);
3635 /* if it's not on the io list, we need to put the block group */
3637 btrfs_put_block_group(cache);
3643 * Avoid blocking other tasks for too long. It might even save
3644 * us from writing caches for block groups that are going to be
3645 * removed.
3647 mutex_unlock(&trans->transaction->cache_write_mutex);
3648 mutex_lock(&trans->transaction->cache_write_mutex);
3650 mutex_unlock(&trans->transaction->cache_write_mutex);
3653 * go through delayed refs for all the stuff we've just kicked off
3654 * and then loop back (just once)
3656 ret = btrfs_run_delayed_refs(trans, 0);
3657 if (!ret && loops == 0) {
3659 spin_lock(&cur_trans->dirty_bgs_lock);
3660 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3662 * dirty_bgs_lock protects us from concurrent block group
3663 * deletes too (not just cache_write_mutex).
3665 if (!list_empty(&dirty)) {
3666 spin_unlock(&cur_trans->dirty_bgs_lock);
3669 spin_unlock(&cur_trans->dirty_bgs_lock);
3670 } else if (ret < 0) {
3671 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
3674 btrfs_free_path(path);
3678 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3679 struct btrfs_fs_info *fs_info)
3681 struct btrfs_block_group_cache *cache;
3682 struct btrfs_transaction *cur_trans = trans->transaction;
3685 struct btrfs_path *path;
3686 struct list_head *io = &cur_trans->io_bgs;
3687 int num_started = 0;
3689 path = btrfs_alloc_path();
3694 * Even though we are in the critical section of the transaction commit,
3695 * we can still have concurrent tasks adding elements to this
3696 * transaction's list of dirty block groups. These tasks correspond to
3697 * endio free space workers started when writeback finishes for a
3698 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3699 * allocate new block groups as a result of COWing nodes of the root
3700 * tree when updating the free space inode. The writeback for the space
3701 * caches is triggered by an earlier call to
3702 * btrfs_start_dirty_block_groups() and iterations of the following
3703 * loop.
3704 * Also we want to do the cache_save_setup first and then run the
3705 * delayed refs to make sure we have the best chance at doing this all
3706 * in one shot.
3708 spin_lock(&cur_trans->dirty_bgs_lock);
3709 while (!list_empty(&cur_trans->dirty_bgs)) {
3710 cache = list_first_entry(&cur_trans->dirty_bgs,
3711 struct btrfs_block_group_cache,
3715 * this can happen if cache_save_setup re-dirties a block
3716 * group that is already under IO. Just wait for it to
3717 * finish and then do it all again
3719 if (!list_empty(&cache->io_list)) {
3720 spin_unlock(&cur_trans->dirty_bgs_lock);
3721 list_del_init(&cache->io_list);
3722 btrfs_wait_cache_io(trans, cache, path);
3723 btrfs_put_block_group(cache);
3724 spin_lock(&cur_trans->dirty_bgs_lock);
3728 * don't remove from the dirty list until after we've waited
3729 * on any pending IO.
3731 list_del_init(&cache->dirty_list);
3732 spin_unlock(&cur_trans->dirty_bgs_lock);
3735 cache_save_setup(cache, trans, path);
3738 ret = btrfs_run_delayed_refs(trans,
3739 (unsigned long) -1);
3741 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3742 cache->io_ctl.inode = NULL;
3743 ret = btrfs_write_out_cache(fs_info, trans,
3745 if (ret == 0 && cache->io_ctl.inode) {
3748 list_add_tail(&cache->io_list, io);
3751 * if we failed to write the cache, the
3752 * generation will be bad and life goes on
3758 ret = write_one_cache_group(trans, fs_info,
3761 * One of the free space endio workers might have
3762 * created a new block group while updating a free space
3763 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3764 * and hasn't released its transaction handle yet, in
3765 * which case the new block group is still attached to
3766 * its transaction handle and its creation has not
3767 * finished yet (no block group item in the extent tree
3768 * yet, etc). If this is the case, wait for all free
3769 * space endio workers to finish and retry. This is a
3770 * very rare case so no need for a more efficient and
3771 * busy approach.
3773 if (ret == -ENOENT) {
3774 wait_event(cur_trans->writer_wait,
3775 atomic_read(&cur_trans->num_writers) == 1);
3776 ret = write_one_cache_group(trans, fs_info,
3780 btrfs_abort_transaction(trans, ret);
3783 /* if it's not on the io list, we need to put the block group */
3785 btrfs_put_block_group(cache);
3786 spin_lock(&cur_trans->dirty_bgs_lock);
3788 spin_unlock(&cur_trans->dirty_bgs_lock);
3791 * Refer to the definition of the io_bgs member for details on why it's
3792 * safe to use it without any locking.
3794 while (!list_empty(io)) {
3795 cache = list_first_entry(io, struct btrfs_block_group_cache,
3797 list_del_init(&cache->io_list);
3798 btrfs_wait_cache_io(trans, cache, path);
3799 btrfs_put_block_group(cache);
3802 btrfs_free_path(path);
3806 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
3808 struct btrfs_block_group_cache *block_group;
3811 block_group = btrfs_lookup_block_group(fs_info, bytenr);
3812 if (!block_group || block_group->ro)
3815 btrfs_put_block_group(block_group);
3819 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3821 struct btrfs_block_group_cache *bg;
3824 bg = btrfs_lookup_block_group(fs_info, bytenr);
3828 spin_lock(&bg->lock);
3832 atomic_inc(&bg->nocow_writers);
3833 spin_unlock(&bg->lock);
3835 /* no put on block group, done by btrfs_dec_nocow_writers */
3837 btrfs_put_block_group(bg);
3843 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3845 struct btrfs_block_group_cache *bg;
3847 bg = btrfs_lookup_block_group(fs_info, bytenr);
3849 if (atomic_dec_and_test(&bg->nocow_writers))
3850 wake_up_var(&bg->nocow_writers);
3852 * Once for our lookup and once for the lookup done by a previous call
3853 * to btrfs_inc_nocow_writers()
3855 btrfs_put_block_group(bg);
3856 btrfs_put_block_group(bg);
3859 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
3861 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
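/*
 * Illustrative pairing of the nocow writer helpers above (a sketch, not
 * from the original source): a nocow writer brackets its IO with
 * btrfs_inc_nocow_writers(fs_info, bytenr) and
 * btrfs_dec_nocow_writers(fs_info, bytenr), while code that needs the
 * block group quiesced (e.g. block group removal) calls
 * btrfs_wait_nocow_writers(bg) to wait for the count to reach zero.
 */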
3864 static const char *alloc_name(u64 flags)
3867 case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3869 case BTRFS_BLOCK_GROUP_METADATA:
3871 case BTRFS_BLOCK_GROUP_DATA:
3873 case BTRFS_BLOCK_GROUP_SYSTEM:
3877 return "invalid-combination";
3881 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
3884 struct btrfs_space_info *space_info;
3888 space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
3892 ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
3899 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3900 INIT_LIST_HEAD(&space_info->block_groups[i]);
3901 init_rwsem(&space_info->groups_sem);
3902 spin_lock_init(&space_info->lock);
3903 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3904 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3905 init_waitqueue_head(&space_info->wait);
3906 INIT_LIST_HEAD(&space_info->ro_bgs);
3907 INIT_LIST_HEAD(&space_info->tickets);
3908 INIT_LIST_HEAD(&space_info->priority_tickets);
3910 ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
3911 info->space_info_kobj, "%s",
3912 alloc_name(space_info->flags));
3914 percpu_counter_destroy(&space_info->total_bytes_pinned);
3919 list_add_rcu(&space_info->list, &info->space_info);
3920 if (flags & BTRFS_BLOCK_GROUP_DATA)
3921 info->data_sinfo = space_info;
3926 static void update_space_info(struct btrfs_fs_info *info, u64 flags,
3927 u64 total_bytes, u64 bytes_used,
3929 struct btrfs_space_info **space_info)
3931 struct btrfs_space_info *found;
3934 factor = btrfs_bg_type_to_factor(flags);
3936 found = __find_space_info(info, flags);
3938 spin_lock(&found->lock);
3939 found->total_bytes += total_bytes;
3940 found->disk_total += total_bytes * factor;
3941 found->bytes_used += bytes_used;
3942 found->disk_used += bytes_used * factor;
3943 found->bytes_readonly += bytes_readonly;
3944 if (total_bytes > 0)
3946 space_info_add_new_bytes(info, found, total_bytes -
3947 bytes_used - bytes_readonly);
3948 spin_unlock(&found->lock);
3949 *space_info = found;
3952 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3954 u64 extra_flags = chunk_to_extended(flags) &
3955 BTRFS_EXTENDED_PROFILE_MASK;
3957 write_seqlock(&fs_info->profiles_lock);
3958 if (flags & BTRFS_BLOCK_GROUP_DATA)
3959 fs_info->avail_data_alloc_bits |= extra_flags;
3960 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3961 fs_info->avail_metadata_alloc_bits |= extra_flags;
3962 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3963 fs_info->avail_system_alloc_bits |= extra_flags;
3964 write_sequnlock(&fs_info->profiles_lock);
3968 * returns target flags in extended format or 0 if restripe for this
3969 * chunk_type is not in progress
3971 * should be called with balance_lock held
3973 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3975 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3981 if (flags & BTRFS_BLOCK_GROUP_DATA &&
3982 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3983 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3984 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3985 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3986 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3987 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3988 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3989 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3996 * @flags: available profiles in extended format (see ctree.h)
3998 * Returns reduced profile in chunk format. If profile changing is in
3999 * progress (either running or paused) picks the target profile (if it's
4000 * already available), otherwise falls back to plain reducing.
4002 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
4004 u64 num_devices = fs_info->fs_devices->rw_devices;
4010 * see if restripe for this chunk_type is in progress, if so
4011 * try to reduce to the target profile
4013 spin_lock(&fs_info->balance_lock);
4014 target = get_restripe_target(fs_info, flags);
4016 /* pick target profile only if it's already available */
4017 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
4018 spin_unlock(&fs_info->balance_lock);
4019 return extended_to_chunk(target);
4022 spin_unlock(&fs_info->balance_lock);
4024 /* First, mask out the RAID levels which aren't possible */
4025 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4026 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
4027 allowed |= btrfs_raid_array[raid_type].bg_flag;
4031 if (allowed & BTRFS_BLOCK_GROUP_RAID6)
4032 allowed = BTRFS_BLOCK_GROUP_RAID6;
4033 else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
4034 allowed = BTRFS_BLOCK_GROUP_RAID5;
4035 else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
4036 allowed = BTRFS_BLOCK_GROUP_RAID10;
4037 else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
4038 allowed = BTRFS_BLOCK_GROUP_RAID1;
4039 else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
4040 allowed = BTRFS_BLOCK_GROUP_RAID0;
4042 flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
4044 return extended_to_chunk(flags | allowed);
4047 static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
4054 seq = read_seqbegin(&fs_info->profiles_lock);
4056 if (flags & BTRFS_BLOCK_GROUP_DATA)
4057 flags |= fs_info->avail_data_alloc_bits;
4058 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4059 flags |= fs_info->avail_system_alloc_bits;
4060 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
4061 flags |= fs_info->avail_metadata_alloc_bits;
4062 } while (read_seqretry(&fs_info->profiles_lock, seq));
4064 return btrfs_reduce_alloc_profile(fs_info, flags);
4067 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
4069 struct btrfs_fs_info *fs_info = root->fs_info;
4074 flags = BTRFS_BLOCK_GROUP_DATA;
4075 else if (root == fs_info->chunk_root)
4076 flags = BTRFS_BLOCK_GROUP_SYSTEM;
4078 flags = BTRFS_BLOCK_GROUP_METADATA;
4080 ret = get_alloc_profile(fs_info, flags);
4084 u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
4086 return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
4089 u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
4091 return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4094 u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
4096 return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4099 static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
4100 bool may_use_included)
4103 return s_info->bytes_used + s_info->bytes_reserved +
4104 s_info->bytes_pinned + s_info->bytes_readonly +
4105 (may_use_included ? s_info->bytes_may_use : 0);
4108 int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
4110 struct btrfs_root *root = inode->root;
4111 struct btrfs_fs_info *fs_info = root->fs_info;
4112 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
4115 int need_commit = 2;
4116 int have_pinned_space;
4118 /* make sure bytes are sectorsize aligned */
4119 bytes = ALIGN(bytes, fs_info->sectorsize);
4121 if (btrfs_is_free_space_inode(inode)) {
4123 ASSERT(current->journal_info);
4127 /* make sure we have enough space to handle the data first */
4128 spin_lock(&data_sinfo->lock);
4129 used = btrfs_space_info_used(data_sinfo, true);
4131 if (used + bytes > data_sinfo->total_bytes) {
4132 struct btrfs_trans_handle *trans;
4135 * if we don't have enough free bytes in this space then we need
4136 * to alloc a new chunk.
4138 if (!data_sinfo->full) {
4141 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4142 spin_unlock(&data_sinfo->lock);
4144 alloc_target = btrfs_data_alloc_profile(fs_info);
4146 * It is ugly that we don't call nolock join
4147 * transaction for the free space inode case here.
4148 * But it is safe because we only do the data space
4149 * reservation for the free space cache in the
4150 * transaction context, the common join transaction
4151 * just increases the counter of the current transaction
4152 * handle, and doesn't try to acquire the trans_lock of
4153 * the fs.
4155 trans = btrfs_join_transaction(root);
4157 return PTR_ERR(trans);
4159 ret = do_chunk_alloc(trans, alloc_target,
4160 CHUNK_ALLOC_NO_FORCE);
4161 btrfs_end_transaction(trans);
4166 have_pinned_space = 1;
4175 * If we don't have enough pinned space to deal with this
4176 * allocation, and no chunk was removed in the current transaction,
4177 * don't bother committing the transaction.
4179 have_pinned_space = __percpu_counter_compare(
4180 &data_sinfo->total_bytes_pinned,
4181 used + bytes - data_sinfo->total_bytes,
4182 BTRFS_TOTAL_BYTES_PINNED_BATCH);
4183 spin_unlock(&data_sinfo->lock);
4185 /* commit the current transaction and try again */
4190 if (need_commit > 0) {
4191 btrfs_start_delalloc_roots(fs_info, -1);
4192 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
4196 trans = btrfs_join_transaction(root);
4198 return PTR_ERR(trans);
4199 if (have_pinned_space >= 0 ||
4200 test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4201 &trans->transaction->flags) ||
4203 ret = btrfs_commit_transaction(trans);
4207 * The cleaner kthread might still be doing iput
4208 * operations. Wait for it to finish so that
4209 * more space is released.
4211 mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
4212 mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
4215 btrfs_end_transaction(trans);
4219 trace_btrfs_space_reservation(fs_info,
4220 "space_info:enospc",
4221 data_sinfo->flags, bytes, 1);
4224 data_sinfo->bytes_may_use += bytes;
4225 trace_btrfs_space_reservation(fs_info, "space_info",
4226 data_sinfo->flags, bytes, 1);
4227 spin_unlock(&data_sinfo->lock);
4232 int btrfs_check_data_free_space(struct inode *inode,
4233 struct extent_changeset **reserved, u64 start, u64 len)
4235 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4238 /* align the range */
4239 len = round_up(start + len, fs_info->sectorsize) -
4240 round_down(start, fs_info->sectorsize);
4241 start = round_down(start, fs_info->sectorsize);
4243 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len);
4247 /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
4248 ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
4250 btrfs_free_reserved_data_space_noquota(inode, start, len);
4257 * Called if we need to clear a data reservation for this inode
4258 * Normally in an error case.
4260 * This one will *NOT* use the accurate qgroup reserved space API, just for
4261 * the case in which we can't sleep and are sure it won't affect qgroup
4262 * reserved space, like clear_bit_hook().
4264 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4267 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4268 struct btrfs_space_info *data_sinfo;
4270 /* Make sure the range is aligned to sectorsize */
4271 len = round_up(start + len, fs_info->sectorsize) -
4272 round_down(start, fs_info->sectorsize);
4273 start = round_down(start, fs_info->sectorsize);
4275 data_sinfo = fs_info->data_sinfo;
4276 spin_lock(&data_sinfo->lock);
4277 if (WARN_ON(data_sinfo->bytes_may_use < len))
4278 data_sinfo->bytes_may_use = 0;
4280 data_sinfo->bytes_may_use -= len;
4281 trace_btrfs_space_reservation(fs_info, "space_info",
4282 data_sinfo->flags, len, 0);
4283 spin_unlock(&data_sinfo->lock);
4287 * Called if we need to clear a data reservation for this inode
4288 * Normally in an error case.
4290 * This one will handle the per-inode data rsv map for accurate reserved
4291 * space framework.
4293 void btrfs_free_reserved_data_space(struct inode *inode,
4294 struct extent_changeset *reserved, u64 start, u64 len)
4296 struct btrfs_root *root = BTRFS_I(inode)->root;
4298 /* Make sure the range is aligned to sectorsize */
4299 len = round_up(start + len, root->fs_info->sectorsize) -
4300 round_down(start, root->fs_info->sectorsize);
4301 start = round_down(start, root->fs_info->sectorsize);
4303 btrfs_free_reserved_data_space_noquota(inode, start, len);
4304 btrfs_qgroup_free_data(inode, reserved, start, len);
4307 static void force_metadata_allocation(struct btrfs_fs_info *info)
4309 struct list_head *head = &info->space_info;
4310 struct btrfs_space_info *found;
4313 list_for_each_entry_rcu(found, head, list) {
4314 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4315 found->force_alloc = CHUNK_ALLOC_FORCE;
4320 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4322 return (global->size << 1);
4325 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
4326 struct btrfs_space_info *sinfo, int force)
4328 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4329 u64 bytes_used = btrfs_space_info_used(sinfo, false);
4332 if (force == CHUNK_ALLOC_FORCE)
4336 * We need to take into account the global rsv because for all intents
4337 * and purposes it's used space. Don't worry about locking the
4338 * global_rsv, it doesn't change except when the transaction commits.
4340 if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4341 bytes_used += calc_global_rsv_need_space(global_rsv);
4344 * in limited mode, we want to have some free space up to
4345 * about 1% of the FS size.
4347 if (force == CHUNK_ALLOC_LIMITED) {
4348 thresh = btrfs_super_total_bytes(fs_info->super_copy);
4349 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4351 if (sinfo->total_bytes - bytes_used < thresh)
4355 if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
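/*
 * Illustrative reading of the checks above (not from the original
 * source): div_factor(total_bytes, 8) is 80% of the space, so a 10GiB
 * space info asks for a new chunk once used space comes within 2M of
 * ~8GiB; in CHUNK_ALLOC_LIMITED mode the free-space threshold is
 * max(64M, 1% of the FS size) instead.
 */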
4360 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
4364 if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4365 BTRFS_BLOCK_GROUP_RAID0 |
4366 BTRFS_BLOCK_GROUP_RAID5 |
4367 BTRFS_BLOCK_GROUP_RAID6))
4368 num_dev = fs_info->fs_devices->rw_devices;
4369 else if (type & BTRFS_BLOCK_GROUP_RAID1)
4372 num_dev = 1; /* DUP or single */
4378 * Reserve space in the system space info as necessary for allocating
4379 * or removing a chunk.
4382 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
4384 struct btrfs_fs_info *fs_info = trans->fs_info;
4385 struct btrfs_space_info *info;
4392 * Needed because we can end up allocating a system chunk, and we need
4393 * an atomic and race free space reservation in the chunk block reserve.
4395 lockdep_assert_held(&fs_info->chunk_mutex);
4397 info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4398 spin_lock(&info->lock);
4399 left = info->total_bytes - btrfs_space_info_used(info, true);
4400 spin_unlock(&info->lock);
4402 num_devs = get_profile_num_devs(fs_info, type);
4404 /* num_devs device items to update and 1 chunk item to add or remove */
4405 thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
4406 btrfs_calc_trans_metadata_size(fs_info, 1);
4408 if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4409 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
4410 left, thresh, type);
4411 dump_space_info(fs_info, info, 0, 0);
4414 if (left < thresh) {
4415 u64 flags = btrfs_system_alloc_profile(fs_info);
4418 * Ignore failure to create system chunk. We might end up not
4419 * needing it, as we might not need to COW all nodes/leafs from
4420 * the paths we visit in the chunk tree (they were already COWed
4421 * or created in the current transaction for example).
4423 ret = btrfs_alloc_chunk(trans, flags);
4427 ret = btrfs_block_rsv_add(fs_info->chunk_root,
4428 &fs_info->chunk_block_rsv,
4429 thresh, BTRFS_RESERVE_NO_FLUSH);
4431 trans->chunk_bytes_reserved += thresh;
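/*
 * Illustrative magnitude (not from the original source, assuming a 16K
 * nodesize and a single rw device): the threshold above comes to
 * 16K * 8 levels for the device item update plus 16K * 8 * 2 for the
 * chunk item, roughly 384K reserved in the chunk block reserve.
 */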
4436 * If force is CHUNK_ALLOC_FORCE:
4437 * - return 1 if it successfully allocates a chunk,
4438 * - return errors including -ENOSPC otherwise.
4439 * If force is NOT CHUNK_ALLOC_FORCE:
4440 * - return 0 if it doesn't need to allocate a new chunk,
4441 * - return 1 if it successfully allocates a chunk,
4442 * - return errors including -ENOSPC otherwise.
4444 static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
4447 struct btrfs_fs_info *fs_info = trans->fs_info;
4448 struct btrfs_space_info *space_info;
4449 bool wait_for_alloc = false;
4450 bool should_alloc = false;
4453 /* Don't re-enter if we're already allocating a chunk */
4454 if (trans->allocating_chunk)
4457 space_info = __find_space_info(fs_info, flags);
4461 spin_lock(&space_info->lock);
4462 if (force < space_info->force_alloc)
4463 force = space_info->force_alloc;
4464 should_alloc = should_alloc_chunk(fs_info, space_info, force);
4465 if (space_info->full) {
4466 /* No more free physical space */
4471 spin_unlock(&space_info->lock);
4473 } else if (!should_alloc) {
4474 spin_unlock(&space_info->lock);
4476 } else if (space_info->chunk_alloc) {
4478 * Someone is already allocating, so we need to block
4479 * until this someone is finished and then loop to
4480 * recheck if we should continue with our allocation
4481 * attempt.
4483 wait_for_alloc = true;
4484 spin_unlock(&space_info->lock);
4485 mutex_lock(&fs_info->chunk_mutex);
4486 mutex_unlock(&fs_info->chunk_mutex);
4488 /* Proceed with allocation */
4489 space_info->chunk_alloc = 1;
4490 wait_for_alloc = false;
4491 spin_unlock(&space_info->lock);
4495 } while (wait_for_alloc);
4497 mutex_lock(&fs_info->chunk_mutex);
4498 trans->allocating_chunk = true;
4501 * If we have mixed data/metadata chunks we want to make sure we keep
4502 * allocating mixed chunks instead of individual chunks.
4504 if (btrfs_mixed_space_info(space_info))
4505 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4508 * if we're doing a data chunk, go ahead and make sure that
4509 * we keep a reasonable number of metadata chunks allocated in the
4510 * FS as well.
4512 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4513 fs_info->data_chunk_allocations++;
4514 if (!(fs_info->data_chunk_allocations %
4515 fs_info->metadata_ratio))
4516 force_metadata_allocation(fs_info);
4520 * Check if we have enough space in SYSTEM chunk because we may need
4521 * to update devices.
4523 check_system_chunk(trans, flags);
4525 ret = btrfs_alloc_chunk(trans, flags);
4526 trans->allocating_chunk = false;
4528 spin_lock(&space_info->lock);
4531 space_info->full = 1;
4536 space_info->max_extent_size = 0;
4539 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4541 space_info->chunk_alloc = 0;
4542 spin_unlock(&space_info->lock);
4543 mutex_unlock(&fs_info->chunk_mutex);
4545 * When we allocate a new chunk we reserve space in the chunk block
4546 * reserve to make sure we can COW nodes/leafs in the chunk tree or
4547 * add new nodes/leafs to it if we end up needing to do it when
4548 * inserting the chunk item and updating device items as part of the
4549 * second phase of chunk allocation, performed by
4550 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4551 * large number of new block groups to create in our transaction
4552 * handle's new_bgs list to avoid exhausting the chunk block reserve
4553 * in extreme cases - like having a single transaction create many new
4554 * block groups when starting to write out the free space caches of all
4555 * the block groups that were made dirty during the lifetime of the
4556 * transaction.
4558 if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
4559 btrfs_create_pending_block_groups(trans);
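/*
 * Illustrative caller pattern (a sketch, not from the original source):
 * callers such as btrfs_alloc_data_chunk_ondemand() treat a negative
 * return other than -ENOSPC as fatal, -ENOSPC as a hint to fall back to
 * committing the transaction, and >= 0 as permission to retry the
 * reservation.
 */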
4564 static int can_overcommit(struct btrfs_fs_info *fs_info,
4565 struct btrfs_space_info *space_info, u64 bytes,
4566 enum btrfs_reserve_flush_enum flush,
4569 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4576 /* Don't overcommit when in mixed mode. */
4577 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
4581 profile = btrfs_system_alloc_profile(fs_info);
4583 profile = btrfs_metadata_alloc_profile(fs_info);
4585 used = btrfs_space_info_used(space_info, false);
4588 * We only want to allow over committing if we have lots of actual space
4589 * free, but if we don't have enough space to handle the global reserve
4590 * space then we could end up having a real enospc problem when trying
4591 * to allocate a chunk or some other such important allocation.
4593 spin_lock(&global_rsv->lock);
4594 space_size = calc_global_rsv_need_space(global_rsv);
4595 spin_unlock(&global_rsv->lock);
4596 if (used + space_size >= space_info->total_bytes)
4599 used += space_info->bytes_may_use;
4601 avail = atomic64_read(&fs_info->free_chunk_space);
4604 * If we have dup, raid1 or raid10 then only half of the free
4605 * space is actually usable. For raid56, the space info used
4606 * doesn't include the parity drive, so we don't have to account for that.
4609 factor = btrfs_bg_type_to_factor(profile);
4610 avail = div_u64(avail, factor);
4613 * If we aren't flushing all things, let us overcommit up to
4614 * half of the space. If we can flush, don't let us overcommit
4615 * too much, let it overcommit up to 1/8 of the space.
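 *
 * A hedged example: assuming 100GiB of unallocated chunk space and a
 * profile factor of 1, BTRFS_RESERVE_FLUSH_ALL lets us overcommit up to
 * 100GiB / 8 = 12.5GiB, while weaker flush levels allow up to
 * 100GiB / 2 = 50GiB.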
4617 if (flush == BTRFS_RESERVE_FLUSH_ALL)
4622 if (used + bytes < space_info->total_bytes + avail)
4627 static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
4628 unsigned long nr_pages, int nr_items)
4630 struct super_block *sb = fs_info->sb;
4632 if (down_read_trylock(&sb->s_umount)) {
4633 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4634 up_read(&sb->s_umount);
4637 * We needn't worry about the filesystem going from r/w to r/o even
4638 * though we don't acquire the ->s_umount mutex, because the filesystem
4639 * should guarantee that the delalloc inodes list is empty after
4640 * the filesystem becomes read-only (all dirty pages are written to disk).
4643 btrfs_start_delalloc_roots(fs_info, nr_items);
4644 if (!current->journal_info)
4645 btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
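/*
 * Translate @to_reclaim bytes into a number of metadata items to flush,
 * using the metadata size of a single-item transaction as the unit.
 */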
4649 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
4655 bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
4656 nr = div64_u64(to_reclaim, bytes);
4662 #define EXTENT_SIZE_PER_ITEM SZ_256K
4665 * shrink metadata reservation for delalloc
4667 static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
4668 u64 orig, bool wait_ordered)
4670 struct btrfs_space_info *space_info;
4671 struct btrfs_trans_handle *trans;
4676 unsigned long nr_pages;
4679 	/* Calculate the number of pages we need to flush for this space reservation */
4680 items = calc_reclaim_items_nr(fs_info, to_reclaim);
4681 to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4683 trans = (struct btrfs_trans_handle *)current->journal_info;
4684 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4686 delalloc_bytes = percpu_counter_sum_positive(
4687 &fs_info->delalloc_bytes);
4688 if (delalloc_bytes == 0) {
4692 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
4697 while (delalloc_bytes && loops < 3) {
4698 max_reclaim = min(delalloc_bytes, to_reclaim);
4699 nr_pages = max_reclaim >> PAGE_SHIFT;
4700 btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
4702 * We need to wait for the async pages to actually start before we do anything.
4705 max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
4709 if (max_reclaim <= nr_pages)
4712 max_reclaim -= nr_pages;
4714 wait_event(fs_info->async_submit_wait,
4715 atomic_read(&fs_info->async_delalloc_pages) <=
4718 spin_lock(&space_info->lock);
4719 if (list_empty(&space_info->tickets) &&
4720 list_empty(&space_info->priority_tickets)) {
4721 spin_unlock(&space_info->lock);
4724 spin_unlock(&space_info->lock);
4727 if (wait_ordered && !trans) {
4728 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
4730 time_left = schedule_timeout_killable(1);
4734 delalloc_bytes = percpu_counter_sum_positive(
4735 &fs_info->delalloc_bytes);
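/*
 * A reserve_ticket tracks one pending metadata reservation: the bytes
 * still missing, an error set by the flusher on failure, and a wait queue
 * the reserving task sleeps on until the ticket is satisfied or failed.
 */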
4739 struct reserve_ticket {
4742 struct list_head list;
4743 wait_queue_head_t wait;
4747 * may_commit_transaction - possibly commit the transaction if it's ok to
4748 * @fs_info - the filesystem we are trying to reserve space on
4749 * @space_info - the space_info we are trying to reserve space from
4752 * This will check to make sure that committing the transaction will actually
4753 * get us somewhere and then commit the transaction if it does. Otherwise it
4754 * will return -ENOSPC.
4756 static int may_commit_transaction(struct btrfs_fs_info *fs_info,
4757 struct btrfs_space_info *space_info)
4759 struct reserve_ticket *ticket = NULL;
4760 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
4761 struct btrfs_trans_handle *trans;
4764 trans = (struct btrfs_trans_handle *)current->journal_info;
4768 spin_lock(&space_info->lock);
4769 if (!list_empty(&space_info->priority_tickets))
4770 ticket = list_first_entry(&space_info->priority_tickets,
4771 struct reserve_ticket, list);
4772 else if (!list_empty(&space_info->tickets))
4773 ticket = list_first_entry(&space_info->tickets,
4774 struct reserve_ticket, list);
4775 bytes = (ticket) ? ticket->bytes : 0;
4776 spin_unlock(&space_info->lock);
4781 /* See if there is enough pinned space to make this reservation */
4782 if (__percpu_counter_compare(&space_info->total_bytes_pinned,
4784 BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
4788 * See if there is some space in the delayed insertion reservation for this reservation.
4791 if (space_info != delayed_rsv->space_info)
4794 spin_lock(&delayed_rsv->lock);
4795 if (delayed_rsv->size > bytes)
4798 bytes -= delayed_rsv->size;
4799 spin_unlock(&delayed_rsv->lock);
4801 if (__percpu_counter_compare(&space_info->total_bytes_pinned,
4803 BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0) {
4808 trans = btrfs_join_transaction(fs_info->extent_root);
4812 return btrfs_commit_transaction(trans);
4816 * Try to flush some data based on policy set by @state. This is only advisory
4817 * and may fail for various reasons. The caller is supposed to examine the
4818 * state of @space_info to detect the outcome.
4820 static void flush_space(struct btrfs_fs_info *fs_info,
4821 struct btrfs_space_info *space_info, u64 num_bytes,
4824 struct btrfs_root *root = fs_info->extent_root;
4825 struct btrfs_trans_handle *trans;
4830 case FLUSH_DELAYED_ITEMS_NR:
4831 case FLUSH_DELAYED_ITEMS:
4832 if (state == FLUSH_DELAYED_ITEMS_NR)
4833 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
4837 trans = btrfs_join_transaction(root);
4838 if (IS_ERR(trans)) {
4839 ret = PTR_ERR(trans);
4842 ret = btrfs_run_delayed_items_nr(trans, nr);
4843 btrfs_end_transaction(trans);
4845 case FLUSH_DELALLOC:
4846 case FLUSH_DELALLOC_WAIT:
4847 shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
4848 state == FLUSH_DELALLOC_WAIT);
4851 trans = btrfs_join_transaction(root);
4852 if (IS_ERR(trans)) {
4853 ret = PTR_ERR(trans);
4856 ret = do_chunk_alloc(trans,
4857 btrfs_metadata_alloc_profile(fs_info),
4858 CHUNK_ALLOC_NO_FORCE);
4859 btrfs_end_transaction(trans);
4860 if (ret > 0 || ret == -ENOSPC)
4864 ret = may_commit_transaction(fs_info, space_info);
4871 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
4877 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
4878 struct btrfs_space_info *space_info,
4881 struct reserve_ticket *ticket;
4886 list_for_each_entry(ticket, &space_info->tickets, list)
4887 to_reclaim += ticket->bytes;
4888 list_for_each_entry(ticket, &space_info->priority_tickets, list)
4889 to_reclaim += ticket->bytes;
4893 to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4894 if (can_overcommit(fs_info, space_info, to_reclaim,
4895 BTRFS_RESERVE_FLUSH_ALL, system_chunk))
4898 used = btrfs_space_info_used(space_info, true);
4900 if (can_overcommit(fs_info, space_info, SZ_1M,
4901 BTRFS_RESERVE_FLUSH_ALL, system_chunk))
4902 expected = div_factor_fine(space_info->total_bytes, 95);
4904 expected = div_factor_fine(space_info->total_bytes, 90);
4906 if (used > expected)
4907 to_reclaim = used - expected;
4910 to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4911 space_info->bytes_reserved);
4915 static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
4916 struct btrfs_space_info *space_info,
4917 u64 used, bool system_chunk)
4919 u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4921 /* If we're just plain full then async reclaim just slows us down. */
4922 if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
4925 if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
4929 return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4930 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
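/*
 * Fail every ticket still queued on @head: set -ENOSPC on each and wake
 * its waiter so the reserving task can bail out.
 */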
4933 static void wake_all_tickets(struct list_head *head)
4935 struct reserve_ticket *ticket;
4937 while (!list_empty(head)) {
4938 ticket = list_first_entry(head, struct reserve_ticket, list);
4939 list_del_init(&ticket->list);
4940 ticket->error = -ENOSPC;
4941 wake_up(&ticket->wait);
4946 * This is for normal flushers; we can wait all goddamned day if we want to. We
4947 * will loop and continuously try to flush as long as we are making progress.
4948 * We count progress as clearing off tickets each time we have to loop.
4950 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4952 struct btrfs_fs_info *fs_info;
4953 struct btrfs_space_info *space_info;
4956 int commit_cycles = 0;
4957 u64 last_tickets_id;
4959 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4960 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4962 spin_lock(&space_info->lock);
4963 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
4966 space_info->flush = 0;
4967 spin_unlock(&space_info->lock);
4970 last_tickets_id = space_info->tickets_id;
4971 spin_unlock(&space_info->lock);
4973 flush_state = FLUSH_DELAYED_ITEMS_NR;
4975 flush_space(fs_info, space_info, to_reclaim, flush_state);
4976 spin_lock(&space_info->lock);
4977 if (list_empty(&space_info->tickets)) {
4978 space_info->flush = 0;
4979 spin_unlock(&space_info->lock);
4982 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
4985 if (last_tickets_id == space_info->tickets_id) {
4988 last_tickets_id = space_info->tickets_id;
4989 flush_state = FLUSH_DELAYED_ITEMS_NR;
4994 if (flush_state > COMMIT_TRANS) {
4996 if (commit_cycles > 2) {
4997 wake_all_tickets(&space_info->tickets);
4998 space_info->flush = 0;
5000 flush_state = FLUSH_DELAYED_ITEMS_NR;
5003 spin_unlock(&space_info->lock);
5004 } while (flush_state <= COMMIT_TRANS);
5007 void btrfs_init_async_reclaim_work(struct work_struct *work)
5009 INIT_WORK(work, btrfs_async_reclaim_metadata_space);
5012 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5013 struct btrfs_space_info *space_info,
5014 struct reserve_ticket *ticket)
5017 int flush_state = FLUSH_DELAYED_ITEMS_NR;
5019 spin_lock(&space_info->lock);
5020 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
5023 spin_unlock(&space_info->lock);
5026 spin_unlock(&space_info->lock);
5029 flush_space(fs_info, space_info, to_reclaim, flush_state);
5031 spin_lock(&space_info->lock);
5032 if (ticket->bytes == 0) {
5033 spin_unlock(&space_info->lock);
5036 spin_unlock(&space_info->lock);
5039 * Priority flushers can't wait on delalloc without deadlocking.
5042 if (flush_state == FLUSH_DELALLOC ||
5043 flush_state == FLUSH_DELALLOC_WAIT)
5044 flush_state = ALLOC_CHUNK;
5045 } while (flush_state < COMMIT_TRANS);
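/*
 * Sleep until our ticket is satisfied (ticket->bytes reaches 0) or marked
 * failed. If the ticket was only partially filled, return the partial
 * reservation to bytes_may_use before reporting the error.
 */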
5048 static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
5049 struct btrfs_space_info *space_info,
5050 struct reserve_ticket *ticket, u64 orig_bytes)
5056 spin_lock(&space_info->lock);
5057 while (ticket->bytes > 0 && ticket->error == 0) {
5058 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
5063 spin_unlock(&space_info->lock);
5067 finish_wait(&ticket->wait, &wait);
5068 spin_lock(&space_info->lock);
5071 ret = ticket->error;
5072 if (!list_empty(&ticket->list))
5073 list_del_init(&ticket->list);
5074 if (ticket->bytes && ticket->bytes < orig_bytes) {
5075 u64 num_bytes = orig_bytes - ticket->bytes;
5076 space_info->bytes_may_use -= num_bytes;
5077 trace_btrfs_space_reservation(fs_info, "space_info",
5078 space_info->flags, num_bytes, 0);
5080 spin_unlock(&space_info->lock);
5086 * __reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
5087 * @fs_info - the filesystem we are reserving space on
5088 * @space_info - the space info we want to allocate from
5089 * @orig_bytes - the number of bytes we want
5090 * @flush - whether or not we can flush to make our reservation
 * @system_chunk - whether this reservation is on behalf of the chunk root
5092 * This will reserve orig_bytes number of bytes from the space info associated
5093 * with the block_rsv. If there is not enough space it will make an attempt to
5094 * flush out space to make room. It will do this by flushing delalloc if
5095 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH then no
5096 * attempts to regain reservations will be made and this will fail if there is not enough space already.
5099 static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
5100 struct btrfs_space_info *space_info,
5102 enum btrfs_reserve_flush_enum flush,
5105 struct reserve_ticket ticket;
5110 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
5112 spin_lock(&space_info->lock);
5114 used = btrfs_space_info_used(space_info, true);
5117 * If we have enough space then hooray, make our reservation and carry
5118 * on. If not, see if we can overcommit, and if we can, hooray, carry on.
5119 * If not, things get more complicated.
5121 if (used + orig_bytes <= space_info->total_bytes) {
5122 space_info->bytes_may_use += orig_bytes;
5123 trace_btrfs_space_reservation(fs_info, "space_info",
5124 space_info->flags, orig_bytes, 1);
5126 } else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
5128 space_info->bytes_may_use += orig_bytes;
5129 trace_btrfs_space_reservation(fs_info, "space_info",
5130 space_info->flags, orig_bytes, 1);
5135 * If we couldn't make a reservation then setup our reservation ticket
5136 * and kick the async worker if it's not already running.
5138 * If we are a priority flusher then we just need to add our ticket to
5139 * the list and we will do our own flushing further down.
5141 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5142 ticket.bytes = orig_bytes;
5144 init_waitqueue_head(&ticket.wait);
5145 if (flush == BTRFS_RESERVE_FLUSH_ALL) {
5146 list_add_tail(&ticket.list, &space_info->tickets);
5147 if (!space_info->flush) {
5148 space_info->flush = 1;
5149 trace_btrfs_trigger_flush(fs_info,
5153 queue_work(system_unbound_wq,
5154 &fs_info->async_reclaim_work);
5157 list_add_tail(&ticket.list,
5158 &space_info->priority_tickets);
5160 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
5163 * We will do the space reservation dance during log replay,
5164 * which means we won't have fs_info->fs_root set, so don't do
5165 * the async reclaim as we will panic.
5167 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
5168 need_do_async_reclaim(fs_info, space_info,
5169 used, system_chunk) &&
5170 !work_busy(&fs_info->async_reclaim_work)) {
5171 trace_btrfs_trigger_flush(fs_info, space_info->flags,
5172 orig_bytes, flush, "preempt");
5173 queue_work(system_unbound_wq,
5174 &fs_info->async_reclaim_work);
5177 spin_unlock(&space_info->lock);
5178 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
5181 if (flush == BTRFS_RESERVE_FLUSH_ALL)
5182 return wait_reserve_ticket(fs_info, space_info, &ticket,
5186 priority_reclaim_metadata_space(fs_info, space_info, &ticket);
5187 spin_lock(&space_info->lock);
5189 if (ticket.bytes < orig_bytes) {
5190 u64 num_bytes = orig_bytes - ticket.bytes;
5191 space_info->bytes_may_use -= num_bytes;
5192 trace_btrfs_space_reservation(fs_info, "space_info",
5197 list_del_init(&ticket.list);
5200 spin_unlock(&space_info->lock);
5201 ASSERT(list_empty(&ticket.list));
5206 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
5207 * @root - the root we're allocating for
5208 * @block_rsv - the block_rsv we're allocating for
5209 * @orig_bytes - the number of bytes we want
5210 * @flush - whether or not we can flush to make our reservation
5212 * This will reserve orig_bytes number of bytes from the space info associated
5213 * with the block_rsv. If there is not enough space it will make an attempt to
5214 * flush out space to make room. It will do this by flushing delalloc if
5215 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH then no
5216 * attempts to regain reservations will be made and this will fail if there is not enough space already.
5219 static int reserve_metadata_bytes(struct btrfs_root *root,
5220 struct btrfs_block_rsv *block_rsv,
5222 enum btrfs_reserve_flush_enum flush)
5224 struct btrfs_fs_info *fs_info = root->fs_info;
5225 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5227 bool system_chunk = (root == fs_info->chunk_root);
5229 ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
5230 orig_bytes, flush, system_chunk);
5231 if (ret == -ENOSPC &&
5232 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5233 if (block_rsv != global_rsv &&
5234 !block_rsv_use_bytes(global_rsv, orig_bytes))
5237 if (ret == -ENOSPC) {
5238 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
5239 block_rsv->space_info->flags,
5242 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
5243 dump_space_info(fs_info, block_rsv->space_info,
5249 static struct btrfs_block_rsv *get_block_rsv(
5250 const struct btrfs_trans_handle *trans,
5251 const struct btrfs_root *root)
5253 struct btrfs_fs_info *fs_info = root->fs_info;
5254 struct btrfs_block_rsv *block_rsv = NULL;
5256 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5257 (root == fs_info->csum_root && trans->adding_csums) ||
5258 (root == fs_info->uuid_root))
5259 block_rsv = trans->block_rsv;
5262 block_rsv = root->block_rsv;
5265 block_rsv = &fs_info->empty_block_rsv;
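/*
 * Consume @num_bytes from @block_rsv->reserved; returns 0 on success and
 * an error (-ENOSPC) if the rsv does not have that many bytes reserved.
 */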
5270 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5274 spin_lock(&block_rsv->lock);
5275 if (block_rsv->reserved >= num_bytes) {
5276 block_rsv->reserved -= num_bytes;
5277 if (block_rsv->reserved < block_rsv->size)
5278 block_rsv->full = 0;
5281 spin_unlock(&block_rsv->lock);
5285 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5286 u64 num_bytes, int update_size)
5288 spin_lock(&block_rsv->lock);
5289 block_rsv->reserved += num_bytes;
5291 block_rsv->size += num_bytes;
5292 else if (block_rsv->reserved >= block_rsv->size)
5293 block_rsv->full = 1;
5294 spin_unlock(&block_rsv->lock);
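/*
 * Migrate @num_bytes from the global rsv to @dest, but only if the global
 * rsv would still hold at least min_factor of its size afterwards.
 */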
5297 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5298 struct btrfs_block_rsv *dest, u64 num_bytes,
5301 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5304 if (global_rsv->space_info != dest->space_info)
5307 spin_lock(&global_rsv->lock);
5308 min_bytes = div_factor(global_rsv->size, min_factor);
5309 if (global_rsv->reserved < min_bytes + num_bytes) {
5310 spin_unlock(&global_rsv->lock);
5313 global_rsv->reserved -= num_bytes;
5314 if (global_rsv->reserved < global_rsv->size)
5315 global_rsv->full = 0;
5316 spin_unlock(&global_rsv->lock);
5318 block_rsv_add_bytes(dest, num_bytes, 1);
5323 * This is for space we already have accounted in space_info->bytes_may_use, so
5324 * basically when we're returning space from block_rsv's.
5326 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
5327 struct btrfs_space_info *space_info,
5330 struct reserve_ticket *ticket;
5331 struct list_head *head;
5333 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
5334 bool check_overcommit = false;
5336 spin_lock(&space_info->lock);
5337 head = &space_info->priority_tickets;
5340 * If we are over our limit then we need to check and see if we can
5341 * overcommit, and if we can't then we just need to free up our space
5342 * and not satisfy any requests.
5344 used = btrfs_space_info_used(space_info, true);
5345 if (used - num_bytes >= space_info->total_bytes)
5346 check_overcommit = true;
5348 while (!list_empty(head) && num_bytes) {
5349 ticket = list_first_entry(head, struct reserve_ticket,
5352 * We use 0 bytes because this space is already reserved, so
5353 * adding the ticket space would be a double count.
5355 if (check_overcommit &&
5356 !can_overcommit(fs_info, space_info, 0, flush, false))
5358 if (num_bytes >= ticket->bytes) {
5359 list_del_init(&ticket->list);
5360 num_bytes -= ticket->bytes;
5362 space_info->tickets_id++;
5363 wake_up(&ticket->wait);
5365 ticket->bytes -= num_bytes;
5370 if (num_bytes && head == &space_info->priority_tickets) {
5371 head = &space_info->tickets;
5372 flush = BTRFS_RESERVE_FLUSH_ALL;
5375 space_info->bytes_may_use -= num_bytes;
5376 trace_btrfs_space_reservation(fs_info, "space_info",
5377 space_info->flags, num_bytes, 0);
5378 spin_unlock(&space_info->lock);
5382 * This is for newly allocated space that isn't accounted in
5383 * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
5384 * we use this helper.
5386 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
5387 struct btrfs_space_info *space_info,
5390 struct reserve_ticket *ticket;
5391 struct list_head *head = &space_info->priority_tickets;
5394 while (!list_empty(head) && num_bytes) {
5395 ticket = list_first_entry(head, struct reserve_ticket,
5397 if (num_bytes >= ticket->bytes) {
5398 trace_btrfs_space_reservation(fs_info, "space_info",
5401 list_del_init(&ticket->list);
5402 num_bytes -= ticket->bytes;
5403 space_info->bytes_may_use += ticket->bytes;
5405 space_info->tickets_id++;
5406 wake_up(&ticket->wait);
5408 trace_btrfs_space_reservation(fs_info, "space_info",
5411 space_info->bytes_may_use += num_bytes;
5412 ticket->bytes -= num_bytes;
5417 if (num_bytes && head == &space_info->priority_tickets) {
5418 head = &space_info->tickets;
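/*
 * Shrink @block_rsv by @num_bytes ((u64)-1 means "down to zero") and give
 * the excess reservation first to @dest until it is full, then back to
 * the space_info so waiting tickets can be satisfied. Any excess qgroup
 * reservation is reported through @qgroup_to_release_ret.
 */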
5423 static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5424 struct btrfs_block_rsv *block_rsv,
5425 struct btrfs_block_rsv *dest, u64 num_bytes,
5426 u64 *qgroup_to_release_ret)
5428 struct btrfs_space_info *space_info = block_rsv->space_info;
5429 u64 qgroup_to_release = 0;
5432 spin_lock(&block_rsv->lock);
5433 if (num_bytes == (u64)-1) {
5434 num_bytes = block_rsv->size;
5435 qgroup_to_release = block_rsv->qgroup_rsv_size;
5437 block_rsv->size -= num_bytes;
5438 if (block_rsv->reserved >= block_rsv->size) {
5439 num_bytes = block_rsv->reserved - block_rsv->size;
5440 block_rsv->reserved = block_rsv->size;
5441 block_rsv->full = 1;
5445 if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
5446 qgroup_to_release = block_rsv->qgroup_rsv_reserved -
5447 block_rsv->qgroup_rsv_size;
5448 block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
5450 qgroup_to_release = 0;
5452 spin_unlock(&block_rsv->lock);
5455 if (num_bytes > 0) {
5457 spin_lock(&dest->lock);
5461 bytes_to_add = dest->size - dest->reserved;
5462 bytes_to_add = min(num_bytes, bytes_to_add);
5463 dest->reserved += bytes_to_add;
5464 if (dest->reserved >= dest->size)
5466 num_bytes -= bytes_to_add;
5468 spin_unlock(&dest->lock);
5471 space_info_add_old_bytes(fs_info, space_info,
5474 if (qgroup_to_release_ret)
5475 *qgroup_to_release_ret = qgroup_to_release;
5479 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
5480 struct btrfs_block_rsv *dst, u64 num_bytes,
5485 ret = block_rsv_use_bytes(src, num_bytes);
5489 block_rsv_add_bytes(dst, num_bytes, update_size);
5493 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5495 memset(rsv, 0, sizeof(*rsv));
5496 spin_lock_init(&rsv->lock);
5500 void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
5501 struct btrfs_block_rsv *rsv,
5502 unsigned short type)
5504 btrfs_init_block_rsv(rsv, type);
5505 rsv->space_info = __find_space_info(fs_info,
5506 BTRFS_BLOCK_GROUP_METADATA);
5509 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
5510 unsigned short type)
5512 struct btrfs_block_rsv *block_rsv;
5514 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5518 btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
5522 void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
5523 struct btrfs_block_rsv *rsv)
5527 btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
5531 int btrfs_block_rsv_add(struct btrfs_root *root,
5532 struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5533 enum btrfs_reserve_flush_enum flush)
5540 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5542 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5549 int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
5557 spin_lock(&block_rsv->lock);
5558 num_bytes = div_factor(block_rsv->size, min_factor);
5559 if (block_rsv->reserved >= num_bytes)
5561 spin_unlock(&block_rsv->lock);
5566 int btrfs_block_rsv_refill(struct btrfs_root *root,
5567 struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5568 enum btrfs_reserve_flush_enum flush)
5576 spin_lock(&block_rsv->lock);
5577 num_bytes = min_reserved;
5578 if (block_rsv->reserved >= num_bytes)
5581 num_bytes -= block_rsv->reserved;
5582 spin_unlock(&block_rsv->lock);
5587 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5589 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5597 * btrfs_inode_rsv_refill - refill the inode block rsv.
5598 * @inode - the inode we are refilling.
5599 * @flush - the flushing restriction.
5601 * Essentially the same as btrfs_block_rsv_refill, except it uses the
5602 * block_rsv->size as the minimum size. We'll either refill the missing amount
5603 * or return if we already have enough space. This will also handle the reserve
5604 * tracepoint for the reserved amount.
5606 static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
5607 enum btrfs_reserve_flush_enum flush)
5609 struct btrfs_root *root = inode->root;
5610 struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
5612 u64 qgroup_num_bytes = 0;
5615 spin_lock(&block_rsv->lock);
5616 if (block_rsv->reserved < block_rsv->size)
5617 num_bytes = block_rsv->size - block_rsv->reserved;
5618 if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size)
5619 qgroup_num_bytes = block_rsv->qgroup_rsv_size -
5620 block_rsv->qgroup_rsv_reserved;
5621 spin_unlock(&block_rsv->lock);
5626 ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true);
5629 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5631 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5632 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5633 btrfs_ino(inode), num_bytes, 1);
5635 /* Don't forget to increase qgroup_rsv_reserved */
5636 spin_lock(&block_rsv->lock);
5637 block_rsv->qgroup_rsv_reserved += qgroup_num_bytes;
5638 spin_unlock(&block_rsv->lock);
5640 btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
5645 * btrfs_inode_rsv_release - release any excess reservation.
5646 * @inode - the inode we need to release from.
5647 * @qgroup_free - free or convert qgroup meta.
5648 * Unlike normal operation, qgroup meta reservation needs to know if we are
5649 * freeing qgroup reservation or just converting it into per-trans. Normally
5650 * @qgroup_free is true for error handling, and false for normal release.
5652 * This is the same as btrfs_block_rsv_release, except that it handles the
5653 * tracepoint for the reservation.
5655 static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
5657 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5658 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5659 struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
5661 u64 qgroup_to_release = 0;
5664 * Since we statically set the block_rsv->size we just want to say we
5665 * are releasing 0 bytes, and then any reservation over block_rsv->size
 * gets handed back to the global rsv or returned to the space_info.
5668 released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0,
5669 &qgroup_to_release);
5671 trace_btrfs_space_reservation(fs_info, "delalloc",
5672 btrfs_ino(inode), released, 0);
5674 btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release);
5676 btrfs_qgroup_convert_reserved_meta(inode->root,
5680 void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
5681 struct btrfs_block_rsv *block_rsv,
5684 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5686 if (global_rsv == block_rsv ||
5687 block_rsv->space_info != global_rsv->space_info)
5689 block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL);
5692 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5694 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5695 struct btrfs_space_info *sinfo = block_rsv->space_info;
5699 * The global block rsv is based on the size of the extent tree, the
5700 * checksum tree and the root tree. If the fs is empty we want to set
5701 * it to a minimal amount for safety.
5703 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
5704 btrfs_root_used(&fs_info->csum_root->root_item) +
5705 btrfs_root_used(&fs_info->tree_root->root_item);
5706 num_bytes = max_t(u64, num_bytes, SZ_16M);
5708 spin_lock(&sinfo->lock);
5709 spin_lock(&block_rsv->lock);
5711 block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5713 if (block_rsv->reserved < block_rsv->size) {
5714 num_bytes = btrfs_space_info_used(sinfo, true);
5715 if (sinfo->total_bytes > num_bytes) {
5716 num_bytes = sinfo->total_bytes - num_bytes;
5717 num_bytes = min(num_bytes,
5718 block_rsv->size - block_rsv->reserved);
5719 block_rsv->reserved += num_bytes;
5720 sinfo->bytes_may_use += num_bytes;
5721 trace_btrfs_space_reservation(fs_info, "space_info",
5722 sinfo->flags, num_bytes,
5725 } else if (block_rsv->reserved > block_rsv->size) {
5726 num_bytes = block_rsv->reserved - block_rsv->size;
5727 sinfo->bytes_may_use -= num_bytes;
5728 trace_btrfs_space_reservation(fs_info, "space_info",
5729 sinfo->flags, num_bytes, 0);
5730 block_rsv->reserved = block_rsv->size;
5733 if (block_rsv->reserved == block_rsv->size)
5734 block_rsv->full = 1;
5736 block_rsv->full = 0;
5738 spin_unlock(&block_rsv->lock);
5739 spin_unlock(&sinfo->lock);
5742 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5744 struct btrfs_space_info *space_info;
5746 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5747 fs_info->chunk_block_rsv.space_info = space_info;
5749 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5750 fs_info->global_block_rsv.space_info = space_info;
5751 fs_info->trans_block_rsv.space_info = space_info;
5752 fs_info->empty_block_rsv.space_info = space_info;
5753 fs_info->delayed_block_rsv.space_info = space_info;
5755 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5756 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5757 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5758 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5759 if (fs_info->quota_root)
5760 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5761 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5763 update_global_block_rsv(fs_info);
5766 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5768 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5770 WARN_ON(fs_info->trans_block_rsv.size > 0);
5771 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5772 WARN_ON(fs_info->chunk_block_rsv.size > 0);
5773 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5774 WARN_ON(fs_info->delayed_block_rsv.size > 0);
5775 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5780 * To be called after all the new block groups attached to the transaction
5781 * handle have been created (btrfs_create_pending_block_groups()).
5783 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5785 struct btrfs_fs_info *fs_info = trans->fs_info;
5787 if (!trans->chunk_bytes_reserved)
5790 WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5792 block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5793 trans->chunk_bytes_reserved, NULL);
5794 trans->chunk_bytes_reserved = 0;
5798 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5799 * root: the root of the parent directory
5800 * rsv: block reservation
5801 * items: the number of items we need to reserve space for
5802 * use_global_rsv: allow fallback to the global block reservation
5804 * This function is used to reserve the space for snapshot/subvolume
5805 * creation and deletion. Those operations differ from common
5806 * file/directory operations: they change two fs/file trees
5807 * and the root tree, and the number of items that the qgroup reserves
5808 * differs from the free space reservation. So we cannot use
5809 * the space reservation mechanism in start_transaction().
5811 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5812 struct btrfs_block_rsv *rsv, int items,
5813 bool use_global_rsv)
5815 u64 qgroup_num_bytes = 0;
5818 struct btrfs_fs_info *fs_info = root->fs_info;
5819 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5821 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
5822 /* One for parent inode, two for dir entries */
5823 qgroup_num_bytes = 3 * fs_info->nodesize;
5824 ret = btrfs_qgroup_reserve_meta_prealloc(root,
5825 qgroup_num_bytes, true);
5830 num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
5831 rsv->space_info = __find_space_info(fs_info,
5832 BTRFS_BLOCK_GROUP_METADATA);
5833 ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5834 BTRFS_RESERVE_FLUSH_ALL);
5836 if (ret == -ENOSPC && use_global_rsv)
5837 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
5839 if (ret && qgroup_num_bytes)
5840 btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
5845 void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
5846 struct btrfs_block_rsv *rsv)
5848 btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
5851 static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
5852 struct btrfs_inode *inode)
5854 struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
5855 u64 reserve_size = 0;
5856 u64 qgroup_rsv_size = 0;
5858 unsigned outstanding_extents;
5860 lockdep_assert_held(&inode->lock);
5861 outstanding_extents = inode->outstanding_extents;
5862 if (outstanding_extents)
5863 reserve_size = btrfs_calc_trans_metadata_size(fs_info,
5864 outstanding_extents + 1);
5865 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
5867 reserve_size += btrfs_calc_trans_metadata_size(fs_info,
5870 * For qgroup rsv, the calculation is very simple:
5871 * account one nodesize for each outstanding extent
5873 * This overestimates in most cases.
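 * e.g. (a hedged illustration): with a 16KiB nodesize and 4 outstanding
 * extents, qgroup_rsv_size = 4 * 16KiB = 64KiB, regardless of how large
 * the extents themselves are.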
5875 qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
5877 spin_lock(&block_rsv->lock);
5878 block_rsv->size = reserve_size;
5879 block_rsv->qgroup_rsv_size = qgroup_rsv_size;
5880 spin_unlock(&block_rsv->lock);
5883 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
5885 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5886 unsigned nr_extents;
5887 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5889 bool delalloc_lock = true;
5891 /* If we are a free space inode we need to not flush since we will be in
5892 * the middle of a transaction commit. We also don't need the delalloc
5893 * mutex since we won't race with anybody. We need this mostly to make
5894 * lockdep shut its filthy mouth.
5896 * If we have a transaction open (can happen if we call truncate_block
5897 * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
5899 if (btrfs_is_free_space_inode(inode)) {
5900 flush = BTRFS_RESERVE_NO_FLUSH;
5901 delalloc_lock = false;
5903 if (current->journal_info)
5904 flush = BTRFS_RESERVE_FLUSH_LIMIT;
5906 if (btrfs_transaction_in_commit(fs_info))
5907 schedule_timeout(1);
5911 mutex_lock(&inode->delalloc_mutex);
5913 num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
5915 /* Add our new extents and calculate the new rsv size. */
5916 spin_lock(&inode->lock);
5917 nr_extents = count_max_extents(num_bytes);
5918 btrfs_mod_outstanding_extents(inode, nr_extents);
5919 inode->csum_bytes += num_bytes;
5920 btrfs_calculate_inode_block_rsv_size(fs_info, inode);
5921 spin_unlock(&inode->lock);
5923 ret = btrfs_inode_rsv_refill(inode, flush);
5928 mutex_unlock(&inode->delalloc_mutex);
5932 spin_lock(&inode->lock);
5933 nr_extents = count_max_extents(num_bytes);
5934 btrfs_mod_outstanding_extents(inode, -nr_extents);
5935 inode->csum_bytes -= num_bytes;
5936 btrfs_calculate_inode_block_rsv_size(fs_info, inode);
5937 spin_unlock(&inode->lock);
5939 btrfs_inode_rsv_release(inode, true);
5941 mutex_unlock(&inode->delalloc_mutex);
5946 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5947 * @inode: the inode to release the reservation for.
5948 * @num_bytes: the number of bytes we are releasing.
5949 * @qgroup_free: free qgroup reservation or convert it to per-trans reservation
5951 * This will release the metadata reservation for an inode. This can be called
5952 * once we complete IO for a given set of bytes to release their metadata
5953 * reservations, or on error for the same reason.
5955 void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
5958 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5960 num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
5961 spin_lock(&inode->lock);
5962 inode->csum_bytes -= num_bytes;
5963 btrfs_calculate_inode_block_rsv_size(fs_info, inode);
5964 spin_unlock(&inode->lock);
5966 if (btrfs_is_testing(fs_info))
5969 btrfs_inode_rsv_release(inode, qgroup_free);
5973 * btrfs_delalloc_release_extents - release our outstanding_extents
5974 * @inode: the inode to balance the reservation for.
5975 * @num_bytes: the number of bytes we originally reserved
5976 * @qgroup_free: do we need to free qgroup meta reservation or convert them.
5978 * When we reserve space we increase outstanding_extents for the extents we may
5979 * add. Once we've set the range as delalloc or created our ordered extents we
5980 * have outstanding_extents to track the real usage, so we use this to free our
5981 * temporarily tracked outstanding_extents. This _must_ be used in conjunction
5982 * with btrfs_delalloc_reserve_metadata.
5984 void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
5987 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5988 unsigned num_extents;
5990 spin_lock(&inode->lock);
5991 num_extents = count_max_extents(num_bytes);
5992 btrfs_mod_outstanding_extents(inode, -num_extents);
5993 btrfs_calculate_inode_block_rsv_size(fs_info, inode);
5994 spin_unlock(&inode->lock);
5996 if (btrfs_is_testing(fs_info))
5999 btrfs_inode_rsv_release(inode, qgroup_free);
6003 * btrfs_delalloc_reserve_space - reserve data and metadata space for
6005 * @inode: inode we're writing to
6006 * @start: start of the range we are writing to
6007 * @len: length of the range we are writing to
6008 * @reserved: mandatory parameter, records the qgroup ranges actually reserved
6009 * by the current reservation.
6011 * This will do the following things
6013 * o reserve space in data space info for num bytes
6014 * and reserve precious corresponding qgroup space
6015 * (Done in check_data_free_space)
6017 * o reserve metadata space, based on the number of outstanding
6018 * extents and how many csums will be needed;
6019 * also reserve metadata space in a per-root over-reserve manner
6020 * o add to the inode's delalloc_bytes
6021 * o add it to the fs_info's delalloc inodes list.
6022 * (Above 3 all done in delalloc_reserve_metadata)
6024 * Return 0 for success
6025 * Return <0 for error (-ENOSPC or -EDQUOT)
6027 int btrfs_delalloc_reserve_space(struct inode *inode,
6028 struct extent_changeset **reserved, u64 start, u64 len)
6032 ret = btrfs_check_data_free_space(inode, reserved, start, len);
6035 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
6037 btrfs_free_reserved_data_space(inode, *reserved, start, len);
6042 * btrfs_delalloc_release_space - release data and metadata space for delalloc
6043 * @inode: inode we're releasing space for
6044 * @start: start position of the space already reserved
6045 * @len: the length of the space already reserved
6046 * @qgroup_free: whether to free the qgroup reservation or convert it to per-trans
6048 * This function will release the metadata space that was not used and will
6049 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
6050 * list if there are no delalloc bytes left.
6051 * It will also handle the qgroup reserved space.
6053 void btrfs_delalloc_release_space(struct inode *inode,
6054 struct extent_changeset *reserved,
6055 u64 start, u64 len, bool qgroup_free)
6057 btrfs_delalloc_release_metadata(BTRFS_I(inode), len, qgroup_free);
6058 btrfs_free_reserved_data_space(inode, reserved, start, len);
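/*
 * Apply an allocation (@alloc != 0) or a free (@alloc == 0) of @num_bytes
 * starting at @bytenr to the affected block group(s) and their space_info
 * counters. Freed extents are pinned here and only returned to the free
 * space accounting when they are unpinned after the transaction commits.
 */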
6061 static int update_block_group(struct btrfs_trans_handle *trans,
6062 struct btrfs_fs_info *info, u64 bytenr,
6063 u64 num_bytes, int alloc)
6065 struct btrfs_block_group_cache *cache = NULL;
6066 u64 total = num_bytes;
6071 /* block accounting for super block */
6072 spin_lock(&info->delalloc_root_lock);
6073 old_val = btrfs_super_bytes_used(info->super_copy);
6075 old_val += num_bytes;
6077 old_val -= num_bytes;
6078 btrfs_set_super_bytes_used(info->super_copy, old_val);
6079 spin_unlock(&info->delalloc_root_lock);
6082 cache = btrfs_lookup_block_group(info, bytenr);
6085 factor = btrfs_bg_type_to_factor(cache->flags);
6088 * If this block group has free space cache written out, we
6089 * need to make sure to load it if we are removing space. This
6090 * is because we need the unpinning stage to actually add the
6091 * space back to the block group, otherwise we will leak space.
6093 if (!alloc && cache->cached == BTRFS_CACHE_NO)
6094 cache_block_group(cache, 1);
6096 byte_in_group = bytenr - cache->key.objectid;
6097 WARN_ON(byte_in_group > cache->key.offset);
6099 spin_lock(&cache->space_info->lock);
6100 spin_lock(&cache->lock);
6102 if (btrfs_test_opt(info, SPACE_CACHE) &&
6103 cache->disk_cache_state < BTRFS_DC_CLEAR)
6104 cache->disk_cache_state = BTRFS_DC_CLEAR;
6106 old_val = btrfs_block_group_used(&cache->item);
6107 num_bytes = min(total, cache->key.offset - byte_in_group);
6109 old_val += num_bytes;
6110 btrfs_set_block_group_used(&cache->item, old_val);
6111 cache->reserved -= num_bytes;
6112 cache->space_info->bytes_reserved -= num_bytes;
6113 cache->space_info->bytes_used += num_bytes;
6114 cache->space_info->disk_used += num_bytes * factor;
6115 spin_unlock(&cache->lock);
6116 spin_unlock(&cache->space_info->lock);
6118 old_val -= num_bytes;
6119 btrfs_set_block_group_used(&cache->item, old_val);
6120 cache->pinned += num_bytes;
6121 cache->space_info->bytes_pinned += num_bytes;
6122 cache->space_info->bytes_used -= num_bytes;
6123 cache->space_info->disk_used -= num_bytes * factor;
6124 spin_unlock(&cache->lock);
6125 spin_unlock(&cache->space_info->lock);
6127 trace_btrfs_space_reservation(info, "pinned",
6128 cache->space_info->flags,
6130 percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
6132 BTRFS_TOTAL_BYTES_PINNED_BATCH);
6133 set_extent_dirty(info->pinned_extents,
6134 bytenr, bytenr + num_bytes - 1,
6135 GFP_NOFS | __GFP_NOFAIL);
6138 spin_lock(&trans->transaction->dirty_bgs_lock);
6139 if (list_empty(&cache->dirty_list)) {
6140 list_add_tail(&cache->dirty_list,
6141 &trans->transaction->dirty_bgs);
6142 trans->transaction->num_dirty_bgs++;
6143 btrfs_get_block_group(cache);
6145 spin_unlock(&trans->transaction->dirty_bgs_lock);
6148 * No longer have used bytes in this block group, queue it for
6149 * deletion. We do this after adding the block group to the
6150 * dirty list to avoid races between the cleaner kthread and space cache writeout.
6153 if (!alloc && old_val == 0)
6154 btrfs_mark_bg_unused(cache);
6156 btrfs_put_block_group(cache);
6158 bytenr += num_bytes;
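/*
 * Return the logical offset of the first block group, using the cached
 * fs_info->first_logical_byte when available and falling back to a block
 * group lookup otherwise.
 */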
6163 static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
6165 struct btrfs_block_group_cache *cache;
6168 spin_lock(&fs_info->block_group_cache_lock);
6169 bytenr = fs_info->first_logical_byte;
6170 spin_unlock(&fs_info->block_group_cache_lock);
6172 if (bytenr < (u64)-1)
6175 cache = btrfs_lookup_first_block_group(fs_info, search_start);
6179 bytenr = cache->key.objectid;
6180 btrfs_put_block_group(cache);
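/*
 * Move @num_bytes at @bytenr into the pinned counters of @cache (dropping
 * the reserved counters when @reserved is set) and mark the range dirty in
 * pinned_extents so it is unpinned after the transaction commits.
 */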
6185 static int pin_down_extent(struct btrfs_fs_info *fs_info,
6186 struct btrfs_block_group_cache *cache,
6187 u64 bytenr, u64 num_bytes, int reserved)
6189 spin_lock(&cache->space_info->lock);
6190 spin_lock(&cache->lock);
6191 cache->pinned += num_bytes;
6192 cache->space_info->bytes_pinned += num_bytes;
6194 cache->reserved -= num_bytes;
6195 cache->space_info->bytes_reserved -= num_bytes;
6197 spin_unlock(&cache->lock);
6198 spin_unlock(&cache->space_info->lock);
6200 trace_btrfs_space_reservation(fs_info, "pinned",
6201 cache->space_info->flags, num_bytes, 1);
6202 percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
6203 num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
6204 set_extent_dirty(fs_info->pinned_extents, bytenr,
6205 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6210 * this function must be called within a transaction
6212 int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
6213 u64 bytenr, u64 num_bytes, int reserved)
6215 struct btrfs_block_group_cache *cache;
6217 cache = btrfs_lookup_block_group(fs_info, bytenr);
6218 BUG_ON(!cache); /* Logic error */
6220 pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);
6222 btrfs_put_block_group(cache);
6227 * this function must be called within a transaction
6229 int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
6230 u64 bytenr, u64 num_bytes)
6232 struct btrfs_block_group_cache *cache;
6235 cache = btrfs_lookup_block_group(fs_info, bytenr);
6240 * pull in the free space cache (if any) so that our pin
6241 * removes the free space from the cache. We have load_only set
6242 * to one because the slow code to read in the free extents does check
6243 * the pinned extents.
6245 cache_block_group(cache, 1);
6247 pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);
6249 /* remove us from the free space cache (if we're there at all) */
6250 ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6251 btrfs_put_block_group(cache);
6255 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
6256 u64 start, u64 num_bytes)
6259 struct btrfs_block_group_cache *block_group;
6260 struct btrfs_caching_control *caching_ctl;
6262 block_group = btrfs_lookup_block_group(fs_info, start);
6266 cache_block_group(block_group, 0);
6267 caching_ctl = get_caching_control(block_group);
6271 BUG_ON(!block_group_cache_done(block_group));
6272 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6274 mutex_lock(&caching_ctl->mutex);
6276 if (start >= caching_ctl->progress) {
6277 ret = add_excluded_extent(fs_info, start, num_bytes);
6278 } else if (start + num_bytes <= caching_ctl->progress) {
6279 ret = btrfs_remove_free_space(block_group,
6282 num_bytes = caching_ctl->progress - start;
6283 ret = btrfs_remove_free_space(block_group,
6288 num_bytes = (start + num_bytes) -
6289 caching_ctl->progress;
6290 start = caching_ctl->progress;
6291 ret = add_excluded_extent(fs_info, start, num_bytes);
6294 mutex_unlock(&caching_ctl->mutex);
6295 put_caching_control(caching_ctl);
6297 btrfs_put_block_group(block_group);
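/*
 * On filesystems with mixed block groups, walk a logged leaf and exclude
 * every file extent it references from the free space cache, so that log
 * replay does not allocate over those extents.
 */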
6301 int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
6302 struct extent_buffer *eb)
6304 struct btrfs_file_extent_item *item;
6305 struct btrfs_key key;
6310 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
6313 for (i = 0; i < btrfs_header_nritems(eb); i++) {
6314 btrfs_item_key_to_cpu(eb, &key, i);
6315 if (key.type != BTRFS_EXTENT_DATA_KEY)
6317 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6318 found_type = btrfs_file_extent_type(eb, item);
6319 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6321 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6323 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6324 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6325 ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
6334 btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
6336 atomic_inc(&bg->reservations);
6339 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
6342 struct btrfs_block_group_cache *bg;
6344 bg = btrfs_lookup_block_group(fs_info, start);
6346 if (atomic_dec_and_test(&bg->reservations))
6347 wake_up_var(&bg->reservations);
6348 btrfs_put_block_group(bg);
6351 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6353 struct btrfs_space_info *space_info = bg->space_info;
6357 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
6361 * Our block group is read only but before we set it to read only,
6362 * some task might have allocated an extent from it already, but it
6363 * has not yet created a respective ordered extent (and added it to a
6364 * root's list of ordered extents).
6365 * Therefore wait for any task currently allocating extents, since the
6366 * block group's reservations counter is incremented while a read lock
6367 * on the groups' semaphore is held and decremented after releasing
6368 * the read access on that semaphore and creating the ordered extent.
6370 down_write(&space_info->groups_sem);
6371 up_write(&space_info->groups_sem);
6373 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
6377 * btrfs_add_reserved_bytes - update the block_group and space info counters
6378 * @cache: The cache we are manipulating
6379 * @ram_bytes: The number of bytes of file content; this will be the same as
6380 * @num_bytes except on the compression path.
6381 * @num_bytes: The number of bytes in question
6382 * @delalloc: The blocks are allocated for the delalloc write
6384 * This is called by the allocator when it reserves space. If this is a
6385 * reservation and the block group has become read only we cannot make the
6386 * reservation and return -EAGAIN, otherwise this function always succeeds.
6388 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
6389 u64 ram_bytes, u64 num_bytes, int delalloc)
6391 struct btrfs_space_info *space_info = cache->space_info;
6394 spin_lock(&space_info->lock);
6395 spin_lock(&cache->lock);
6399 cache->reserved += num_bytes;
6400 space_info->bytes_reserved += num_bytes;
6402 trace_btrfs_space_reservation(cache->fs_info,
6403 "space_info", space_info->flags,
6405 space_info->bytes_may_use -= ram_bytes;
6407 cache->delalloc_bytes += num_bytes;
6409 spin_unlock(&cache->lock);
6410 spin_unlock(&space_info->lock);
6415 * btrfs_free_reserved_bytes - update the block_group and space info counters
6416 * @cache: The cache we are manipulating
6417 * @num_bytes: The number of bytes in question
6418 * @delalloc: The blocks are allocated for the delalloc write
6420 * This is called by somebody who is freeing space that was never actually used
6421 * on disk. For example if you reserve some space for a new leaf in transaction
6422 * A and before transaction A commits you free that leaf, you call this with
6423 * reserve set to 0 in order to clear the reservation.
6426 static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
6427 u64 num_bytes, int delalloc)
6429 struct btrfs_space_info *space_info = cache->space_info;
6432 spin_lock(&space_info->lock);
6433 spin_lock(&cache->lock);
6435 space_info->bytes_readonly += num_bytes;
6436 cache->reserved -= num_bytes;
6437 space_info->bytes_reserved -= num_bytes;
6438 space_info->max_extent_size = 0;
6441 cache->delalloc_bytes -= num_bytes;
6442 spin_unlock(&cache->lock);
6443 spin_unlock(&space_info->lock);
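/*
 * Called while committing the transaction: settle last_byte_to_unpin for
 * every block group that is still caching, and flip pinned_extents to the
 * other freed_extents tree for the next transaction.
 */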
6446 void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
6448 struct btrfs_caching_control *next;
6449 struct btrfs_caching_control *caching_ctl;
6450 struct btrfs_block_group_cache *cache;
6452 down_write(&fs_info->commit_root_sem);
6454 list_for_each_entry_safe(caching_ctl, next,
6455 &fs_info->caching_block_groups, list) {
6456 cache = caching_ctl->block_group;
6457 if (block_group_cache_done(cache)) {
6458 cache->last_byte_to_unpin = (u64)-1;
6459 list_del_init(&caching_ctl->list);
6460 put_caching_control(caching_ctl);
6462 cache->last_byte_to_unpin = caching_ctl->progress;
6466 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6467 fs_info->pinned_extents = &fs_info->freed_extents[1];
6469 fs_info->pinned_extents = &fs_info->freed_extents[0];
6471 up_write(&fs_info->commit_root_sem);
6473 update_global_block_rsv(fs_info);
6477 * Returns the free cluster for the given space info and sets empty_cluster to
6478 * what it should be based on the mount options.
6480 static struct btrfs_free_cluster *
6481 fetch_cluster_info(struct btrfs_fs_info *fs_info,
6482 struct btrfs_space_info *space_info, u64 *empty_cluster)
6484 struct btrfs_free_cluster *ret = NULL;
6487 if (btrfs_mixed_space_info(space_info))
6490 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6491 ret = &fs_info->meta_alloc_cluster;
6492 if (btrfs_test_opt(fs_info, SSD))
6493 *empty_cluster = SZ_2M;
6495 *empty_cluster = SZ_64K;
6496 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
6497 btrfs_test_opt(fs_info, SSD_SPREAD)) {
6498 *empty_cluster = SZ_2M;
6499 ret = &fs_info->data_alloc_cluster;
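/*
 * Walk [start, end] and return the pinned bytes to their block groups and
 * space_infos, topping up the global rsv first and then handing what is
 * left to any waiting tickets.
 */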
6505 static int unpin_extent_range(struct btrfs_fs_info *fs_info,
6507 const bool return_free_space)
6509 struct btrfs_block_group_cache *cache = NULL;
6510 struct btrfs_space_info *space_info;
6511 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6512 struct btrfs_free_cluster *cluster = NULL;
6514 u64 total_unpinned = 0;
6515 u64 empty_cluster = 0;
6518 while (start <= end) {
6521 start >= cache->key.objectid + cache->key.offset) {
6523 btrfs_put_block_group(cache);
6525 cache = btrfs_lookup_block_group(fs_info, start);
6526 BUG_ON(!cache); /* Logic error */
6528 cluster = fetch_cluster_info(fs_info,
6531 empty_cluster <<= 1;
6534 len = cache->key.objectid + cache->key.offset - start;
6535 len = min(len, end + 1 - start);
6537 if (start < cache->last_byte_to_unpin) {
6538 len = min(len, cache->last_byte_to_unpin - start);
6539 if (return_free_space)
6540 btrfs_add_free_space(cache, start, len);
6544 total_unpinned += len;
6545 space_info = cache->space_info;
6548 * If this space cluster has been marked as fragmented and we've
6549 * unpinned enough in this block group to potentially allow a
6550 * cluster to be created inside of it, go ahead and clear the fragmented flag.
6553 if (cluster && cluster->fragmented &&
6554 total_unpinned > empty_cluster) {
6555 spin_lock(&cluster->lock);
6556 cluster->fragmented = 0;
6557 spin_unlock(&cluster->lock);
6560 spin_lock(&space_info->lock);
6561 spin_lock(&cache->lock);
6562 cache->pinned -= len;
6563 space_info->bytes_pinned -= len;
6565 trace_btrfs_space_reservation(fs_info, "pinned",
6566 space_info->flags, len, 0);
6567 space_info->max_extent_size = 0;
6568 percpu_counter_add_batch(&space_info->total_bytes_pinned,
6569 -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
6571 space_info->bytes_readonly += len;
6574 spin_unlock(&cache->lock);
6575 if (!readonly && return_free_space &&
6576 global_rsv->space_info == space_info) {
6579 spin_lock(&global_rsv->lock);
6580 if (!global_rsv->full) {
6581 to_add = min(len, global_rsv->size -
6582 global_rsv->reserved);
6583 global_rsv->reserved += to_add;
6584 space_info->bytes_may_use += to_add;
6585 if (global_rsv->reserved >= global_rsv->size)
6586 global_rsv->full = 1;
6587 trace_btrfs_space_reservation(fs_info,
6593 spin_unlock(&global_rsv->lock);
6594 /* Add to any tickets we may have */
6596 space_info_add_new_bytes(fs_info, space_info,
6599 spin_unlock(&space_info->lock);
6603 btrfs_put_block_group(cache);
6607 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
6609 struct btrfs_fs_info *fs_info = trans->fs_info;
6610 struct btrfs_block_group_cache *block_group, *tmp;
6611 struct list_head *deleted_bgs;
6612 struct extent_io_tree *unpin;
6617 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6618 unpin = &fs_info->freed_extents[1];
6620 unpin = &fs_info->freed_extents[0];
6622 while (!trans->aborted) {
6623 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6624 ret = find_first_extent_bit(unpin, 0, &start, &end,
6625 EXTENT_DIRTY, NULL);
6627 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6631 if (btrfs_test_opt(fs_info, DISCARD))
6632 ret = btrfs_discard_extent(fs_info, start,
6633 end + 1 - start, NULL);
6635 clear_extent_dirty(unpin, start, end);
6636 unpin_extent_range(fs_info, start, end, true);
6637 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6642 * Transaction is finished. We don't need the lock anymore. We
6643 * do need to clean up the block groups in case of a transaction abort.
6646 deleted_bgs = &trans->transaction->deleted_bgs;
6647 list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6651 if (!trans->aborted)
6652 ret = btrfs_discard_extent(fs_info,
6653 block_group->key.objectid,
6654 block_group->key.offset,
6657 list_del_init(&block_group->bg_list);
6658 btrfs_put_block_group_trimming(block_group);
6659 btrfs_put_block_group(block_group);
6662 const char *errstr = btrfs_decode_error(ret);
6664 "discard failed while removing blockgroup: errno=%d %s",
6672 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6673 struct btrfs_delayed_ref_node *node, u64 parent,
6674 u64 root_objectid, u64 owner_objectid,
6675 u64 owner_offset, int refs_to_drop,
6676 struct btrfs_delayed_extent_op *extent_op)
6678 struct btrfs_fs_info *info = trans->fs_info;
6679 struct btrfs_key key;
6680 struct btrfs_path *path;
6681 struct btrfs_root *extent_root = info->extent_root;
6682 struct extent_buffer *leaf;
6683 struct btrfs_extent_item *ei;
6684 struct btrfs_extent_inline_ref *iref;
6687 int extent_slot = 0;
6688 int found_extent = 0;
6692 u64 bytenr = node->bytenr;
6693 u64 num_bytes = node->num_bytes;
6695 bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
6697 path = btrfs_alloc_path();
6698 if (!path)
6699 return -ENOMEM;
6701 path->reada = READA_FORWARD;
6702 path->leave_spinning = 1;
6704 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6705 BUG_ON(!is_data && refs_to_drop != 1);
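/*
 * Only data backrefs carry a reference count, so dropping more than
 * one reference at a time is only valid for data extents; tree
 * blocks always drop exactly one reference here.
 */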
6707 if (is_data)
6708 skinny_metadata = false;
6710 ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
6711 parent, root_objectid, owner_objectid,
6712 owner_offset);
6713 if (ret == 0) {
6714 extent_slot = path->slots[0];
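/*
 * The extent item sorts just before its backref items in the leaf,
 * so walk backwards a few slots from the backref we found to locate
 * it.
 */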
6715 while (extent_slot >= 0) {
6716 btrfs_item_key_to_cpu(path->nodes[0], &key,
6717 extent_slot);
6718 if (key.objectid != bytenr)
6719 break;
6720 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6721 key.offset == num_bytes) {
6722 found_extent = 1;
6723 break;
6724 }
6725 if (key.type == BTRFS_METADATA_ITEM_KEY &&
6726 key.offset == owner_objectid) {
6727 found_extent = 1;
6728 break;
6729 }
6730 if (path->slots[0] - extent_slot > 5)
6731 break;
6732 extent_slot--;
6733 }
6735 if (!found_extent) {
6737 ret = remove_extent_backref(trans, path, NULL,
6738 refs_to_drop,
6739 is_data, &last_ref);
6741 btrfs_abort_transaction(trans, ret);
6744 btrfs_release_path(path);
6745 path->leave_spinning = 1;
6747 key.objectid = bytenr;
6748 key.type = BTRFS_EXTENT_ITEM_KEY;
6749 key.offset = num_bytes;
6751 if (!is_data && skinny_metadata) {
6752 key.type = BTRFS_METADATA_ITEM_KEY;
6753 key.offset = owner_objectid;
6756 ret = btrfs_search_slot(trans, extent_root,
6757 &key, path, -1, 1);
6758 if (ret > 0 && skinny_metadata && path->slots[0]) {
6760 * Couldn't find our skinny metadata item,
6761 * see if we have ye olde extent item.
6764 btrfs_item_key_to_cpu(path->nodes[0], &key,
6766 if (key.objectid == bytenr &&
6767 key.type == BTRFS_EXTENT_ITEM_KEY &&
6768 key.offset == num_bytes)
6772 if (ret > 0 && skinny_metadata) {
6773 skinny_metadata = false;
6774 key.objectid = bytenr;
6775 key.type = BTRFS_EXTENT_ITEM_KEY;
6776 key.offset = num_bytes;
6777 btrfs_release_path(path);
6778 ret = btrfs_search_slot(trans, extent_root,
6779 &key, path, -1, 1);
6780 }
6782 if (ret) {
6783 btrfs_err(info,
6784 "umm, got %d back from search, was looking for %llu",
6785 ret, bytenr);
6786 if (ret > 0)
6787 btrfs_print_leaf(path->nodes[0]);
6790 btrfs_abort_transaction(trans, ret);
6793 extent_slot = path->slots[0];
6795 } else if (WARN_ON(ret == -ENOENT)) {
6796 btrfs_print_leaf(path->nodes[0]);
6798 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6799 bytenr, parent, root_objectid, owner_objectid,
6801 btrfs_abort_transaction(trans, ret);
6804 btrfs_abort_transaction(trans, ret);
6808 leaf = path->nodes[0];
6809 item_size = btrfs_item_size_nr(leaf, extent_slot);
6810 if (unlikely(item_size < sizeof(*ei))) {
6811 ret = -EINVAL;
6812 btrfs_print_v0_err(info);
6813 btrfs_abort_transaction(trans, ret);
6816 ei = btrfs_item_ptr(leaf, extent_slot,
6817 struct btrfs_extent_item);
6818 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6819 key.type == BTRFS_EXTENT_ITEM_KEY) {
6820 struct btrfs_tree_block_info *bi;
6821 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6822 bi = (struct btrfs_tree_block_info *)(ei + 1);
6823 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6826 refs = btrfs_extent_refs(leaf, ei);
6827 if (refs < refs_to_drop) {
6829 "trying to drop %d refs but we only have %Lu for bytenr %Lu",
6830 refs_to_drop, refs, bytenr);
6832 btrfs_abort_transaction(trans, ret);
6835 refs -= refs_to_drop;
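/*
 * Two cases from here: if references remain, the item is updated in
 * place (an inline backref is adjusted by remove_extent_backref); if
 * this was the last reference, the extent item itself is deleted
 * below.
 */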
6837 if (refs > 0) {
6838 if (extent_op)
6839 __run_delayed_extent_op(extent_op, leaf, ei);
6841 * In the case of inline back ref, reference count will
6842 * be updated by remove_extent_backref
6843 */
6844 if (iref) {
6845 BUG_ON(!found_extent);
6847 btrfs_set_extent_refs(leaf, ei, refs);
6848 btrfs_mark_buffer_dirty(leaf);
6851 ret = remove_extent_backref(trans, path, iref,
6852 refs_to_drop, is_data,
6853 &last_ref);
6854 if (ret) {
6855 btrfs_abort_transaction(trans, ret);
6861 BUG_ON(is_data && refs_to_drop !=
6862 extent_data_ref_count(path, iref));
6863 if (iref) {
6864 BUG_ON(path->slots[0] != extent_slot);
6865 } else {
6866 BUG_ON(path->slots[0] != extent_slot + 1);
6867 path->slots[0] = extent_slot;
6868 num_to_del = 2;
6869 }
6870 }
6872 last_ref = 1;
6873 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6874 num_to_del);
6876 btrfs_abort_transaction(trans, ret);
6879 btrfs_release_path(path);
6881 if (is_data) {
6882 ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
6884 btrfs_abort_transaction(trans, ret);
6889 ret = add_to_free_space_tree(trans, bytenr, num_bytes);
6891 btrfs_abort_transaction(trans, ret);
6895 ret = update_block_group(trans, info, bytenr, num_bytes, 0);
6897 btrfs_abort_transaction(trans, ret);
6901 btrfs_release_path(path);
6904 btrfs_free_path(path);
6909 * when we free a block, it is possible (and likely) that we free the last
6910 * delayed ref for that extent as well. This searches the delayed ref tree for
6911 * a given extent, and if there are no other delayed refs to be processed, it
6912 * removes it from the tree.
6914 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6917 struct btrfs_delayed_ref_head *head;
6918 struct btrfs_delayed_ref_root *delayed_refs;
6921 delayed_refs = &trans->transaction->delayed_refs;
6922 spin_lock(&delayed_refs->lock);
6923 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
6924 if (!head)
6925 goto out_delayed_unlock;
6927 spin_lock(&head->lock);
6928 if (!RB_EMPTY_ROOT(&head->ref_tree))
6929 goto out;
6931 if (head->extent_op) {
6932 if (!head->must_insert_reserved)
6933 goto out;
6934 btrfs_free_delayed_extent_op(head->extent_op);
6935 head->extent_op = NULL;
6939 * waiting for the lock here would deadlock. If someone else has it
6940 * locked, they are already in the process of dropping it anyway.
6941 */
6942 if (!mutex_trylock(&head->mutex))
6943 goto out;
6946 * at this point we have a head with no other entries. Go
6947 * ahead and process it.
6949 rb_erase(&head->href_node, &delayed_refs->href_root);
6950 RB_CLEAR_NODE(&head->href_node);
6951 atomic_dec(&delayed_refs->num_entries);
6954 * we don't take a ref on the node because we're removing it from the
6955 * tree, so we just steal the ref the tree was holding.
6957 delayed_refs->num_heads--;
6958 if (head->processing == 0)
6959 delayed_refs->num_heads_ready--;
6960 head->processing = 0;
6961 spin_unlock(&head->lock);
6962 spin_unlock(&delayed_refs->lock);
6964 BUG_ON(head->extent_op);
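/*
 * A return value of 1 tells the caller the extent item was never
 * inserted into the extent tree (must_insert_reserved), so the
 * caller has to release the reserved space itself.
 */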
6965 if (head->must_insert_reserved)
6966 ret = 1;
6968 mutex_unlock(&head->mutex);
6969 btrfs_put_delayed_ref_head(head);
6971 out:
6972 spin_unlock(&head->lock);
6974 out_delayed_unlock:
6975 spin_unlock(&delayed_refs->lock);
6976 return 0;
6979 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6980 struct btrfs_root *root,
6981 struct extent_buffer *buf,
6982 u64 parent, int last_ref)
6984 struct btrfs_fs_info *fs_info = root->fs_info;
6988 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6989 int old_ref_mod, new_ref_mod;
6991 btrfs_ref_tree_mod(root, buf->start, buf->len, parent,
6992 root->root_key.objectid,
6993 btrfs_header_level(buf), 0,
6994 BTRFS_DROP_DELAYED_REF);
6995 ret = btrfs_add_delayed_tree_ref(trans, buf->start,
6996 buf->len, parent,
6997 root->root_key.objectid,
6998 btrfs_header_level(buf),
6999 BTRFS_DROP_DELAYED_REF, NULL,
7000 &old_ref_mod, &new_ref_mod);
7001 BUG_ON(ret); /* -ENOMEM */
7002 pin = old_ref_mod >= 0 && new_ref_mod < 0;
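/*
 * Crossing from a non-negative to a negative ref mod means this
 * drop really frees the extent, so its bytes must stay accounted
 * as pinned until the transaction commits.
 */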
7005 if (last_ref && btrfs_header_generation(buf) == trans->transid) {
7006 struct btrfs_block_group_cache *cache;
7008 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7009 ret = check_ref_cleanup(trans, buf->start);
7010 if (!ret)
7011 goto out;
7012 }
7015 cache = btrfs_lookup_block_group(fs_info, buf->start);
7017 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
7018 pin_down_extent(fs_info, cache, buf->start,
7019 buf->len, 1);
7020 btrfs_put_block_group(cache);
7024 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
7026 btrfs_add_free_space(cache, buf->start, buf->len);
7027 btrfs_free_reserved_bytes(cache, buf->len, 0);
7028 btrfs_put_block_group(cache);
7029 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
7030 }
7031 out:
7032 if (pin)
7033 add_pinned_bytes(fs_info, buf->len, true,
7034 root->root_key.objectid);
7038 * Deleting the buffer, clear the corrupt flag since it doesn't
7039 * matter anymore.
7040 */
7041 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
7045 /* Can return -ENOMEM */
7046 int btrfs_free_extent(struct btrfs_trans_handle *trans,
7047 struct btrfs_root *root,
7048 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
7049 u64 owner, u64 offset)
7051 struct btrfs_fs_info *fs_info = root->fs_info;
7052 int old_ref_mod, new_ref_mod;
7055 if (btrfs_is_testing(fs_info))
7056 return 0;
7058 if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
7059 btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
7060 root_objectid, owner, offset,
7061 BTRFS_DROP_DELAYED_REF);
7064 * tree log blocks never actually go into the extent allocation
7065 * tree, just update pinning info and exit early.
7067 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
7068 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
7069 /* unlocks the pinned mutex */
7070 btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
7071 old_ref_mod = new_ref_mod = 0;
7073 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
7074 ret = btrfs_add_delayed_tree_ref(trans, bytenr,
7075 num_bytes, parent,
7076 root_objectid, (int)owner,
7077 BTRFS_DROP_DELAYED_REF, NULL,
7078 &old_ref_mod, &new_ref_mod);
7080 ret = btrfs_add_delayed_data_ref(trans, bytenr,
7081 num_bytes, parent,
7082 root_objectid, owner, offset,
7083 0, BTRFS_DROP_DELAYED_REF,
7084 &old_ref_mod, &new_ref_mod);
7087 if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0) {
7088 bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
7090 add_pinned_bytes(fs_info, num_bytes, metadata, root_objectid);
7097 * when we wait for progress in the block group caching, it's because
7098 * our allocation attempt failed at least once. So, we must sleep
7099 * and let some progress happen before we try again.
7100 *
7101 * This function will sleep at least once waiting for new free space to
7102 * show up, and then it will check the block group free space numbers
7103 * for our min num_bytes. Another option is to have it go ahead
7104 * and look in the rbtree for a free extent of a given size, but this
7105 * is a good start.
7106 *
7107 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
7108 * any of the information in this block group.
7109 */
7110 static noinline void
7111 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
7114 struct btrfs_caching_control *caching_ctl;
7116 caching_ctl = get_caching_control(cache);
7117 if (!caching_ctl)
7118 return;
7120 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
7121 (cache->free_space_ctl->free_space >= num_bytes));
7123 put_caching_control(caching_ctl);
7127 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
7129 struct btrfs_caching_control *caching_ctl;
7132 caching_ctl = get_caching_control(cache);
7133 if (!caching_ctl)
7134 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
7136 wait_event(caching_ctl->wait, block_group_cache_done(cache));
7137 if (cache->cached == BTRFS_CACHE_ERROR)
7138 ret = -EIO;
7139 put_caching_control(caching_ctl);
7140 return ret;
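/*
 * The allocator escalates through these stages in order, trading
 * extra latency at each step for a better chance of finding free
 * space.
 */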
7143 enum btrfs_loop_type {
7144 LOOP_CACHING_NOWAIT = 0,
7145 LOOP_CACHING_WAIT = 1,
7146 LOOP_ALLOC_CHUNK = 2,
7147 LOOP_NO_EMPTY_SIZE = 3,
7151 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
7155 down_read(&cache->data_rwsem);
7159 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
7162 btrfs_get_block_group(cache);
7164 down_read(&cache->data_rwsem);
7167 static struct btrfs_block_group_cache *
7168 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7169 struct btrfs_free_cluster *cluster,
7172 struct btrfs_block_group_cache *used_bg = NULL;
7174 spin_lock(&cluster->refill_lock);
7176 used_bg = cluster->block_group;
7180 if (used_bg == block_group)
7183 btrfs_get_block_group(used_bg);
7188 if (down_read_trylock(&used_bg->data_rwsem))
7191 spin_unlock(&cluster->refill_lock);
7193 /* We should only have one-level nested. */
7194 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
7196 spin_lock(&cluster->refill_lock);
7197 if (used_bg == cluster->block_group)
7200 up_read(&used_bg->data_rwsem);
7201 btrfs_put_block_group(used_bg);
7206 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7210 up_read(&cache->data_rwsem);
7211 btrfs_put_block_group(cache);
7215 * walks the btree of allocated extents and finds a hole of a given size.
7216 * The key ins is changed to record the hole:
7217 * ins->objectid == start position
7218 * ins->flags = BTRFS_EXTENT_ITEM_KEY
7219 * ins->offset == the size of the hole.
7220 * Any available blocks before search_start are skipped.
7222 * If there is no suitable free space, we will record the max size of
7223 * the free space extent currently available.
7224 */
7225 static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
7226 u64 ram_bytes, u64 num_bytes, u64 empty_size,
7227 u64 hint_byte, struct btrfs_key *ins,
7228 u64 flags, int delalloc)
7231 struct btrfs_root *root = fs_info->extent_root;
7232 struct btrfs_free_cluster *last_ptr = NULL;
7233 struct btrfs_block_group_cache *block_group = NULL;
7234 u64 search_start = 0;
7235 u64 max_extent_size = 0;
7236 u64 max_free_space = 0;
7237 u64 empty_cluster = 0;
7238 struct btrfs_space_info *space_info;
7240 int index = btrfs_bg_flags_to_raid_index(flags);
7241 bool failed_cluster_refill = false;
7242 bool failed_alloc = false;
7243 bool use_cluster = true;
7244 bool have_caching_bg = false;
7245 bool orig_have_caching_bg = false;
7246 bool full_search = false;
7248 WARN_ON(num_bytes < fs_info->sectorsize);
7249 ins->type = BTRFS_EXTENT_ITEM_KEY;
7253 trace_find_free_extent(fs_info, num_bytes, empty_size, flags);
7255 space_info = __find_space_info(fs_info, flags);
7257 btrfs_err(fs_info, "No space info for %llu", flags);
7262 * If our free space is heavily fragmented we may not be able to make
7263 * big contiguous allocations, so instead of doing the expensive search
7264 * for free space, simply return ENOSPC with our max_extent_size so we
7265 * can go ahead and search for a more manageable chunk.
7267 * If our max_extent_size is large enough for our allocation simply
7268 * disable clustering since we will likely not be able to find enough
7269 * space to create a cluster and induce latency trying.
7271 if (unlikely(space_info->max_extent_size)) {
7272 spin_lock(&space_info->lock);
7273 if (space_info->max_extent_size &&
7274 num_bytes > space_info->max_extent_size) {
7275 ins->offset = space_info->max_extent_size;
7276 spin_unlock(&space_info->lock);
7277 return -ENOSPC;
7278 } else if (space_info->max_extent_size) {
7279 use_cluster = false;
7281 spin_unlock(&space_info->lock);
7284 last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
7285 if (last_ptr) {
7286 spin_lock(&last_ptr->lock);
7287 if (last_ptr->block_group)
7288 hint_byte = last_ptr->window_start;
7289 if (last_ptr->fragmented) {
7291 * We still set window_start so we can keep track of the
7292 * last place we found an allocation to try and save
7293 * some time searching.
7294 */
7295 hint_byte = last_ptr->window_start;
7296 use_cluster = false;
7298 spin_unlock(&last_ptr->lock);
7301 search_start = max(search_start, first_logical_byte(fs_info, 0));
7302 search_start = max(search_start, hint_byte);
7303 if (search_start == hint_byte) {
7304 block_group = btrfs_lookup_block_group(fs_info, search_start);
7306 * we don't want to use the block group if it doesn't match our
7307 * allocation bits, or if it's not cached.
7309 * However if we are re-searching with an ideal block group
7310 * picked out then we don't care that the block group is cached.
7312 if (block_group && block_group_bits(block_group, flags) &&
7313 block_group->cached != BTRFS_CACHE_NO) {
7314 down_read(&space_info->groups_sem);
7315 if (list_empty(&block_group->list) ||
7316 block_group->ro) {
7318 * someone is removing this block group,
7319 * we can't jump into the have_block_group
7320 * target because our list pointers are not
7321 * valid
7322 */
7323 btrfs_put_block_group(block_group);
7324 up_read(&space_info->groups_sem);
7326 index = btrfs_bg_flags_to_raid_index(
7327 block_group->flags);
7328 btrfs_lock_block_group(block_group, delalloc);
7329 goto have_block_group;
7331 } else if (block_group) {
7332 btrfs_put_block_group(block_group);
7335 search:
7336 have_caching_bg = false;
7337 if (index == 0 || index == btrfs_bg_flags_to_raid_index(flags))
7338 full_search = true;
7339 down_read(&space_info->groups_sem);
7340 list_for_each_entry(block_group, &space_info->block_groups[index],
7345 /* If the block group is read-only, we can skip it entirely. */
7346 if (unlikely(block_group->ro))
7349 btrfs_grab_block_group(block_group, delalloc);
7350 search_start = block_group->key.objectid;
7353 * this can happen if we end up cycling through all the
7354 * raid types, but we want to make sure we only allocate
7355 * for the proper type.
7357 if (!block_group_bits(block_group, flags)) {
7358 u64 extra = BTRFS_BLOCK_GROUP_DUP |
7359 BTRFS_BLOCK_GROUP_RAID1 |
7360 BTRFS_BLOCK_GROUP_RAID5 |
7361 BTRFS_BLOCK_GROUP_RAID6 |
7362 BTRFS_BLOCK_GROUP_RAID10;
7365 * if they asked for extra copies and this block group
7366 * doesn't provide them, bail. This does allow us to
7367 * fill raid0 from raid1.
7369 if ((flags & extra) && !(block_group->flags & extra))
7370 goto loop;
7374 cached = block_group_cache_done(block_group);
7375 if (unlikely(!cached)) {
7376 have_caching_bg = true;
7377 ret = cache_block_group(block_group, 0);
7382 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7383 goto loop;
7386 * Ok we want to try and use the cluster allocator, so
7387 * let's look there.
7388 */
7389 if (last_ptr && use_cluster) {
7390 struct btrfs_block_group_cache *used_block_group;
7391 unsigned long aligned_cluster;
7393 * the refill lock keeps out other
7394 * people trying to start a new cluster
7396 used_block_group = btrfs_lock_cluster(block_group,
7399 if (!used_block_group)
7400 goto refill_cluster;
7402 if (used_block_group != block_group &&
7403 (used_block_group->ro ||
7404 !block_group_bits(used_block_group, flags)))
7405 goto release_cluster;
7407 offset = btrfs_alloc_from_cluster(used_block_group,
7408 last_ptr,
7409 num_bytes,
7410 used_block_group->key.objectid,
7411 &max_extent_size);
7412 if (offset) {
7413 /* we have a block, we're done */
7414 spin_unlock(&last_ptr->refill_lock);
7415 trace_btrfs_reserve_extent_cluster(
7417 search_start, num_bytes);
7418 if (used_block_group != block_group) {
7419 btrfs_release_block_group(block_group,
7421 block_group = used_block_group;
7426 WARN_ON(last_ptr->block_group != used_block_group);
7428 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7429 * set up a new cluster, so let's just skip it
7430 * and let the allocator find whatever block
7431 * it can find. If we reach this point, we
7432 * will have tried the cluster allocator
7433 * plenty of times and not have found
7434 * anything, so we are likely way too
7435 * fragmented for the clustering stuff to find
7436 * anything.
7437 *
7438 * However, if the cluster is taken from the
7439 * current block group, release the cluster
7440 * first, so that we stand a better chance of
7441 * succeeding in the unclustered
7442 * allocation. */
7443 if (loop >= LOOP_NO_EMPTY_SIZE &&
7444 used_block_group != block_group) {
7445 spin_unlock(&last_ptr->refill_lock);
7446 btrfs_release_block_group(used_block_group,
7448 goto unclustered_alloc;
7452 * this cluster didn't work out, free it and
7453 * move on
7454 */
7455 btrfs_return_cluster_to_free_space(NULL, last_ptr);
7457 if (used_block_group != block_group)
7458 btrfs_release_block_group(used_block_group,
7461 if (loop >= LOOP_NO_EMPTY_SIZE) {
7462 spin_unlock(&last_ptr->refill_lock);
7463 goto unclustered_alloc;
7466 aligned_cluster = max_t(unsigned long,
7467 empty_cluster + empty_size,
7468 block_group->full_stripe_len);
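/*
 * Covering at least a full stripe should keep clustered
 * allocations stripe aligned on striped raid profiles.
 */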
7470 /* allocate a cluster in this block group */
7471 ret = btrfs_find_space_cluster(fs_info, block_group,
7472 last_ptr, search_start,
7477 * now pull our allocation out of this
7478 * cluster
7479 */
7480 offset = btrfs_alloc_from_cluster(block_group,
7481 last_ptr,
7482 num_bytes,
7483 search_start,
7484 &max_extent_size);
7485 if (offset) {
7486 /* we found one, proceed */
7487 spin_unlock(&last_ptr->refill_lock);
7488 trace_btrfs_reserve_extent_cluster(
7489 block_group, search_start,
7493 } else if (!cached && loop > LOOP_CACHING_NOWAIT
7494 && !failed_cluster_refill) {
7495 spin_unlock(&last_ptr->refill_lock);
7497 failed_cluster_refill = true;
7498 wait_block_group_cache_progress(block_group,
7499 num_bytes + empty_cluster + empty_size);
7500 goto have_block_group;
7504 * at this point we either didn't find a cluster
7505 * or we weren't able to allocate a block from our
7506 * cluster. Free the cluster we've been trying
7507 * to use, and go to the next block group
7509 btrfs_return_cluster_to_free_space(NULL, last_ptr);
7510 spin_unlock(&last_ptr->refill_lock);
7516 * We are doing an unclustered alloc, set the fragmented flag so
7517 * we don't bother trying to set up a cluster again until we get
7518 * more space.
7519 */
7520 if (unlikely(last_ptr)) {
7521 spin_lock(&last_ptr->lock);
7522 last_ptr->fragmented = 1;
7523 spin_unlock(&last_ptr->lock);
7526 struct btrfs_free_space_ctl *ctl =
7527 block_group->free_space_ctl;
7529 spin_lock(&ctl->tree_lock);
7530 if (ctl->free_space <
7531 num_bytes + empty_cluster + empty_size) {
7532 max_free_space = max(max_free_space,
7533 ctl->free_space);
7534 spin_unlock(&ctl->tree_lock);
7535 goto loop;
7537 spin_unlock(&ctl->tree_lock);
7540 offset = btrfs_find_space_for_alloc(block_group, search_start,
7541 num_bytes, empty_size,
7542 &max_extent_size);
7544 * If we didn't find a chunk, and we haven't failed on this
7545 * block group before, and this block group is in the middle of
7546 * caching and we are ok with waiting, then go ahead and wait
7547 * for progress to be made, and set failed_alloc to true.
7549 * If failed_alloc is true then we've already waited on this
7550 * block group once and should move on to the next block group.
7552 if (!offset && !failed_alloc && !cached &&
7553 loop > LOOP_CACHING_NOWAIT) {
7554 wait_block_group_cache_progress(block_group,
7555 num_bytes + empty_size);
7556 failed_alloc = true;
7557 goto have_block_group;
7558 } else if (!offset) {
7559 goto loop;
7560 }
7561 checks:
7562 search_start = round_up(offset, fs_info->stripesize);
7564 /* move on to the next group */
7565 if (search_start + num_bytes >
7566 block_group->key.objectid + block_group->key.offset) {
7567 btrfs_add_free_space(block_group, offset, num_bytes);
7568 goto loop;
7569 }
7571 if (offset < search_start)
7572 btrfs_add_free_space(block_group, offset,
7573 search_start - offset);
7575 ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
7576 num_bytes, delalloc);
7577 if (ret == -EAGAIN) {
7578 btrfs_add_free_space(block_group, offset, num_bytes);
7579 goto loop;
7580 }
7581 btrfs_inc_block_group_reservations(block_group);
7583 /* we are all good, let's return */
7584 ins->objectid = search_start;
7585 ins->offset = num_bytes;
7587 trace_btrfs_reserve_extent(block_group, search_start, num_bytes);
7588 btrfs_release_block_group(block_group, delalloc);
7589 break;
7590 loop:
7591 failed_cluster_refill = false;
7592 failed_alloc = false;
7593 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
7595 btrfs_release_block_group(block_group, delalloc);
7598 up_read(&space_info->groups_sem);
7600 if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
7601 && !orig_have_caching_bg)
7602 orig_have_caching_bg = true;
7604 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7605 goto search;
7607 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7608 goto search;
7611 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7612 * caching kthreads as we move along
7613 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7614 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7615 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7616 * again
7617 */
7618 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7620 if (loop == LOOP_CACHING_NOWAIT) {
7622 * We want to skip the LOOP_CACHING_WAIT step if we
7623 * don't have any uncached bgs and we've already done a
7624 * full search through.
7626 if (orig_have_caching_bg || !full_search)
7627 loop = LOOP_CACHING_WAIT;
7628 else
7629 loop = LOOP_ALLOC_CHUNK;
7634 if (loop == LOOP_ALLOC_CHUNK) {
7635 struct btrfs_trans_handle *trans;
7638 trans = current->journal_info;
7642 trans = btrfs_join_transaction(root);
7644 if (IS_ERR(trans)) {
7645 ret = PTR_ERR(trans);
7646 goto out;
7647 }
7649 ret = do_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
7652 * If we can't allocate a new chunk we've already looped
7653 * through at least once, move on to the NO_EMPTY_SIZE
7654 * case.
7655 */
7656 if (ret == -ENOSPC)
7657 loop = LOOP_NO_EMPTY_SIZE;
7660 * Do not bail out on ENOSPC since we
7661 * can do more things.
7663 if (ret < 0 && ret != -ENOSPC)
7664 btrfs_abort_transaction(trans, ret);
7668 btrfs_end_transaction(trans);
7673 if (loop == LOOP_NO_EMPTY_SIZE) {
7675 * Don't loop again if we already have no empty_size and
7676 * no empty_cluster.
7677 */
7678 if (empty_size == 0 &&
7679 empty_cluster == 0) {
7680 ret = -ENOSPC;
7681 goto out;
7682 }
7688 } else if (!ins->objectid) {
7689 ret = -ENOSPC;
7690 } else if (ins->objectid) {
7691 if (!use_cluster && last_ptr) {
7692 spin_lock(&last_ptr->lock);
7693 last_ptr->window_start = ins->objectid;
7694 spin_unlock(&last_ptr->lock);
7698 out:
7699 if (ret == -ENOSPC) {
7700 if (!max_extent_size)
7701 max_extent_size = max_free_space;
7702 spin_lock(&space_info->lock);
7703 space_info->max_extent_size = max_extent_size;
7704 spin_unlock(&space_info->lock);
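/*
 * Report the largest free extent we saw back through ins->offset
 * so the caller can retry with a smaller allocation.
 */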
7705 ins->offset = max_extent_size;
7710 static void dump_space_info(struct btrfs_fs_info *fs_info,
7711 struct btrfs_space_info *info, u64 bytes,
7712 int dump_block_groups)
7714 struct btrfs_block_group_cache *cache;
7717 spin_lock(&info->lock);
7718 btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
7720 info->total_bytes - btrfs_space_info_used(info, true),
7721 info->full ? "" : "not ");
7723 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
7724 info->total_bytes, info->bytes_used, info->bytes_pinned,
7725 info->bytes_reserved, info->bytes_may_use,
7726 info->bytes_readonly);
7727 spin_unlock(&info->lock);
7729 if (!dump_block_groups)
7732 down_read(&info->groups_sem);
7733 again:
7734 list_for_each_entry(cache, &info->block_groups[index], list) {
7735 spin_lock(&cache->lock);
7737 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
7738 cache->key.objectid, cache->key.offset,
7739 btrfs_block_group_used(&cache->item), cache->pinned,
7740 cache->reserved, cache->ro ? "[readonly]" : "");
7741 btrfs_dump_free_space(cache, bytes);
7742 spin_unlock(&cache->lock);
7744 if (++index < BTRFS_NR_RAID_TYPES)
7745 goto again;
7746 up_read(&info->groups_sem);
7750 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
7751 * hole that is at least as big as @num_bytes.
7753 * @root - The root that will contain this extent
7755 * @ram_bytes - The amount of space in ram that @num_bytes take. This
7756 * is used for accounting purposes. This value differs
7757 * from @num_bytes only in the case of compressed extents.
7759 * @num_bytes - Number of bytes to allocate on-disk.
7761 * @min_alloc_size - Indicates the minimum amount of space that the
7762 * allocator should try to satisfy. In some cases
7763 * @num_bytes may be larger than what is required and if
7764 * the filesystem is fragmented then allocation fails.
7765 * However, the presence of @min_alloc_size gives a
7766 * chance to try and satisfy the smaller allocation.
7768 * @empty_size - A hint that you plan on doing more COW. This is the
7769 * size in bytes the allocator should try to find free
7770 * next to the block it returns. This is just a hint and
7771 * may be ignored by the allocator.
7773 * @hint_byte - Hint to the allocator to start searching above the byte
7774 * address passed. It might be ignored.
7776 * @ins - This key is modified to record the found hole. It will
7777 * have the following values:
7778 * ins->objectid == start position
7779 * ins->flags = BTRFS_EXTENT_ITEM_KEY
7780 * ins->offset == the size of the hole.
7782 * @is_data - Boolean flag indicating whether an extent is
7783 * allocated for data (true) or metadata (false)
7785 * @delalloc - Boolean flag indicating whether this allocation is for
7786 * delalloc or not. If 'true' data_rwsem of block groups
7787 * is going to be acquired.
7790 * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
7791 * case -ENOSPC is returned then @ins->offset will contain the size of the
7792 * largest available hole the allocator managed to find.
7794 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
7795 u64 num_bytes, u64 min_alloc_size,
7796 u64 empty_size, u64 hint_byte,
7797 struct btrfs_key *ins, int is_data, int delalloc)
7799 struct btrfs_fs_info *fs_info = root->fs_info;
7800 bool final_tried = num_bytes == min_alloc_size;
7804 flags = get_alloc_profile_by_root(root, is_data);
7805 again:
7806 WARN_ON(num_bytes < fs_info->sectorsize);
7807 ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
7808 hint_byte, ins, flags, delalloc);
7809 if (!ret && !is_data) {
7810 btrfs_dec_block_group_reservations(fs_info, ins->objectid);
7811 } else if (ret == -ENOSPC) {
7812 if (!final_tried && ins->offset) {
7813 num_bytes = min(num_bytes >> 1, ins->offset);
7814 num_bytes = round_down(num_bytes,
7815 fs_info->sectorsize);
7816 num_bytes = max(num_bytes, min_alloc_size);
7817 ram_bytes = num_bytes;
7818 if (num_bytes == min_alloc_size)
7819 final_tried = true;
7820 goto again;
7821 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
7822 struct btrfs_space_info *sinfo;
7824 sinfo = __find_space_info(fs_info, flags);
7826 "allocation failed flags %llu, wanted %llu",
7829 dump_space_info(fs_info, sinfo, num_bytes, 1);
7836 static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
7838 int pin, int delalloc)
7840 struct btrfs_block_group_cache *cache;
7843 cache = btrfs_lookup_block_group(fs_info, start);
7845 btrfs_err(fs_info, "Unable to find block group for %llu",
7850 if (pin)
7851 pin_down_extent(fs_info, cache, start, len, 1);
7852 else {
7853 if (btrfs_test_opt(fs_info, DISCARD))
7854 ret = btrfs_discard_extent(fs_info, start, len, NULL);
7855 btrfs_add_free_space(cache, start, len);
7856 btrfs_free_reserved_bytes(cache, len, delalloc);
7857 trace_btrfs_reserved_extent_free(fs_info, start, len);
7860 btrfs_put_block_group(cache);
7864 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
7865 u64 start, u64 len, int delalloc)
7867 return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
7870 int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
7873 return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
7876 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7877 u64 parent, u64 root_objectid,
7878 u64 flags, u64 owner, u64 offset,
7879 struct btrfs_key *ins, int ref_mod)
7881 struct btrfs_fs_info *fs_info = trans->fs_info;
7883 struct btrfs_extent_item *extent_item;
7884 struct btrfs_extent_inline_ref *iref;
7885 struct btrfs_path *path;
7886 struct extent_buffer *leaf;
7890 if (parent > 0)
7891 type = BTRFS_SHARED_DATA_REF_KEY;
7892 else
7893 type = BTRFS_EXTENT_DATA_REF_KEY;
7895 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7897 path = btrfs_alloc_path();
7898 if (!path)
7899 return -ENOMEM;
7901 path->leave_spinning = 1;
7902 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7903 ins, size);
7904 if (ret) {
7905 btrfs_free_path(path);
7906 return ret;
7907 }
7909 leaf = path->nodes[0];
7910 extent_item = btrfs_item_ptr(leaf, path->slots[0],
7911 struct btrfs_extent_item);
7912 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7913 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7914 btrfs_set_extent_flags(leaf, extent_item,
7915 flags | BTRFS_EXTENT_FLAG_DATA);
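/*
 * The backref is stored inline, directly after the extent item:
 * a shared ref keyed by the parent block, or a normal ref keyed
 * by root/objectid/offset.
 */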
7917 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7918 btrfs_set_extent_inline_ref_type(leaf, iref, type);
7919 if (parent > 0) {
7920 struct btrfs_shared_data_ref *ref;
7921 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7922 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7923 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7924 } else {
7925 struct btrfs_extent_data_ref *ref;
7926 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7927 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7928 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7929 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7930 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7933 btrfs_mark_buffer_dirty(path->nodes[0]);
7934 btrfs_free_path(path);
7936 ret = remove_from_free_space_tree(trans, ins->objectid, ins->offset);
7937 if (ret)
7938 return ret;
7940 ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
7941 if (ret) { /* -ENOENT, logic error */
7942 btrfs_err(fs_info, "update block group failed for %llu %llu",
7943 ins->objectid, ins->offset);
7946 trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
7950 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7951 struct btrfs_delayed_ref_node *node,
7952 struct btrfs_delayed_extent_op *extent_op)
7954 struct btrfs_fs_info *fs_info = trans->fs_info;
7956 struct btrfs_extent_item *extent_item;
7957 struct btrfs_key extent_key;
7958 struct btrfs_tree_block_info *block_info;
7959 struct btrfs_extent_inline_ref *iref;
7960 struct btrfs_path *path;
7961 struct extent_buffer *leaf;
7962 struct btrfs_delayed_tree_ref *ref;
7963 u32 size = sizeof(*extent_item) + sizeof(*iref);
7965 u64 flags = extent_op->flags_to_set;
7966 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
7968 ref = btrfs_delayed_node_to_tree_ref(node);
7970 extent_key.objectid = node->bytenr;
7971 if (skinny_metadata) {
7972 extent_key.offset = ref->level;
7973 extent_key.type = BTRFS_METADATA_ITEM_KEY;
7974 num_bytes = fs_info->nodesize;
7976 extent_key.offset = node->num_bytes;
7977 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
7978 size += sizeof(*block_info);
7979 num_bytes = node->num_bytes;
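/*
 * Skinny metadata items encode the tree level in the key offset
 * and omit the tree_block_info, so they are smaller; the classic
 * format keys by byte size and stores the level separately.
 */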
7982 path = btrfs_alloc_path();
7986 path->leave_spinning = 1;
7987 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7988 &extent_key, size);
7989 if (ret) {
7990 btrfs_free_path(path);
7994 leaf = path->nodes[0];
7995 extent_item = btrfs_item_ptr(leaf, path->slots[0],
7996 struct btrfs_extent_item);
7997 btrfs_set_extent_refs(leaf, extent_item, 1);
7998 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7999 btrfs_set_extent_flags(leaf, extent_item,
8000 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
8002 if (skinny_metadata) {
8003 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8004 } else {
8005 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
8006 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
8007 btrfs_set_tree_block_level(leaf, block_info, ref->level);
8008 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
8011 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
8012 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
8013 btrfs_set_extent_inline_ref_type(leaf, iref,
8014 BTRFS_SHARED_BLOCK_REF_KEY);
8015 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
8017 btrfs_set_extent_inline_ref_type(leaf, iref,
8018 BTRFS_TREE_BLOCK_REF_KEY);
8019 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
8022 btrfs_mark_buffer_dirty(leaf);
8023 btrfs_free_path(path);
8025 ret = remove_from_free_space_tree(trans, extent_key.objectid,
8026 num_bytes);
8027 if (ret)
8028 return ret;
8030 ret = update_block_group(trans, fs_info, extent_key.objectid,
8031 fs_info->nodesize, 1);
8032 if (ret) { /* -ENOENT, logic error */
8033 btrfs_err(fs_info, "update block group failed for %llu %llu",
8034 extent_key.objectid, extent_key.offset);
8038 trace_btrfs_reserved_extent_alloc(fs_info, extent_key.objectid,
8039 fs_info->nodesize);
8040 return ret;
8043 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8044 struct btrfs_root *root, u64 owner,
8045 u64 offset, u64 ram_bytes,
8046 struct btrfs_key *ins)
8050 BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
8052 btrfs_ref_tree_mod(root, ins->objectid, ins->offset, 0,
8053 root->root_key.objectid, owner, offset,
8054 BTRFS_ADD_DELAYED_EXTENT);
8056 ret = btrfs_add_delayed_data_ref(trans, ins->objectid,
8057 ins->offset, 0,
8058 root->root_key.objectid, owner,
8059 offset, ram_bytes,
8060 BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
8061 return ret;
8065 * this is used by the tree logging recovery code. It records that
8066 * an extent has been allocated and makes sure to clear the free
8067 * space cache bits as well
8069 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
8070 u64 root_objectid, u64 owner, u64 offset,
8071 struct btrfs_key *ins)
8073 struct btrfs_fs_info *fs_info = trans->fs_info;
8075 struct btrfs_block_group_cache *block_group;
8076 struct btrfs_space_info *space_info;
8079 * Mixed block groups will exclude before processing the log so we only
8080 * need to do the exclude dance if this fs isn't mixed.
8082 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
8083 ret = __exclude_logged_extent(fs_info, ins->objectid,
8084 ins->offset);
8085 if (ret)
8086 return ret;
8087 }
8089 block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
8090 if (!block_group)
8091 return -EINVAL;
8093 space_info = block_group->space_info;
8094 spin_lock(&space_info->lock);
8095 spin_lock(&block_group->lock);
8096 space_info->bytes_reserved += ins->offset;
8097 block_group->reserved += ins->offset;
8098 spin_unlock(&block_group->lock);
8099 spin_unlock(&space_info->lock);
8101 ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
8102 offset, ins, 1);
8103 btrfs_put_block_group(block_group);
8107 static struct extent_buffer *
8108 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8109 u64 bytenr, int level, u64 owner)
8111 struct btrfs_fs_info *fs_info = root->fs_info;
8112 struct extent_buffer *buf;
8114 buf = btrfs_find_create_tree_block(fs_info, bytenr);
8115 if (IS_ERR(buf))
8116 return buf;
8119 * Extra safety check in case the extent tree is corrupted and extent
8120 * allocator chooses to use a tree block which is already used and
8121 * locked.
8122 */
8123 if (buf->lock_owner == current->pid) {
8124 btrfs_err_rl(fs_info,
8125 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
8126 buf->start, btrfs_header_owner(buf), current->pid);
8127 free_extent_buffer(buf);
8128 return ERR_PTR(-EUCLEAN);
8131 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
8132 btrfs_tree_lock(buf);
8133 clean_tree_block(fs_info, buf);
8134 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
8136 btrfs_set_lock_blocking(buf);
8137 set_extent_buffer_uptodate(buf);
8139 memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
8140 btrfs_set_header_level(buf, level);
8141 btrfs_set_header_bytenr(buf, buf->start);
8142 btrfs_set_header_generation(buf, trans->transid);
8143 btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
8144 btrfs_set_header_owner(buf, owner);
8145 write_extent_buffer_fsid(buf, fs_info->fsid);
8146 write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
8147 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8148 buf->log_index = root->log_transid % 2;
8150 * we allow two log transactions at a time, use different
8151 * EXTENT bits to differentiate dirty pages.
8153 if (buf->log_index == 0)
8154 set_extent_dirty(&root->dirty_log_pages, buf->start,
8155 buf->start + buf->len - 1, GFP_NOFS);
8157 set_extent_new(&root->dirty_log_pages, buf->start,
8158 buf->start + buf->len - 1);
8160 buf->log_index = -1;
8161 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
8162 buf->start + buf->len - 1, GFP_NOFS);
8164 trans->dirty = true;
8165 /* this returns a buffer locked for blocking */
8166 return buf;
8169 static struct btrfs_block_rsv *
8170 use_block_rsv(struct btrfs_trans_handle *trans,
8171 struct btrfs_root *root, u32 blocksize)
8173 struct btrfs_fs_info *fs_info = root->fs_info;
8174 struct btrfs_block_rsv *block_rsv;
8175 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
8177 bool global_updated = false;
8179 block_rsv = get_block_rsv(trans, root);
8181 if (unlikely(block_rsv->size == 0))
8182 goto try_reserve;
8183 again:
8184 ret = block_rsv_use_bytes(block_rsv, blocksize);
8185 if (!ret)
8186 return block_rsv;
8188 if (block_rsv->failfast)
8189 return ERR_PTR(ret);
8191 if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
8192 global_updated = true;
8193 update_global_block_rsv(fs_info);
8194 goto again;
8195 }
8197 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
8198 static DEFINE_RATELIMIT_STATE(_rs,
8199 DEFAULT_RATELIMIT_INTERVAL * 10,
8200 /*DEFAULT_RATELIMIT_BURST*/ 1);
8201 if (__ratelimit(&_rs))
8203 "BTRFS: block rsv returned %d\n", ret);
8206 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
8207 BTRFS_RESERVE_NO_FLUSH);
8208 if (!ret)
8209 return block_rsv;
8211 * If we couldn't reserve metadata bytes try and use some from
8212 * the global reserve if its space type is the same as the global
8213 * reserve's.
8214 */
8215 if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
8216 block_rsv->space_info == global_rsv->space_info) {
8217 ret = block_rsv_use_bytes(global_rsv, blocksize);
8218 if (!ret)
8219 return global_rsv;
8220 }
8221 return ERR_PTR(ret);
8224 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
8225 struct btrfs_block_rsv *block_rsv, u32 blocksize)
8227 block_rsv_add_bytes(block_rsv, blocksize, 0);
8228 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL);
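/*
 * Give the blocksize back to the rsv; the zero-sized release then
 * lets any excess above the rsv's target flow back to the
 * space_info.
 */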
8232 * finds a free extent and does all the dirty work required for allocation
8233 * returns the tree buffer or an ERR_PTR on error.
8235 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
8236 struct btrfs_root *root,
8237 u64 parent, u64 root_objectid,
8238 const struct btrfs_disk_key *key,
8239 int level, u64 hint,
8242 struct btrfs_fs_info *fs_info = root->fs_info;
8243 struct btrfs_key ins;
8244 struct btrfs_block_rsv *block_rsv;
8245 struct extent_buffer *buf;
8246 struct btrfs_delayed_extent_op *extent_op;
8249 u32 blocksize = fs_info->nodesize;
8250 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
8252 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8253 if (btrfs_is_testing(fs_info)) {
8254 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
8255 level, root_objectid);
8256 if (!IS_ERR(buf))
8257 root->alloc_bytenr += blocksize;
8258 return buf;
8259 }
8262 block_rsv = use_block_rsv(trans, root, blocksize);
8263 if (IS_ERR(block_rsv))
8264 return ERR_CAST(block_rsv);
8266 ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
8267 empty_size, hint, &ins, 0, 0);
8268 if (ret)
8269 goto out_unuse;
8271 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
8272 root_objectid);
8273 if (IS_ERR(buf)) {
8274 ret = PTR_ERR(buf);
8275 goto out_free_reserved;
8278 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8279 if (parent == 0)
8280 parent = ins.objectid;
8281 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8282 } else
8283 BUG_ON(parent > 0);
8285 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8286 extent_op = btrfs_alloc_delayed_extent_op();
8287 if (!extent_op) {
8288 ret = -ENOMEM;
8289 goto out_free_buf;
8290 }
8291 if (key)
8292 memcpy(&extent_op->key, key, sizeof(extent_op->key));
8293 else
8294 memset(&extent_op->key, 0, sizeof(extent_op->key));
8295 extent_op->flags_to_set = flags;
8296 extent_op->update_key = skinny_metadata ? false : true;
8297 extent_op->update_flags = true;
8298 extent_op->is_data = false;
8299 extent_op->level = level;
8301 btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
8302 root_objectid, level, 0,
8303 BTRFS_ADD_DELAYED_EXTENT);
8304 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
8305 ins.offset, parent,
8306 root_objectid, level,
8307 BTRFS_ADD_DELAYED_EXTENT,
8308 extent_op, NULL, NULL);
8309 if (ret)
8310 goto out_free_delayed;
8311 }
8312 return buf;
8314 out_free_delayed:
8315 btrfs_free_delayed_extent_op(extent_op);
8316 out_free_buf:
8317 free_extent_buffer(buf);
8318 out_free_reserved:
8319 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
8320 out_unuse:
8321 unuse_block_rsv(fs_info, block_rsv, blocksize);
8322 return ERR_PTR(ret);
8325 struct walk_control {
8326 u64 refs[BTRFS_MAX_LEVEL];
8327 u64 flags[BTRFS_MAX_LEVEL];
8328 struct btrfs_key update_progress;
8329 int stage;
8330 int level;
8331 int shared_level;
8332 int update_ref;
8333 int keep_locks;
8334 int reada_slot;
8335 int reada_count;
8336 };
8338 #define DROP_REFERENCE 1
8339 #define UPDATE_BACKREF 2
8341 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8342 struct btrfs_root *root,
8343 struct walk_control *wc,
8344 struct btrfs_path *path)
8346 struct btrfs_fs_info *fs_info = root->fs_info;
8352 struct btrfs_key key;
8353 struct extent_buffer *eb;
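/*
 * Scale the readahead window down while we keep revisiting earlier
 * slots and up while we make forward progress, clamped between 2
 * and the number of pointers per block.
 */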
8358 if (path->slots[wc->level] < wc->reada_slot) {
8359 wc->reada_count = wc->reada_count * 2 / 3;
8360 wc->reada_count = max(wc->reada_count, 2);
8362 wc->reada_count = wc->reada_count * 3 / 2;
8363 wc->reada_count = min_t(int, wc->reada_count,
8364 BTRFS_NODEPTRS_PER_BLOCK(fs_info));
8367 eb = path->nodes[wc->level];
8368 nritems = btrfs_header_nritems(eb);
8370 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8371 if (nread >= wc->reada_count)
8372 break;
8374 cond_resched();
8375 bytenr = btrfs_node_blockptr(eb, slot);
8376 generation = btrfs_node_ptr_generation(eb, slot);
8378 if (slot == path->slots[wc->level])
8379 goto reada;
8381 if (wc->stage == UPDATE_BACKREF &&
8382 generation <= root->root_key.offset)
8385 /* We don't lock the tree block, it's OK to be racy here */
8386 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
8387 wc->level - 1, 1, &refs,
8388 &flags);
8389 /* We don't care about errors in readahead. */
8390 if (ret < 0)
8391 continue;
8392 BUG_ON(refs == 0);
8394 if (wc->stage == DROP_REFERENCE) {
8395 if (refs == 1)
8396 goto reada;
8398 if (wc->level == 1 &&
8399 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8400 continue;
8401 if (!wc->update_ref ||
8402 generation <= root->root_key.offset)
8403 continue;
8404 btrfs_node_key_to_cpu(eb, &key, slot);
8405 ret = btrfs_comp_cpu_keys(&key,
8406 &wc->update_progress);
8407 if (ret < 0)
8408 continue;
8409 } else {
8410 if (wc->level == 1 &&
8411 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8412 continue;
8413 }
8414 reada:
8415 readahead_tree_block(fs_info, bytenr);
8416 nread++;
8417 }
8418 wc->reada_slot = slot;
8422 * helper to process tree block while walking down the tree.
8424 * when wc->stage == UPDATE_BACKREF, this function updates
8425 * back refs for pointers in the block.
8427 * NOTE: return value 1 means we should stop walking down.
8429 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8430 struct btrfs_root *root,
8431 struct btrfs_path *path,
8432 struct walk_control *wc, int lookup_info)
8434 struct btrfs_fs_info *fs_info = root->fs_info;
8435 int level = wc->level;
8436 struct extent_buffer *eb = path->nodes[level];
8437 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8440 if (wc->stage == UPDATE_BACKREF &&
8441 btrfs_header_owner(eb) != root->root_key.objectid)
8442 return 1;
8445 * when reference count of tree block is 1, it won't increase
8446 * again. once full backref flag is set, we never clear it.
8448 if (lookup_info &&
8449 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8450 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8451 BUG_ON(!path->locks[level]);
8452 ret = btrfs_lookup_extent_info(trans, fs_info,
8453 eb->start, level, 1,
8454 &wc->refs[level],
8455 &wc->flags[level]);
8456 BUG_ON(ret == -ENOMEM);
8457 if (ret)
8458 return ret;
8459 BUG_ON(wc->refs[level] == 0);
8462 if (wc->stage == DROP_REFERENCE) {
8463 if (wc->refs[level] > 1)
8464 return 1;
8466 if (path->locks[level] && !wc->keep_locks) {
8467 btrfs_tree_unlock_rw(eb, path->locks[level]);
8468 path->locks[level] = 0;
8469 }
8470 return 0;
8471 }
8473 /* wc->stage == UPDATE_BACKREF */
8474 if (!(wc->flags[level] & flag)) {
8475 BUG_ON(!path->locks[level]);
8476 ret = btrfs_inc_ref(trans, root, eb, 1);
8477 BUG_ON(ret); /* -ENOMEM */
8478 ret = btrfs_dec_ref(trans, root, eb, 0);
8479 BUG_ON(ret); /* -ENOMEM */
8480 ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
8482 btrfs_header_level(eb), 0);
8483 BUG_ON(ret); /* -ENOMEM */
8484 wc->flags[level] |= flag;
8488 * the block is shared by multiple trees, so it's not good to
8489 * keep the tree lock
8491 if (path->locks[level] && level > 0) {
8492 btrfs_tree_unlock_rw(eb, path->locks[level]);
8493 path->locks[level] = 0;
8499 * helper to process tree block pointer.
8501 * when wc->stage == DROP_REFERENCE, this function checks
8502 * reference count of the block pointed to. if the block
8503 * is shared and we need to update back refs for the subtree
8504 * rooted at the block, this function changes wc->stage to
8505 * UPDATE_BACKREF. if the block is shared and there is no
8506 * need to update back refs, this function drops the reference
8507 * to the block.
8508 *
8509 * NOTE: return value 1 means we should stop walking down.
8511 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8512 struct btrfs_root *root,
8513 struct btrfs_path *path,
8514 struct walk_control *wc, int *lookup_info)
8516 struct btrfs_fs_info *fs_info = root->fs_info;
8521 struct btrfs_key key;
8522 struct btrfs_key first_key;
8523 struct extent_buffer *next;
8524 int level = wc->level;
8527 bool need_account = false;
8529 generation = btrfs_node_ptr_generation(path->nodes[level],
8530 path->slots[level]);
8532 * if the lower level block was created before the snapshot
8533 * was created, we know there is no need to update back refs
8534 * for the subtree
8535 */
8536 if (wc->stage == UPDATE_BACKREF &&
8537 generation <= root->root_key.offset) {
8542 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8543 btrfs_node_key_to_cpu(path->nodes[level], &first_key,
8544 path->slots[level]);
8545 blocksize = fs_info->nodesize;
8547 next = find_extent_buffer(fs_info, bytenr);
8548 if (!next) {
8549 next = btrfs_find_create_tree_block(fs_info, bytenr);
8550 if (IS_ERR(next))
8551 return PTR_ERR(next);
8553 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8554 level - 1);
8555 reada = 1;
8556 }
8557 btrfs_tree_lock(next);
8558 btrfs_set_lock_blocking(next);
8560 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
8561 &wc->refs[level - 1],
8562 &wc->flags[level - 1]);
8566 if (unlikely(wc->refs[level - 1] == 0)) {
8567 btrfs_err(fs_info, "Missing references.");
8573 if (wc->stage == DROP_REFERENCE) {
8574 if (wc->refs[level - 1] > 1) {
8575 need_account = true;
8576 if (level == 1 &&
8577 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8578 goto skip;
8580 if (!wc->update_ref ||
8581 generation <= root->root_key.offset)
8582 goto skip;
8584 btrfs_node_key_to_cpu(path->nodes[level], &key,
8585 path->slots[level]);
8586 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8590 wc->stage = UPDATE_BACKREF;
8591 wc->shared_level = level - 1;
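/*
 * The subtree below this shared block needs its backrefs brought
 * up to date before more references are dropped, so rewalk it in
 * UPDATE_BACKREF mode.
 */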
8593 } else {
8594 if (level == 1 &&
8595 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8596 goto skip;
8597 }
8599 if (!btrfs_buffer_uptodate(next, generation, 0)) {
8600 btrfs_tree_unlock(next);
8601 free_extent_buffer(next);
8602 next = NULL;
8603 *lookup_info = 1;
8604 }
8606 if (!next) {
8607 if (reada && level == 1)
8608 reada_walk_down(trans, root, wc, path);
8609 next = read_tree_block(fs_info, bytenr, generation, level - 1,
8610 &first_key);
8611 if (IS_ERR(next)) {
8612 return PTR_ERR(next);
8613 } else if (!extent_buffer_uptodate(next)) {
8614 free_extent_buffer(next);
8615 return -EIO;
8616 }
8617 btrfs_tree_lock(next);
8618 btrfs_set_lock_blocking(next);
8621 level--;
8622 ASSERT(level == btrfs_header_level(next));
8623 if (level != btrfs_header_level(next)) {
8624 btrfs_err(root->fs_info, "mismatched level");
8628 path->nodes[level] = next;
8629 path->slots[level] = 0;
8630 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8631 wc->level = level;
8632 if (wc->level == 1)
8633 *lookup_info = 1;
8634 return 0;
8635 skip:
8636 wc->refs[level - 1] = 0;
8637 wc->flags[level - 1] = 0;
8638 if (wc->stage == DROP_REFERENCE) {
8639 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8640 parent = path->nodes[level]->start;
8642 ASSERT(root->root_key.objectid ==
8643 btrfs_header_owner(path->nodes[level]));
8644 if (root->root_key.objectid !=
8645 btrfs_header_owner(path->nodes[level])) {
8646 btrfs_err(root->fs_info,
8647 "mismatched block owner");
8655 ret = btrfs_qgroup_trace_subtree(trans, next,
8656 generation, level - 1);
8658 btrfs_err_rl(fs_info,
8659 "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
8663 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
8664 parent, root->root_key.objectid,
8665 level - 1, 0);
8666 if (ret)
8667 goto out_unlock;
8668 }
8670 *lookup_info = 1;
8671 ret = 1;
8673 out_unlock:
8674 btrfs_tree_unlock(next);
8675 free_extent_buffer(next);
8677 return ret;
8681 * helper to process tree block while walking up the tree.
8683 * when wc->stage == DROP_REFERENCE, this function drops
8684 * reference count on the block.
8686 * when wc->stage == UPDATE_BACKREF, this function changes
8687 * wc->stage back to DROP_REFERENCE if we changed wc->stage
8688 * to UPDATE_BACKREF previously while processing the block.
8690 * NOTE: return value 1 means we should stop walking up.
8692 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8693 struct btrfs_root *root,
8694 struct btrfs_path *path,
8695 struct walk_control *wc)
8697 struct btrfs_fs_info *fs_info = root->fs_info;
8699 int level = wc->level;
8700 struct extent_buffer *eb = path->nodes[level];
8703 if (wc->stage == UPDATE_BACKREF) {
8704 BUG_ON(wc->shared_level < level);
8705 if (level < wc->shared_level)
8706 goto out;
8708 ret = find_next_key(path, level + 1, &wc->update_progress);
8712 wc->stage = DROP_REFERENCE;
8713 wc->shared_level = -1;
8714 path->slots[level] = 0;
8717 * check reference count again if the block isn't locked.
8718 * we should start walking down the tree again if reference
8719 * count is one.
8720 */
8721 if (!path->locks[level]) {
8723 btrfs_tree_lock(eb);
8724 btrfs_set_lock_blocking(eb);
8725 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8727 ret = btrfs_lookup_extent_info(trans, fs_info,
8728 eb->start, level, 1,
8729 &wc->refs[level],
8730 &wc->flags[level]);
8731 if (ret < 0) {
8732 btrfs_tree_unlock_rw(eb, path->locks[level]);
8733 path->locks[level] = 0;
8734 return ret;
8735 }
8736 BUG_ON(wc->refs[level] == 0);
8737 if (wc->refs[level] == 1) {
8738 btrfs_tree_unlock_rw(eb, path->locks[level]);
8739 path->locks[level] = 0;
8740 return 1;
8741 }
8742 }
8745 /* wc->stage == DROP_REFERENCE */
8746 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8748 if (wc->refs[level] == 1) {
8749 if (level == 0) {
8750 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8751 ret = btrfs_dec_ref(trans, root, eb, 1);
8753 ret = btrfs_dec_ref(trans, root, eb, 0);
8754 BUG_ON(ret); /* -ENOMEM */
8755 ret = btrfs_qgroup_trace_leaf_items(trans, eb);
8757 btrfs_err_rl(fs_info,
8758 "error %d accounting leaf items. Quota is out of sync, rescan required.",
8762 /* make block locked assertion in clean_tree_block happy */
8763 if (!path->locks[level] &&
8764 btrfs_header_generation(eb) == trans->transid) {
8765 btrfs_tree_lock(eb);
8766 btrfs_set_lock_blocking(eb);
8767 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8769 clean_tree_block(fs_info, eb);
8772 if (eb == root->node) {
8773 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8774 parent = eb->start;
8775 else if (root->root_key.objectid != btrfs_header_owner(eb))
8776 goto owner_mismatch;
8778 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8779 parent = path->nodes[level + 1]->start;
8780 else if (root->root_key.objectid !=
8781 btrfs_header_owner(path->nodes[level + 1]))
8782 goto owner_mismatch;
8785 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8786 out:
8787 wc->refs[level] = 0;
8788 wc->flags[level] = 0;
8792 btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
8793 btrfs_header_owner(eb), root->root_key.objectid);
8794 return -EUCLEAN;
8795 }
8797 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8798 struct btrfs_root *root,
8799 struct btrfs_path *path,
8800 struct walk_control *wc)
8802 int level = wc->level;
8803 int lookup_info = 1;
8806 while (level >= 0) {
8807 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8808 if (ret > 0)
8809 break;
8811 if (level == 0)
8812 break;
8814 if (path->slots[level] >=
8815 btrfs_header_nritems(path->nodes[level]))
8816 break;
8818 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8819 if (ret > 0) {
8820 path->slots[level]++;
8821 continue;
8822 }
8823 if (ret < 0)
8824 return ret;
8825 level = wc->level;
8826 }
8827 return 0;
8829 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8830 struct btrfs_root *root,
8831 struct btrfs_path *path,
8832 struct walk_control *wc, int max_level)
8834 int level = wc->level;
8837 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8838 while (level < max_level && path->nodes[level]) {
8840 if (path->slots[level] + 1 <
8841 btrfs_header_nritems(path->nodes[level])) {
8842 path->slots[level]++;
8843 return 0;
8844 } else {
8845 ret = walk_up_proc(trans, root, path, wc);
8846 if (ret > 0)
8847 return 0;
8848 if (ret < 0)
8849 return ret;
8851 if (path->locks[level]) {
8852 btrfs_tree_unlock_rw(path->nodes[level],
8853 path->locks[level]);
8854 path->locks[level] = 0;
8856 free_extent_buffer(path->nodes[level]);
8857 path->nodes[level] = NULL;
8858 level++;
8859 }
8860 }
8861 return 1;
8865 * drop a subvolume tree.
8867 * this function traverses the tree freeing any blocks that are only
8868 * referenced by the tree.
8870 * when a shared tree block is found, this function decreases its
8871 * reference count by one. if update_ref is true, this function
8872 * also makes sure backrefs for the shared block and all lower level
8873 * blocks are properly updated.
8875 * If called with for_reloc == 0, may exit early with -EAGAIN
8877 int btrfs_drop_snapshot(struct btrfs_root *root,
8878 struct btrfs_block_rsv *block_rsv, int update_ref,
8881 struct btrfs_fs_info *fs_info = root->fs_info;
8882 struct btrfs_path *path;
8883 struct btrfs_trans_handle *trans;
8884 struct btrfs_root *tree_root = fs_info->tree_root;
8885 struct btrfs_root_item *root_item = &root->root_item;
8886 struct walk_control *wc;
8887 struct btrfs_key key;
8891 bool root_dropped = false;
8893 btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
8895 path = btrfs_alloc_path();
8896 if (!path) {
8897 err = -ENOMEM;
8898 goto out;
8899 }
8901 wc = kzalloc(sizeof(*wc), GFP_NOFS);
8902 if (!wc) {
8903 btrfs_free_path(path);
8904 err = -ENOMEM;
8905 goto out;
8906 }
8908 trans = btrfs_start_transaction(tree_root, 0);
8909 if (IS_ERR(trans)) {
8910 err = PTR_ERR(trans);
8914 err = btrfs_run_delayed_items(trans);
8915 if (err)
8916 goto out_end_trans;
8918 if (block_rsv)
8919 trans->block_rsv = block_rsv;
8921 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8922 level = btrfs_header_level(root->node);
8923 path->nodes[level] = btrfs_lock_root_node(root);
8924 btrfs_set_lock_blocking(path->nodes[level]);
8925 path->slots[level] = 0;
8926 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8927 memset(&wc->update_progress, 0,
8928 sizeof(wc->update_progress));
8930 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8931 memcpy(&wc->update_progress, &key,
8932 sizeof(wc->update_progress));
8934 level = root_item->drop_level;
8936 path->lowest_level = level;
8937 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8938 path->lowest_level = 0;
8946 * unlock our path, this is safe because only this
8947 * function is allowed to delete this snapshot
8949 btrfs_unlock_up_safe(path, 0);
8951 level = btrfs_header_level(root->node);
8953 btrfs_tree_lock(path->nodes[level]);
8954 btrfs_set_lock_blocking(path->nodes[level]);
8955 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8957 ret = btrfs_lookup_extent_info(trans, fs_info,
8958 path->nodes[level]->start,
8959 level, 1, &wc->refs[level],
8965 BUG_ON(wc->refs[level] == 0);
8967 if (level == root_item->drop_level)
8968 break;
8970 btrfs_tree_unlock(path->nodes[level]);
8971 path->locks[level] = 0;
8972 WARN_ON(wc->refs[level] != 1);
8978 wc->shared_level = -1;
8979 wc->stage = DROP_REFERENCE;
8980 wc->update_ref = update_ref;
8982 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
8986 ret = walk_down_tree(trans, root, path, wc);
8992 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8999 BUG_ON(wc->stage != DROP_REFERENCE);
9003 if (wc->stage == DROP_REFERENCE) {
9005 btrfs_node_key(path->nodes[level],
9006 &root_item->drop_progress,
9007 path->slots[level]);
9008 root_item->drop_level = level;
9011 BUG_ON(wc->level == 0);
9012 if (btrfs_should_end_transaction(trans) ||
9013 (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
9014 ret = btrfs_update_root(trans, tree_root,
9018 btrfs_abort_transaction(trans, ret);
9023 btrfs_end_transaction_throttle(trans);
9024 if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
9025 btrfs_debug(fs_info,
9026 "drop snapshot early exit");
9031 trans = btrfs_start_transaction(tree_root, 0);
9032 if (IS_ERR(trans)) {
9033 err = PTR_ERR(trans);
9037 trans->block_rsv = block_rsv;
9040 btrfs_release_path(path);
9044 ret = btrfs_del_root(trans, &root->root_key);
9046 btrfs_abort_transaction(trans, ret);
9051 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9052 ret = btrfs_find_root(tree_root, &root->root_key, path,
9055 btrfs_abort_transaction(trans, ret);
9058 } else if (ret > 0) {
9059 /* if we fail to delete the orphan item this time
9060 * around, it'll get picked up the next time.
9062 * The most common failure here is just -ENOENT.
9064 btrfs_del_orphan_item(trans, tree_root,
9065 root->root_key.objectid);
9069 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9070 btrfs_add_dropped_root(trans, root);
9072 free_extent_buffer(root->node);
9073 free_extent_buffer(root->commit_root);
9074 btrfs_put_fs_root(root);
9076 root_dropped = true;
9078 btrfs_end_transaction_throttle(trans);
9081 btrfs_free_path(path);
9084 * So if we need to stop dropping the snapshot for whatever reason we
9085 * need to make sure to add it back to the dead root list so that we
9086 * keep trying to do the work later. This also cleans up roots if we
9087 * don't have it in the radix (like when we recover after a power fail
9088 * or unmount) so we don't leak memory.
9090 if (!for_reloc && !root_dropped)
9091 btrfs_add_dead_root(root);
9092 if (err && err != -EAGAIN)
9093 btrfs_handle_fs_error(fs_info, err, NULL);
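/*
 * A minimal sketch of how a caller typically drives btrfs_drop_snapshot(),
 * loosely modeled on the cleaner thread's btrfs_clean_one_deleted_snapshot();
 * the helper name below is made up for illustration. Roots whose tree
 * blocks still use the old backref format cannot have update_ref set.
 */
static int example_drop_one_dead_root(struct btrfs_root *root)
{
	if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
		return btrfs_drop_snapshot(root, NULL, 0, 0);
	return btrfs_drop_snapshot(root, NULL, 1, 0);
}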
9098 * drop the subtree rooted at tree block 'node'.
9100 * NOTE: this function will unlock and release tree block 'node'.
9101 * Only used by the relocation code.
9103 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9104 struct btrfs_root *root,
9105 struct extent_buffer *node,
9106 struct extent_buffer *parent)
9108 struct btrfs_fs_info *fs_info = root->fs_info;
9109 struct btrfs_path *path;
9110 struct walk_control *wc;
9116 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9118 path = btrfs_alloc_path();
9122 wc = kzalloc(sizeof(*wc), GFP_NOFS);
9124 btrfs_free_path(path);
9128 btrfs_assert_tree_locked(parent);
9129 parent_level = btrfs_header_level(parent);
9130 extent_buffer_get(parent);
9131 path->nodes[parent_level] = parent;
9132 path->slots[parent_level] = btrfs_header_nritems(parent);
9134 btrfs_assert_tree_locked(node);
9135 level = btrfs_header_level(node);
9136 path->nodes[level] = node;
9137 path->slots[level] = 0;
9138 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9140 wc->refs[parent_level] = 1;
9141 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9143 wc->shared_level = -1;
9144 wc->stage = DROP_REFERENCE;
9147 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
9150 wret = walk_down_tree(trans, root, path, wc);
9156 wret = walk_up_tree(trans, root, path, wc, parent_level);
9164 btrfs_free_path(path);
9168 static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
9174 * if restripe for this chunk type is on, pick the target profile and
9175 * return; otherwise do the usual balance
9177 stripped = get_restripe_target(fs_info, flags);
9179 return extended_to_chunk(stripped);
9181 num_devices = fs_info->fs_devices->rw_devices;
9183 stripped = BTRFS_BLOCK_GROUP_RAID0 |
9184 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9185 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9187 if (num_devices == 1) {
9188 stripped |= BTRFS_BLOCK_GROUP_DUP;
9189 stripped = flags & ~stripped;
9191 /* turn raid0 into single device chunks */
9192 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9195 /* turn mirroring into duplication */
9196 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9197 BTRFS_BLOCK_GROUP_RAID10))
9198 return stripped | BTRFS_BLOCK_GROUP_DUP;
9200 /* they already had raid on here, just return */
9201 if (flags & stripped)
9204 stripped |= BTRFS_BLOCK_GROUP_DUP;
9205 stripped = flags & ~stripped;
9207 /* switch duplicated blocks with raid1 */
9208 if (flags & BTRFS_BLOCK_GROUP_DUP)
9209 return stripped | BTRFS_BLOCK_GROUP_RAID1;
9211 /* this is drive concat, leave it alone */
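/*
 * Worked examples of the downgrade logic above, assuming no restripe
 * target is set:
 *
 *   1 rw device:   RAID1 or RAID10 -> DUP, RAID0 -> single
 *   2+ rw devices: DUP -> RAID1; profiles that already include a RAID
 *                  bit are returned unchanged
 */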
9217 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9219 struct btrfs_space_info *sinfo = cache->space_info;
9221 u64 min_allocable_bytes;
9225 * We need some metadata space and system metadata space for
9226 * allocating chunks in some corner cases, so unless the caller is
9227 * forcing this, keep a minimum reserve before going read-only.
9230 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9232 min_allocable_bytes = SZ_1M;
9234 min_allocable_bytes = 0;
9236 spin_lock(&sinfo->lock);
9237 spin_lock(&cache->lock);
9245 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9246 cache->bytes_super - btrfs_block_group_used(&cache->item);
9248 if (btrfs_space_info_used(sinfo, true) + num_bytes +
9249 min_allocable_bytes <= sinfo->total_bytes) {
9250 sinfo->bytes_readonly += num_bytes;
9252 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9256 spin_unlock(&cache->lock);
9257 spin_unlock(&sinfo->lock);
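/*
 * Example with illustrative numbers: a 1GiB metadata block group with
 * 200MiB used, nothing reserved or pinned, and 16MiB of super stripes
 * contributes num_bytes = 1024 - 200 - 16 = 808 (MiB) of soon-to-be
 * read-only free space. The group is flipped read-only only if the
 * space_info can still absorb that plus the 1MiB floor.
 */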
9261 int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
9264 struct btrfs_fs_info *fs_info = cache->fs_info;
9265 struct btrfs_trans_handle *trans;
9270 trans = btrfs_join_transaction(fs_info->extent_root);
9272 return PTR_ERR(trans);
9275 * we're not allowed to set block groups readonly after the dirty
9276 * block groups cache has started writing. If it already started,
9277 * back off and let this transaction commit
9279 mutex_lock(&fs_info->ro_block_group_mutex);
9280 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9281 u64 transid = trans->transid;
9283 mutex_unlock(&fs_info->ro_block_group_mutex);
9284 btrfs_end_transaction(trans);
9286 ret = btrfs_wait_for_commit(fs_info, transid);
9293 * if we are changing raid levels, try to allocate a corresponding
9294 * block group with the new raid level.
9296 alloc_flags = update_block_group_flags(fs_info, cache->flags);
9297 if (alloc_flags != cache->flags) {
9298 ret = do_chunk_alloc(trans, alloc_flags,
9301 * ENOSPC is allowed here, we may have enough space
9302 * already allocated at the new raid level to
9311 ret = inc_block_group_ro(cache, 0);
9314 alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
9315 ret = do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
9318 ret = inc_block_group_ro(cache, 0);
9320 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9321 alloc_flags = update_block_group_flags(fs_info, cache->flags);
9322 mutex_lock(&fs_info->chunk_mutex);
9323 check_system_chunk(trans, alloc_flags);
9324 mutex_unlock(&fs_info->chunk_mutex);
9326 mutex_unlock(&fs_info->ro_block_group_mutex);
9328 btrfs_end_transaction(trans);
9332 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
9334 u64 alloc_flags = get_alloc_profile(trans->fs_info, type);
9336 return do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
9340 * helper to account the unused space of all the read-only block groups in
9341 * the space_info. takes mirrors into account.
9343 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9345 struct btrfs_block_group_cache *block_group;
9349 /* It's df, we don't care if it's racy */
9350 if (list_empty(&sinfo->ro_bgs))
9353 spin_lock(&sinfo->lock);
9354 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9355 spin_lock(&block_group->lock);
9357 if (!block_group->ro) {
9358 spin_unlock(&block_group->lock);
9362 factor = btrfs_bg_type_to_factor(block_group->flags);
9363 free_bytes += (block_group->key.offset -
9364 btrfs_block_group_used(&block_group->item)) *
9367 spin_unlock(&block_group->lock);
9369 spin_unlock(&sinfo->lock);
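/*
 * Example: a read-only RAID1 block group (factor 2) with a key.offset of
 * 1GiB and 300MiB used reports (1024 - 300) * 2 = 1448 (MiB) here, since
 * every free byte still occupies two bytes of raw disk.
 */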
9374 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
9376 struct btrfs_space_info *sinfo = cache->space_info;
9381 spin_lock(&sinfo->lock);
9382 spin_lock(&cache->lock);
9384 num_bytes = cache->key.offset - cache->reserved -
9385 cache->pinned - cache->bytes_super -
9386 btrfs_block_group_used(&cache->item);
9387 sinfo->bytes_readonly -= num_bytes;
9388 list_del_init(&cache->ro_list);
9390 spin_unlock(&cache->lock);
9391 spin_unlock(&sinfo->lock);
9395 * checks to see if it's even possible to relocate this block group.
9397 * @return - -1 if it's not a good idea to relocate this block group, 0 if
9398 * it's ok to go ahead and try.
9400 int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
9402 struct btrfs_root *root = fs_info->extent_root;
9403 struct btrfs_block_group_cache *block_group;
9404 struct btrfs_space_info *space_info;
9405 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
9406 struct btrfs_device *device;
9407 struct btrfs_trans_handle *trans;
9417 debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
9419 block_group = btrfs_lookup_block_group(fs_info, bytenr);
9421 /* odd, couldn't find the block group, leave it alone */
9425 "can't find block group for bytenr %llu",
9430 min_free = btrfs_block_group_used(&block_group->item);
9432 /* no bytes used, we're good */
9436 space_info = block_group->space_info;
9437 spin_lock(&space_info->lock);
9439 full = space_info->full;
9442 * if this is the last block group we have in this space, we can't
9443 * relocate it unless we're able to allocate a new chunk below.
9445 * Otherwise, we need to make sure we have room in the space to handle
9446 * all of the extents from this block group. If we can, we're good
9448 if ((space_info->total_bytes != block_group->key.offset) &&
9449 (btrfs_space_info_used(space_info, false) + min_free <
9450 space_info->total_bytes)) {
9451 spin_unlock(&space_info->lock);
9454 spin_unlock(&space_info->lock);
9457 * ok we don't have enough space, but maybe we have free space on our
9458 * devices to allocate new chunks for relocation, so loop through our
9459 * alloc devices and guess if we have enough space. if this block
9460 * group is going to be restriped, run checks against the target
9461 * profile instead of the current one.
9473 target = get_restripe_target(fs_info, block_group->flags);
9475 index = btrfs_bg_flags_to_raid_index(extended_to_chunk(target));
9478 * this is just a balance, so if we were marked as full
9479 * we know there is no space for a new chunk
9484 "no space to alloc new chunk for block group %llu",
9485 block_group->key.objectid);
9489 index = btrfs_bg_flags_to_raid_index(block_group->flags);
9492 if (index == BTRFS_RAID_RAID10) {
9496 } else if (index == BTRFS_RAID_RAID1) {
9498 } else if (index == BTRFS_RAID_DUP) {
9501 } else if (index == BTRFS_RAID_RAID0) {
9502 dev_min = fs_devices->rw_devices;
9503 min_free = div64_u64(min_free, dev_min);
9506 /* We need to do this so that we can look at pending chunks */
9507 trans = btrfs_join_transaction(root);
9508 if (IS_ERR(trans)) {
9509 ret = PTR_ERR(trans);
9513 mutex_lock(&fs_info->chunk_mutex);
9514 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9518 * check to make sure we can actually find a chunk with enough
9519 * space to fit our block group in.
9521 if (device->total_bytes > device->bytes_used + min_free &&
9522 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
9523 ret = find_free_dev_extent(trans, device, min_free,
9528 if (dev_nr >= dev_min)
9534 if (debug && ret == -1)
9536 "no space to allocate a new chunk for block group %llu",
9537 block_group->key.objectid);
9538 mutex_unlock(&fs_info->chunk_mutex);
9539 btrfs_end_transaction(trans);
9541 btrfs_put_block_group(block_group);
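/*
 * Illustrative summary of the per-profile requirements checked above (the
 * RAID10/RAID1/DUP branch bodies are elided here, so the exact figures are
 * stated as assumptions): RAID10 needs 4 candidate devices with min_free
 * halved, RAID1 needs 2, DUP needs a single device with min_free doubled,
 * and RAID0 spreads min_free across all rw devices.
 */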
9545 static int find_first_block_group(struct btrfs_fs_info *fs_info,
9546 struct btrfs_path *path,
9547 struct btrfs_key *key)
9549 struct btrfs_root *root = fs_info->extent_root;
9551 struct btrfs_key found_key;
9552 struct extent_buffer *leaf;
9553 struct btrfs_block_group_item bg;
9557 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9562 slot = path->slots[0];
9563 leaf = path->nodes[0];
9564 if (slot >= btrfs_header_nritems(leaf)) {
9565 ret = btrfs_next_leaf(root, path);
9572 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9574 if (found_key.objectid >= key->objectid &&
9575 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9576 struct extent_map_tree *em_tree;
9577 struct extent_map *em;
9579 em_tree = &root->fs_info->mapping_tree.map_tree;
9580 read_lock(&em_tree->lock);
9581 em = lookup_extent_mapping(em_tree, found_key.objectid,
9583 read_unlock(&em_tree->lock);
9586 "logical %llu len %llu found bg but no related chunk",
9587 found_key.objectid, found_key.offset);
9589 } else if (em->start != found_key.objectid ||
9590 em->len != found_key.offset) {
9592 "block group %llu len %llu mismatch with chunk %llu len %llu",
9593 found_key.objectid, found_key.offset,
9594 em->start, em->len);
9597 read_extent_buffer(leaf, &bg,
9598 btrfs_item_ptr_offset(leaf, slot),
9600 flags = btrfs_block_group_flags(&bg) &
9601 BTRFS_BLOCK_GROUP_TYPE_MASK;
9603 if (flags != (em->map_lookup->type &
9604 BTRFS_BLOCK_GROUP_TYPE_MASK)) {
9606 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
9608 found_key.offset, flags,
9609 (BTRFS_BLOCK_GROUP_TYPE_MASK &
9610 em->map_lookup->type));
9616 free_extent_map(em);
9625 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9627 struct btrfs_block_group_cache *block_group;
9631 struct inode *inode;
9633 block_group = btrfs_lookup_first_block_group(info, last);
9634 while (block_group) {
9635 wait_block_group_cache_done(block_group);
9636 spin_lock(&block_group->lock);
9637 if (block_group->iref)
9639 spin_unlock(&block_group->lock);
9640 block_group = next_block_group(info, block_group);
9649 inode = block_group->inode;
9650 block_group->iref = 0;
9651 block_group->inode = NULL;
9652 spin_unlock(&block_group->lock);
9653 ASSERT(block_group->io_ctl.inode == NULL);
9655 last = block_group->key.objectid + block_group->key.offset;
9656 btrfs_put_block_group(block_group);
9661 * Must be called only after stopping all workers, since we could have block
9662 * group caching kthreads running, and therefore they could race with us if we
9663 * freed the block groups before stopping them.
9665 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9667 struct btrfs_block_group_cache *block_group;
9668 struct btrfs_space_info *space_info;
9669 struct btrfs_caching_control *caching_ctl;
9672 down_write(&info->commit_root_sem);
9673 while (!list_empty(&info->caching_block_groups)) {
9674 caching_ctl = list_entry(info->caching_block_groups.next,
9675 struct btrfs_caching_control, list);
9676 list_del(&caching_ctl->list);
9677 put_caching_control(caching_ctl);
9679 up_write(&info->commit_root_sem);
9681 spin_lock(&info->unused_bgs_lock);
9682 while (!list_empty(&info->unused_bgs)) {
9683 block_group = list_first_entry(&info->unused_bgs,
9684 struct btrfs_block_group_cache,
9686 list_del_init(&block_group->bg_list);
9687 btrfs_put_block_group(block_group);
9689 spin_unlock(&info->unused_bgs_lock);
9691 spin_lock(&info->block_group_cache_lock);
9692 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9693 block_group = rb_entry(n, struct btrfs_block_group_cache,
9695 rb_erase(&block_group->cache_node,
9696 &info->block_group_cache_tree);
9697 RB_CLEAR_NODE(&block_group->cache_node);
9698 spin_unlock(&info->block_group_cache_lock);
9700 down_write(&block_group->space_info->groups_sem);
9701 list_del(&block_group->list);
9702 up_write(&block_group->space_info->groups_sem);
9705 * We haven't cached this block group, which means we could
9706 * possibly have excluded extents on this block group.
9708 if (block_group->cached == BTRFS_CACHE_NO ||
9709 block_group->cached == BTRFS_CACHE_ERROR)
9710 free_excluded_extents(block_group);
9712 btrfs_remove_free_space_cache(block_group);
9713 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
9714 ASSERT(list_empty(&block_group->dirty_list));
9715 ASSERT(list_empty(&block_group->io_list));
9716 ASSERT(list_empty(&block_group->bg_list));
9717 ASSERT(atomic_read(&block_group->count) == 1);
9718 btrfs_put_block_group(block_group);
9720 spin_lock(&info->block_group_cache_lock);
9722 spin_unlock(&info->block_group_cache_lock);
9724 /* now that all the block groups are freed, go through and
9725 * free all the space_info structs. This is only called during
9726 * the final stages of unmount, and so we know nobody is
9727 * using them. We call synchronize_rcu() once before we start,
9728 * just to be on the safe side.
9732 release_global_block_rsv(info);
9734 while (!list_empty(&info->space_info)) {
9737 space_info = list_entry(info->space_info.next,
9738 struct btrfs_space_info,
9742 * Do not hide this behind enospc_debug, this is actually
9743 * important and indicates a real bug if this happens.
9745 if (WARN_ON(space_info->bytes_pinned > 0 ||
9746 space_info->bytes_reserved > 0 ||
9747 space_info->bytes_may_use > 0))
9748 dump_space_info(info, space_info, 0, 0);
9749 list_del(&space_info->list);
9750 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9751 struct kobject *kobj;
9752 kobj = space_info->block_group_kobjs[i];
9753 space_info->block_group_kobjs[i] = NULL;
9759 kobject_del(&space_info->kobj);
9760 kobject_put(&space_info->kobj);
9765 /* link_block_group will queue up kobjects to add when we're reclaim-safe */
9766 void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
9768 struct btrfs_space_info *space_info;
9769 struct raid_kobject *rkobj;
9774 spin_lock(&fs_info->pending_raid_kobjs_lock);
9775 list_splice_init(&fs_info->pending_raid_kobjs, &list);
9776 spin_unlock(&fs_info->pending_raid_kobjs_lock);
9778 list_for_each_entry(rkobj, &list, list) {
9779 space_info = __find_space_info(fs_info, rkobj->flags);
9780 index = btrfs_bg_flags_to_raid_index(rkobj->flags);
9782 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9783 "%s", get_raid_name(index));
9785 kobject_put(&rkobj->kobj);
9791 "failed to add kobject for block cache, ignoring");
9794 static void link_block_group(struct btrfs_block_group_cache *cache)
9796 struct btrfs_space_info *space_info = cache->space_info;
9797 struct btrfs_fs_info *fs_info = cache->fs_info;
9798 int index = btrfs_bg_flags_to_raid_index(cache->flags);
9801 down_write(&space_info->groups_sem);
9802 if (list_empty(&space_info->block_groups[index]))
9804 list_add_tail(&cache->list, &space_info->block_groups[index]);
9805 up_write(&space_info->groups_sem);
9808 struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9810 btrfs_warn(cache->fs_info,
9811 "couldn't alloc memory for raid level kobject");
9814 rkobj->flags = cache->flags;
9815 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9817 spin_lock(&fs_info->pending_raid_kobjs_lock);
9818 list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
9819 spin_unlock(&fs_info->pending_raid_kobjs_lock);
9820 space_info->block_group_kobjs[index] = &rkobj->kobj;
9824 static struct btrfs_block_group_cache *
9825 btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
9826 u64 start, u64 size)
9828 struct btrfs_block_group_cache *cache;
9830 cache = kzalloc(sizeof(*cache), GFP_NOFS);
9834 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9836 if (!cache->free_space_ctl) {
9841 cache->key.objectid = start;
9842 cache->key.offset = size;
9843 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9845 cache->fs_info = fs_info;
9846 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
9847 set_free_space_tree_thresholds(cache);
9849 atomic_set(&cache->count, 1);
9850 spin_lock_init(&cache->lock);
9851 init_rwsem(&cache->data_rwsem);
9852 INIT_LIST_HEAD(&cache->list);
9853 INIT_LIST_HEAD(&cache->cluster_list);
9854 INIT_LIST_HEAD(&cache->bg_list);
9855 INIT_LIST_HEAD(&cache->ro_list);
9856 INIT_LIST_HEAD(&cache->dirty_list);
9857 INIT_LIST_HEAD(&cache->io_list);
9858 btrfs_init_free_space_ctl(cache);
9859 atomic_set(&cache->trimming, 0);
9860 mutex_init(&cache->free_space_lock);
9861 btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
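/*
 * Note on lifetime: the cache built above starts with a reference count
 * of 1 (the atomic_set() in this constructor). Users take extra
 * references with btrfs_get_block_group() and drop them with
 * btrfs_put_block_group(); the final put releases the structure.
 */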
9868 * Iterate all chunks and verify that each of them has the corresponding
9869 * block group.
9871 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
9873 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
9874 struct extent_map *em;
9875 struct btrfs_block_group_cache *bg;
9880 read_lock(&map_tree->map_tree.lock);
9882 * lookup_extent_mapping will return the first extent map
9883 * intersecting the range, so setting @len to 1 is enough to
9884 * get the first chunk.
9886 em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
9887 read_unlock(&map_tree->map_tree.lock);
9891 bg = btrfs_lookup_block_group(fs_info, em->start);
9894 "chunk start=%llu len=%llu doesn't have corresponding block group",
9895 em->start, em->len);
9897 free_extent_map(em);
9900 if (bg->key.objectid != em->start ||
9901 bg->key.offset != em->len ||
9902 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
9903 (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
9905 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
9907 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
9908 bg->key.objectid, bg->key.offset,
9909 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
9911 free_extent_map(em);
9912 btrfs_put_block_group(bg);
9915 start = em->start + em->len;
9916 free_extent_map(em);
9917 btrfs_put_block_group(bg);
9922 int btrfs_read_block_groups(struct btrfs_fs_info *info)
9924 struct btrfs_path *path;
9926 struct btrfs_block_group_cache *cache;
9927 struct btrfs_space_info *space_info;
9928 struct btrfs_key key;
9929 struct btrfs_key found_key;
9930 struct extent_buffer *leaf;
9936 feature = btrfs_super_incompat_flags(info->super_copy);
9937 mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
9941 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9942 path = btrfs_alloc_path();
9945 path->reada = READA_FORWARD;
9947 cache_gen = btrfs_super_cache_generation(info->super_copy);
9948 if (btrfs_test_opt(info, SPACE_CACHE) &&
9949 btrfs_super_generation(info->super_copy) != cache_gen)
9951 if (btrfs_test_opt(info, CLEAR_CACHE))
9955 ret = find_first_block_group(info, path, &key);
9961 leaf = path->nodes[0];
9962 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9964 cache = btrfs_create_block_group_cache(info, found_key.objectid,
9973 * When we mount with an old space cache, we need to
9974 * set BTRFS_DC_CLEAR and the dirty flag.
9976 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9977 * truncate the old free space cache inode and set up a new one.
9979 * b) Setting the 'dirty flag' makes sure that we flush
9980 * the new space cache info onto disk.
9982 if (btrfs_test_opt(info, SPACE_CACHE))
9983 cache->disk_cache_state = BTRFS_DC_CLEAR;
9986 read_extent_buffer(leaf, &cache->item,
9987 btrfs_item_ptr_offset(leaf, path->slots[0]),
9988 sizeof(cache->item));
9989 cache->flags = btrfs_block_group_flags(&cache->item);
9991 ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
9992 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
9994 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
9995 cache->key.objectid);
10000 key.objectid = found_key.objectid + found_key.offset;
10001 btrfs_release_path(path);
10004 * We need to exclude the super stripes now so that the space
10005 * info has super bytes accounted for, otherwise we'll think
10006 * we have more space than we actually do.
10008 ret = exclude_super_stripes(cache);
10011 * We may have excluded something, so call this just in case.
10014 free_excluded_extents(cache);
10015 btrfs_put_block_group(cache);
10020 * check for two cases, either we are full, and therefore
10021 * don't need to bother with the caching work since we won't
10022 * find any space, or we are empty, and we can just add all
10023 * the space in and be done with it. This saves us a lot of
10024 * time, particularly in the full case.
10026 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
10027 cache->last_byte_to_unpin = (u64)-1;
10028 cache->cached = BTRFS_CACHE_FINISHED;
10029 free_excluded_extents(cache);
10030 } else if (btrfs_block_group_used(&cache->item) == 0) {
10031 cache->last_byte_to_unpin = (u64)-1;
10032 cache->cached = BTRFS_CACHE_FINISHED;
10033 add_new_free_space(cache, found_key.objectid,
10034 found_key.objectid +
10036 free_excluded_extents(cache);
10039 ret = btrfs_add_block_group_cache(info, cache);
10041 btrfs_remove_free_space_cache(cache);
10042 btrfs_put_block_group(cache);
10046 trace_btrfs_add_block_group(info, cache, 0);
10047 update_space_info(info, cache->flags, found_key.offset,
10048 btrfs_block_group_used(&cache->item),
10049 cache->bytes_super, &space_info);
10051 cache->space_info = space_info;
10053 link_block_group(cache);
10055 set_avail_alloc_bits(info, cache->flags);
10056 if (btrfs_chunk_readonly(info, cache->key.objectid)) {
10057 inc_block_group_ro(cache, 1);
10058 } else if (btrfs_block_group_used(&cache->item) == 0) {
10059 ASSERT(list_empty(&cache->bg_list));
10060 btrfs_mark_bg_unused(cache);
10064 list_for_each_entry_rcu(space_info, &info->space_info, list) {
10065 if (!(get_alloc_profile(info, space_info->flags) &
10066 (BTRFS_BLOCK_GROUP_RAID10 |
10067 BTRFS_BLOCK_GROUP_RAID1 |
10068 BTRFS_BLOCK_GROUP_RAID5 |
10069 BTRFS_BLOCK_GROUP_RAID6 |
10070 BTRFS_BLOCK_GROUP_DUP)))
10073 * avoid allocating from un-mirrored block group if there are
10074 * mirrored block groups.
10076 list_for_each_entry(cache,
10077 &space_info->block_groups[BTRFS_RAID_RAID0],
10079 inc_block_group_ro(cache, 1);
10080 list_for_each_entry(cache,
10081 &space_info->block_groups[BTRFS_RAID_SINGLE],
10083 inc_block_group_ro(cache, 1);
10086 btrfs_add_raid_kobjects(info);
10087 init_global_block_rsv(info);
10088 ret = check_chunk_block_group_mappings(info);
10090 btrfs_free_path(path);
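/*
 * Recap of the mount-time flow above: for every BLOCK_GROUP_ITEM found,
 * a cache is created, super stripes are excluded, trivially full or empty
 * groups are marked cached, the cache is inserted into the rbtree and
 * linked into its space_info, and completely empty groups are queued for
 * deletion via btrfs_mark_bg_unused().
 */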
10094 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
10096 struct btrfs_fs_info *fs_info = trans->fs_info;
10097 struct btrfs_block_group_cache *block_group;
10098 struct btrfs_root *extent_root = fs_info->extent_root;
10099 struct btrfs_block_group_item item;
10100 struct btrfs_key key;
10103 if (!trans->can_flush_pending_bgs)
10106 while (!list_empty(&trans->new_bgs)) {
10107 block_group = list_first_entry(&trans->new_bgs,
10108 struct btrfs_block_group_cache,
10113 spin_lock(&block_group->lock);
10114 memcpy(&item, &block_group->item, sizeof(item));
10115 memcpy(&key, &block_group->key, sizeof(key));
10116 spin_unlock(&block_group->lock);
10118 ret = btrfs_insert_item(trans, extent_root, &key, &item,
10121 btrfs_abort_transaction(trans, ret);
10122 ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
10124 btrfs_abort_transaction(trans, ret);
10125 add_block_group_free_space(trans, block_group);
10126 /* already aborted the transaction if it failed. */
10128 list_del_init(&block_group->bg_list);
10130 btrfs_trans_release_chunk_metadata(trans);
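/*
 * Callers flush this before a transaction commits: it drains the
 * trans->new_bgs entries that btrfs_make_block_group() below appends, so
 * every new block group item reaches the extent tree in the same
 * transaction that allocated the chunk.
 */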
10133 int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
10134 u64 type, u64 chunk_offset, u64 size)
10136 struct btrfs_fs_info *fs_info = trans->fs_info;
10137 struct btrfs_block_group_cache *cache;
10140 btrfs_set_log_full_commit(fs_info, trans);
10142 cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
10146 btrfs_set_block_group_used(&cache->item, bytes_used);
10147 btrfs_set_block_group_chunk_objectid(&cache->item,
10148 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
10149 btrfs_set_block_group_flags(&cache->item, type);
10151 cache->flags = type;
10152 cache->last_byte_to_unpin = (u64)-1;
10153 cache->cached = BTRFS_CACHE_FINISHED;
10154 cache->needs_free_space = 1;
10155 ret = exclude_super_stripes(cache);
10158 * We may have excluded something, so call this just in case.
10161 free_excluded_extents(cache);
10162 btrfs_put_block_group(cache);
10166 add_new_free_space(cache, chunk_offset, chunk_offset + size);
10168 free_excluded_extents(cache);
10170 #ifdef CONFIG_BTRFS_DEBUG
10171 if (btrfs_should_fragment_free_space(cache)) {
10172 u64 new_bytes_used = size - bytes_used;
10174 bytes_used += new_bytes_used >> 1;
10175 fragment_free_space(cache);
10179 * Ensure the corresponding space_info object is created and
10180 * assigned to our block group. We want our bg to be added to the rbtree
10181 * with its ->space_info set.
10183 cache->space_info = __find_space_info(fs_info, cache->flags);
10184 ASSERT(cache->space_info);
10186 ret = btrfs_add_block_group_cache(fs_info, cache);
10188 btrfs_remove_free_space_cache(cache);
10189 btrfs_put_block_group(cache);
10194 * Now that our block group has its ->space_info set and is inserted in
10195 * the rbtree, update the space info's counters.
10197 trace_btrfs_add_block_group(fs_info, cache, 1);
10198 update_space_info(fs_info, cache->flags, size, bytes_used,
10199 cache->bytes_super, &cache->space_info);
10200 update_global_block_rsv(fs_info);
10202 link_block_group(cache);
10204 list_add_tail(&cache->bg_list, &trans->new_bgs);
10206 set_avail_alloc_bits(fs_info, type);
10210 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10212 u64 extra_flags = chunk_to_extended(flags) &
10213 BTRFS_EXTENDED_PROFILE_MASK;
10215 write_seqlock(&fs_info->profiles_lock);
10216 if (flags & BTRFS_BLOCK_GROUP_DATA)
10217 fs_info->avail_data_alloc_bits &= ~extra_flags;
10218 if (flags & BTRFS_BLOCK_GROUP_METADATA)
10219 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10220 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10221 fs_info->avail_system_alloc_bits &= ~extra_flags;
10222 write_sequnlock(&fs_info->profiles_lock);
10225 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10226 u64 group_start, struct extent_map *em)
10228 struct btrfs_fs_info *fs_info = trans->fs_info;
10229 struct btrfs_root *root = fs_info->extent_root;
10230 struct btrfs_path *path;
10231 struct btrfs_block_group_cache *block_group;
10232 struct btrfs_free_cluster *cluster;
10233 struct btrfs_root *tree_root = fs_info->tree_root;
10234 struct btrfs_key key;
10235 struct inode *inode;
10236 struct kobject *kobj = NULL;
10240 struct btrfs_caching_control *caching_ctl = NULL;
10243 block_group = btrfs_lookup_block_group(fs_info, group_start);
10244 BUG_ON(!block_group);
10245 BUG_ON(!block_group->ro);
10247 trace_btrfs_remove_block_group(block_group);
10249 * Free the reserved super bytes from this block group before deleting it.
10252 free_excluded_extents(block_group);
10253 btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
10254 block_group->key.offset);
10256 memcpy(&key, &block_group->key, sizeof(key));
10257 index = btrfs_bg_flags_to_raid_index(block_group->flags);
10258 factor = btrfs_bg_type_to_factor(block_group->flags);
10260 /* make sure this block group isn't part of an allocation cluster */
10261 cluster = &fs_info->data_alloc_cluster;
10262 spin_lock(&cluster->refill_lock);
10263 btrfs_return_cluster_to_free_space(block_group, cluster);
10264 spin_unlock(&cluster->refill_lock);
10267 * make sure this block group isn't part of a metadata
10268 * allocation cluster
10270 cluster = &fs_info->meta_alloc_cluster;
10271 spin_lock(&cluster->refill_lock);
10272 btrfs_return_cluster_to_free_space(block_group, cluster);
10273 spin_unlock(&cluster->refill_lock);
10275 path = btrfs_alloc_path();
10282 * get the inode first so any iput calls done for the io_list
10283 * aren't the final iput (no unlinks allowed now)
10285 inode = lookup_free_space_inode(fs_info, block_group, path);
10287 mutex_lock(&trans->transaction->cache_write_mutex);
10289 * make sure our free space cache IO is done before removing the free space inode
10292 spin_lock(&trans->transaction->dirty_bgs_lock);
10293 if (!list_empty(&block_group->io_list)) {
10294 list_del_init(&block_group->io_list);
10296 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10298 spin_unlock(&trans->transaction->dirty_bgs_lock);
10299 btrfs_wait_cache_io(trans, block_group, path);
10300 btrfs_put_block_group(block_group);
10301 spin_lock(&trans->transaction->dirty_bgs_lock);
10304 if (!list_empty(&block_group->dirty_list)) {
10305 list_del_init(&block_group->dirty_list);
10306 btrfs_put_block_group(block_group);
10308 spin_unlock(&trans->transaction->dirty_bgs_lock);
10309 mutex_unlock(&trans->transaction->cache_write_mutex);
10311 if (!IS_ERR(inode)) {
10312 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
10314 btrfs_add_delayed_iput(inode);
10317 clear_nlink(inode);
10318 /* One for the block groups ref */
10319 spin_lock(&block_group->lock);
10320 if (block_group->iref) {
10321 block_group->iref = 0;
10322 block_group->inode = NULL;
10323 spin_unlock(&block_group->lock);
10326 spin_unlock(&block_group->lock);
10328 /* One for our lookup ref */
10329 btrfs_add_delayed_iput(inode);
10332 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10333 key.offset = block_group->key.objectid;
10336 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10340 btrfs_release_path(path);
10342 ret = btrfs_del_item(trans, tree_root, path);
10345 btrfs_release_path(path);
10348 spin_lock(&fs_info->block_group_cache_lock);
10349 rb_erase(&block_group->cache_node,
10350 &fs_info->block_group_cache_tree);
10351 RB_CLEAR_NODE(&block_group->cache_node);
10353 if (fs_info->first_logical_byte == block_group->key.objectid)
10354 fs_info->first_logical_byte = (u64)-1;
10355 spin_unlock(&fs_info->block_group_cache_lock);
10357 down_write(&block_group->space_info->groups_sem);
10359 * we must use list_del_init so people can check to see if they
10360 * are still on the list after taking the semaphore
10362 list_del_init(&block_group->list);
10363 if (list_empty(&block_group->space_info->block_groups[index])) {
10364 kobj = block_group->space_info->block_group_kobjs[index];
10365 block_group->space_info->block_group_kobjs[index] = NULL;
10366 clear_avail_alloc_bits(fs_info, block_group->flags);
10368 up_write(&block_group->space_info->groups_sem);
10374 if (block_group->has_caching_ctl)
10375 caching_ctl = get_caching_control(block_group);
10376 if (block_group->cached == BTRFS_CACHE_STARTED)
10377 wait_block_group_cache_done(block_group);
10378 if (block_group->has_caching_ctl) {
10379 down_write(&fs_info->commit_root_sem);
10380 if (!caching_ctl) {
10381 struct btrfs_caching_control *ctl;
10383 list_for_each_entry(ctl,
10384 &fs_info->caching_block_groups, list)
10385 if (ctl->block_group == block_group) {
10387 refcount_inc(&caching_ctl->count);
10392 list_del_init(&caching_ctl->list);
10393 up_write(&fs_info->commit_root_sem);
10395 /* Once for the caching bgs list and once for us. */
10396 put_caching_control(caching_ctl);
10397 put_caching_control(caching_ctl);
10401 spin_lock(&trans->transaction->dirty_bgs_lock);
10402 if (!list_empty(&block_group->dirty_list)) {
10405 if (!list_empty(&block_group->io_list)) {
10408 spin_unlock(&trans->transaction->dirty_bgs_lock);
10409 btrfs_remove_free_space_cache(block_group);
10411 spin_lock(&block_group->space_info->lock);
10412 list_del_init(&block_group->ro_list);
10414 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
10415 WARN_ON(block_group->space_info->total_bytes
10416 < block_group->key.offset);
10417 WARN_ON(block_group->space_info->bytes_readonly
10418 < block_group->key.offset);
10419 WARN_ON(block_group->space_info->disk_total
10420 < block_group->key.offset * factor);
10422 block_group->space_info->total_bytes -= block_group->key.offset;
10423 block_group->space_info->bytes_readonly -= block_group->key.offset;
10424 block_group->space_info->disk_total -= block_group->key.offset * factor;
10426 spin_unlock(&block_group->space_info->lock);
10428 memcpy(&key, &block_group->key, sizeof(key));
10430 mutex_lock(&fs_info->chunk_mutex);
10431 if (!list_empty(&em->list)) {
10432 /* We're in the transaction->pending_chunks list. */
10433 free_extent_map(em);
10435 spin_lock(&block_group->lock);
10436 block_group->removed = 1;
10438 * At this point trimming can't start on this block group, because we
10439 * removed the block group from the tree fs_info->block_group_cache_tree
10440 * so no one can find it anymore, and even if someone already got this
10441 * block group before we removed it from the rbtree, they have already
10442 * incremented block_group->trimming - if they didn't, they won't find
10443 * any free space entries because we already removed them all when we
10444 * called btrfs_remove_free_space_cache().
10446 * And we must not remove the extent map from the fs_info->mapping_tree
10447 * to prevent the same logical address range and physical device space
10448 * ranges from being reused for a new block group. This is because our
10449 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10450 * completely transactionless, so while it is trimming a range the
10451 * currently running transaction might finish and a new one start,
10452 * allowing for new block groups to be created that can reuse the same
10453 * physical device locations unless we take this special care.
10455 * There may also be an implicit trim operation if the file system
10456 * is mounted with -odiscard. The same protections must remain
10457 * in place until the extents have been discarded completely when
10458 * the transaction commit has completed.
10460 remove_em = (atomic_read(&block_group->trimming) == 0);
10462 * Make sure a trimmer task always sees the em in the pinned_chunks list
10463 * if it sees block_group->removed == 1 (needs to lock block_group->lock
10464 * before checking block_group->removed).
10468 * Our em might be in trans->transaction->pending_chunks which
10469 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10470 * and so is the fs_info->pinned_chunks list.
10472 * So at this point we must be holding the chunk_mutex to avoid
10473 * any races with chunk allocation (more specifically at
10474 * volumes.c:contains_pending_extent()), to ensure it always
10475 * sees the em, either in the pending_chunks list or in the
10476 * pinned_chunks list.
10478 list_move_tail(&em->list, &fs_info->pinned_chunks);
10480 spin_unlock(&block_group->lock);
10483 struct extent_map_tree *em_tree;
10485 em_tree = &fs_info->mapping_tree.map_tree;
10486 write_lock(&em_tree->lock);
10488 * The em might be in the pending_chunks list, so make sure the
10489 * chunk mutex is locked, since remove_extent_mapping() will
10490 * delete us from that list.
10492 remove_extent_mapping(em_tree, em);
10493 write_unlock(&em_tree->lock);
10494 /* once for the tree */
10495 free_extent_map(em);
10498 mutex_unlock(&fs_info->chunk_mutex);
10500 ret = remove_block_group_free_space(trans, block_group);
10504 btrfs_put_block_group(block_group);
10505 btrfs_put_block_group(block_group);
10507 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10513 ret = btrfs_del_item(trans, root, path);
10515 btrfs_free_path(path);
10519 struct btrfs_trans_handle *
10520 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10521 const u64 chunk_offset)
10523 struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10524 struct extent_map *em;
10525 struct map_lookup *map;
10526 unsigned int num_items;
10528 read_lock(&em_tree->lock);
10529 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10530 read_unlock(&em_tree->lock);
10531 ASSERT(em && em->start == chunk_offset);
10534 * We need to reserve 3 + N units from the metadata space info in order
10535 * to remove a block group (done at btrfs_remove_chunk() and at
10536 * btrfs_remove_block_group()), which are used for:
10538 * 1 unit for adding the free space inode's orphan (located in the tree
10539 * of tree roots).
10540 * 1 unit for deleting the block group item (located in the extent
10541 * tree).
10542 * 1 unit for deleting the free space item (located in the tree of tree
10543 * roots).
10544 * N units for deleting N device extent items corresponding to each
10545 * stripe (located in the device tree).
10547 * In order to remove a block group we also need to reserve units in the
10548 * system space info in order to update the chunk tree (update one or
10549 * more device items and remove one chunk item), but this is done at
10550 * btrfs_remove_chunk() through a call to check_system_chunk().
10552 map = em->map_lookup;
10553 num_items = 3 + map->num_stripes;
10554 free_extent_map(em);
10556 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
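/*
 * Example: removing a block group whose chunk has 2 stripes (say RAID1
 * across two devices) reserves num_items = 3 + 2 = 5 metadata units via
 * the fallback-to-global-rsv transaction start above.
 */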
10561 * Process the unused_bgs list and remove any that don't have any allocated
10562 * space inside of them.
10564 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10566 struct btrfs_block_group_cache *block_group;
10567 struct btrfs_space_info *space_info;
10568 struct btrfs_trans_handle *trans;
10571 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
10574 spin_lock(&fs_info->unused_bgs_lock);
10575 while (!list_empty(&fs_info->unused_bgs)) {
10579 block_group = list_first_entry(&fs_info->unused_bgs,
10580 struct btrfs_block_group_cache,
10582 list_del_init(&block_group->bg_list);
10584 space_info = block_group->space_info;
10586 if (ret || btrfs_mixed_space_info(space_info)) {
10587 btrfs_put_block_group(block_group);
10590 spin_unlock(&fs_info->unused_bgs_lock);
10592 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10594 /* Don't want to race with allocators so take the groups_sem */
10595 down_write(&space_info->groups_sem);
10596 spin_lock(&block_group->lock);
10597 if (block_group->reserved || block_group->pinned ||
10598 btrfs_block_group_used(&block_group->item) ||
10600 list_is_singular(&block_group->list)) {
10602 * We want to bail if we made new allocations or have
10603 * outstanding allocations in this block group. We do
10604 * the ro check in case balance is currently acting on
10605 * this block group.
10607 trace_btrfs_skip_unused_block_group(block_group);
10608 spin_unlock(&block_group->lock);
10609 up_write(&space_info->groups_sem);
10612 spin_unlock(&block_group->lock);
10614 /* We don't want to force the issue, only flip if it's ok. */
10615 ret = inc_block_group_ro(block_group, 0);
10616 up_write(&space_info->groups_sem);
10623 * Want to do this before we do anything else so we can recover
10624 * properly if we fail to join the transaction.
10626 trans = btrfs_start_trans_remove_block_group(fs_info,
10627 block_group->key.objectid);
10628 if (IS_ERR(trans)) {
10629 btrfs_dec_block_group_ro(block_group);
10630 ret = PTR_ERR(trans);
10635 * We could have pending pinned extents for this block group,
10636 * just delete them, we don't care about them anymore.
10638 start = block_group->key.objectid;
10639 end = start + block_group->key.offset - 1;
10641 * Hold the unused_bg_unpin_mutex lock to avoid racing with
10642 * btrfs_finish_extent_commit(). If we are at transaction N,
10643 * another task might be running finish_extent_commit() for the
10644 * previous transaction N - 1, and have seen a range belonging
10645 * to the block group in freed_extents[] before we were able to
10646 * clear the whole block group range from freed_extents[]. This
10647 * means that task can lookup for the block group after we
10648 * unpinned it from freed_extents[] and removed it, leading to
10649 * a BUG_ON() at btrfs_unpin_extent_range().
10651 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10652 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10655 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10656 btrfs_dec_block_group_ro(block_group);
10659 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10662 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10663 btrfs_dec_block_group_ro(block_group);
10666 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10668 /* Reset pinned so btrfs_put_block_group doesn't complain */
10669 spin_lock(&space_info->lock);
10670 spin_lock(&block_group->lock);
10672 space_info->bytes_pinned -= block_group->pinned;
10673 space_info->bytes_readonly += block_group->pinned;
10674 percpu_counter_add_batch(&space_info->total_bytes_pinned,
10675 -block_group->pinned,
10676 BTRFS_TOTAL_BYTES_PINNED_BATCH);
10677 block_group->pinned = 0;
10679 spin_unlock(&block_group->lock);
10680 spin_unlock(&space_info->lock);
10682 /* DISCARD can flip during remount */
10683 trimming = btrfs_test_opt(fs_info, DISCARD);
10685 /* Implicit trim during transaction commit. */
10687 btrfs_get_block_group_trimming(block_group);
10690 * Btrfs_remove_chunk will abort the transaction if things go horribly wrong.
10693 ret = btrfs_remove_chunk(trans, block_group->key.objectid);
10697 btrfs_put_block_group_trimming(block_group);
10702 * If we're not mounted with -odiscard, we can just forget
10703 * about this block group. Otherwise we'll need to wait
10704 * until transaction commit to do the actual discard.
10707 spin_lock(&fs_info->unused_bgs_lock);
10709 * A concurrent scrub might have added us to the list
10710 * fs_info->unused_bgs, so use a list_move operation
10711 * to add the block group to the deleted_bgs list.
10713 list_move(&block_group->bg_list,
10714 &trans->transaction->deleted_bgs);
10715 spin_unlock(&fs_info->unused_bgs_lock);
10716 btrfs_get_block_group(block_group);
10719 btrfs_end_transaction(trans);
10721 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10722 btrfs_put_block_group(block_group);
10723 spin_lock(&fs_info->unused_bgs_lock);
10725 spin_unlock(&fs_info->unused_bgs_lock);
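/*
 * Block groups enter the list processed above through
 * btrfs_mark_bg_unused() at the bottom of this file, typically once their
 * used byte count drops to zero (see btrfs_read_block_groups()).
 */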
10728 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10730 struct btrfs_super_block *disk_super;
10736 disk_super = fs_info->super_copy;
10737 if (!btrfs_super_root(disk_super))
10740 features = btrfs_super_incompat_flags(disk_super);
10741 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10744 flags = BTRFS_BLOCK_GROUP_SYSTEM;
10745 ret = create_space_info(fs_info, flags);
10750 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10751 ret = create_space_info(fs_info, flags);
10753 flags = BTRFS_BLOCK_GROUP_METADATA;
10754 ret = create_space_info(fs_info, flags);
10758 flags = BTRFS_BLOCK_GROUP_DATA;
10759 ret = create_space_info(fs_info, flags);
10765 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
10766 u64 start, u64 end)
10768 return unpin_extent_range(fs_info, start, end, false);
10772 * It used to be that old block groups would be left around forever.
10773 * Iterating over them would be enough to trim unused space. Since we
10774 * now automatically remove them, we also need to iterate over unallocated space.
10777 * We don't want a transaction for this since the discard may take a
10778 * substantial amount of time. We don't require that a transaction be
10779 * running, but we do need to take a running transaction into account
10780 * to ensure that we're not discarding chunks that were released or
10781 * allocated in the current transaction.
10783 * Holding the chunks lock will prevent other threads from allocating
10784 * or releasing chunks, but it won't prevent a running transaction
10785 * from committing and releasing the memory that the pending chunks
10786 * list head uses. For that, we need to take a reference to the
10787 * transaction and hold the commit root sem. We only need to hold
10788 * it while performing the free space search since we have already
10789 * held back allocations.
10791 static int btrfs_trim_free_extents(struct btrfs_device *device,
10792 u64 minlen, u64 *trimmed)
10794 u64 start = 0, len = 0;
10799 /* Discard not supported = nothing to do. */
10800 if (!blk_queue_discard(bdev_get_queue(device->bdev)))
10803 /* Not writeable = nothing to do. */
10804 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
10807 /* No free space = nothing to do. */
10808 if (device->total_bytes <= device->bytes_used)
10814 struct btrfs_fs_info *fs_info = device->fs_info;
10815 struct btrfs_transaction *trans;
10818 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10822 ret = down_read_killable(&fs_info->commit_root_sem);
10824 mutex_unlock(&fs_info->chunk_mutex);
10828 spin_lock(&fs_info->trans_lock);
10829 trans = fs_info->running_transaction;
10831 refcount_inc(&trans->use_count);
10832 spin_unlock(&fs_info->trans_lock);
10835 up_read(&fs_info->commit_root_sem);
10837 ret = find_free_dev_extent_start(trans, device, minlen, start,
10840 up_read(&fs_info->commit_root_sem);
10841 btrfs_put_transaction(trans);
10845 mutex_unlock(&fs_info->chunk_mutex);
10846 if (ret == -ENOSPC)
10851 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10852 mutex_unlock(&fs_info->chunk_mutex);
10860 if (fatal_signal_pending(current)) {
10861 ret = -ERESTARTSYS;
10872 * Trim the whole filesystem by:
10873 * 1) trimming the free space in each block group
10874 * 2) trimming the unallocated space on each device
10876 * This will also continue trimming even if a block group or device encounters
10877 * an error. The return value will be the last error, or 0 if nothing bad happens.
10880 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
10882 struct btrfs_block_group_cache *cache = NULL;
10883 struct btrfs_device *device;
10884 struct list_head *devices;
10890 u64 dev_failed = 0;
10895 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10896 for (; cache; cache = next_block_group(fs_info, cache)) {
10897 if (cache->key.objectid >= (range->start + range->len)) {
10898 btrfs_put_block_group(cache);
10902 start = max(range->start, cache->key.objectid);
10903 end = min(range->start + range->len,
10904 cache->key.objectid + cache->key.offset);
10906 if (end - start >= range->minlen) {
10907 if (!block_group_cache_done(cache)) {
10908 ret = cache_block_group(cache, 0);
10914 ret = wait_block_group_cache_done(cache);
10921 ret = btrfs_trim_block_group(cache,
10927 trimmed += group_trimmed;
10937 btrfs_warn(fs_info,
10938 "failed to trim %llu block group(s), last error %d",
10939 bg_failed, bg_ret);
10940 mutex_lock(&fs_info->fs_devices->device_list_mutex);
10941 devices = &fs_info->fs_devices->devices;
10942 list_for_each_entry(device, devices, dev_list) {
10943 ret = btrfs_trim_free_extents(device, range->minlen,
10951 trimmed += group_trimmed;
10953 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
10956 btrfs_warn(fs_info,
10957 "failed to trim %llu device(s), last error %d",
10958 dev_failed, dev_ret);
10959 range->len = trimmed;
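/*
 * A minimal usage sketch (the helper is made up for illustration; the
 * real entry point is the FITRIM ioctl): trim every block group and all
 * unallocated device space in one call.
 */
static int example_trim_everything(struct btrfs_fs_info *fs_info)
{
	struct fstrim_range range = {
		.start = 0,
		.len = U64_MAX,	/* cover the whole filesystem */
		.minlen = 0,	/* no minimum extent length */
	};

	return btrfs_trim_fs(fs_info, &range);
}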
10966 * btrfs_{start,end}_write_no_snapshotting() are similar to
10967 * mnt_{want,drop}_write(), they are used to prevent some tasks from writing
10968 * data into the page cache through nocow before the subvolume is snapshotted,
10969 * but flush the data into disk after the snapshot creation, or to prevent
10970 * operations that would make the snapshot inconsistent (writes followed by
10971 * expanding truncates for example) while snapshotting is ongoing.
10973 void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
10975 percpu_counter_dec(&root->subv_writers->counter);
10976 cond_wake_up(&root->subv_writers->wait);
10979 int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
10981 if (atomic_read(&root->will_be_snapshotted))
10984 percpu_counter_inc(&root->subv_writers->counter);
10986 * Make sure counter is updated before we check for snapshot creation.
10989 if (atomic_read(&root->will_be_snapshotted)) {
10990 btrfs_end_write_no_snapshotting(root);
10996 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
11001 ret = btrfs_start_write_no_snapshotting(root);
11004 wait_var_event(&root->will_be_snapshotted,
11005 !atomic_read(&root->will_be_snapshotted));
11009 void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
11011 struct btrfs_fs_info *fs_info = bg->fs_info;
11013 spin_lock(&fs_info->unused_bgs_lock);
11014 if (list_empty(&bg->bg_list)) {
11015 btrfs_get_block_group(bg);
11016 trace_btrfs_add_unused_block_group(bg);
11017 list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
11019 spin_unlock(&fs_info->unused_bgs_lock);
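/*
 * Note: the bg_list/unused_bgs linkage added here is what
 * btrfs_delete_unused_bgs() above consumes; taking a block group
 * reference before queueing keeps the cache alive until that worker
 * drops it.
 */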