/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "print-tree.h"
#include "transaction.h"
#include "free-space-cache.h"

#undef SCRAMBLE_DELAYED_REFS
/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 */
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
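/*
 * Illustrative note (editor's sketch, not part of the original file):
 * callers escalate through these levels.  A clustered allocation that
 * merely wants a healthy pool of chunks would pass CHUNK_ALLOC_LIMITED,
 * while an ENOSPC-pressure path insists on a new chunk, e.g.:
 *
 *	do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_FORCE);
 */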
/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *		   the space info.
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *			      bytes_may_use as the ENOSPC accounting is done
 *			      elsewhere
 */
        RESERVE_ALLOC_NO_ACCOUNT = 2,
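/*
 * Illustrative pairing (editor's sketch, not part of the original file):
 * a reservation taken at allocation time is handed back with
 * RESERVE_FREE, through the helper declared below:
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *	...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */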
static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);
block_group_cache_done(struct btrfs_block_group_cache *cache)
        return cache->cached == BTRFS_CACHE_FINISHED;

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
        return (cache->flags & bits) == bits;

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
        atomic_inc(&cache->count);

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

                cache = rb_entry(parent, struct btrfs_block_group_cache,
                if (block_group->key.objectid < cache->key.objectid) {
                } else if (block_group->key.objectid > cache->key.objectid) {
                        spin_unlock(&info->block_group_cache_lock);

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
        struct btrfs_block_group_cache *cache, *ret = NULL;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

                cache = rb_entry(n, struct btrfs_block_group_cache,
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {

                btrfs_get_block_group(ret);
        if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                info->first_logical_byte = ret->key.objectid;

        spin_unlock(&info->block_group_cache_lock);
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);

                        if (logical[nr] > cache->key.objectid +
                        if (logical[nr] + stripe_len <= cache->key.objectid)
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);

static void put_caching_control(struct btrfs_caching_control *ctl)
        if (atomic_dec_and_test(&ctl->count))

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, because their free space will not be released until the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
        u64 extent_start, extent_end, size, total_added = 0;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;

                        ret = btrfs_add_free_space(block_group, start,
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;

                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
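/*
 * Worked example (editor's illustration, not part of the original file):
 * caching the range [0, 100) while a pinned extent covers [40, 60)
 * produces free space entries for [0, 40) and [60, 100) only; the
 * pinned bytes rejoin the free space pool when the transaction commits
 * and unpins them.
 */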
static noinline void caching_thread(struct btrfs_work *work)
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;

        key.type = BTRFS_EXTENT_ITEM_KEY;

        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

                if (btrfs_fs_closing(fs_info) > 1) {

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                        ret = find_next_key(path, 0, &key);

                        if (need_resched()) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);

                        ret = btrfs_next_leaf(extent_root, path);
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);

                if (key.objectid < block_group->key.objectid) {

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,

                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                wake_up(&caching_ctl->wait);

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);

        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
static int cache_block_group(struct btrfs_block_group_cache *cache,
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it could happen in the case
         * where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this
         * block group, but if we've moved to the state where we will wait on
         * caching block groups we need to first check if we're doing a fast
         * load here, so we can wait for it to finish, otherwise we could end
         * up allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);

        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                                cache->cached = BTRFS_CACHE_STARTED;
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);

                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);

                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wake up any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                        cache->cached = BTRFS_CACHE_STARTED;
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);

        if (load_cache_only) {
                put_caching_control(caching_ctl);

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
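/*
 * Illustrative caller pattern (editor's sketch, not part of the original
 * file): a caller that needs the free space fully loaded kicks off
 * caching and then sleeps until block_group_cache_done() says so:
 *
 *	cache_block_group(cache, 0);
 *	caching_ctl = get_caching_control(cache);
 *	if (caching_ctl) {
 *		wait_event(caching_ctl->wait,
 *			   block_group_cache_done(cache));
 *		put_caching_control(caching_ctl);
 *	}
 */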
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        list_for_each_entry_rcu(found, head, list)

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();

        key.objectid = start;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
        if (key.objectid == start &&
            key.type == BTRFS_METADATA_ITEM_KEY)
        btrfs_free_path(path);
/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be if all of the
 * delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different.
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;

        path = btrfs_alloc_path();

                key.objectid = bytenr;
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.objectid = bytenr;
                key.type = BTRFS_EXTENT_ITEM_KEY;

                path->skip_locking = 1;
                path->search_commit_root = 1;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = root->leafsize;
                btrfs_release_path(path);

                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;

                BUG_ON(num_refs == 0);

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again.
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);

                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;

                BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);

        spin_unlock(&delayed_refs->lock);

        WARN_ON(num_refs == 0);

                *flags = extent_flags;

        btrfs_free_path(path);
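/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * callers that walk down a tree ask for the "as if all delayed refs ran"
 * view before deciding whether a block is exclusively owned:
 *
 *	u64 refs, flags;
 *
 *	ret = btrfs_lookup_extent_info(trans, root, bytenr, level, 1,
 *				       &refs, &flags);
 *	if (!ret && refs == 1)
 *		... the block can be handled without back ref conversion ...
 */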
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead: every time a tree block
 * gets COWed, we have to update the back ref entries for all the pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
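/*
 * Editor's sketch (not part of the original file): making the two tree
 * block back ref keys described above concrete.  The helper is
 * hypothetical and only illustrates the key layout; it mirrors what
 * lookup_tree_block_ref() does further down.
 */
static inline void sketch_tree_ref_key(struct btrfs_key *key, u64 bytenr,
                                       u64 parent, u64 root_objectid)
{
        key->objectid = bytenr;         /* first byte of the extent */
        if (parent) {
                /* full back ref: offset is the parent block's bytenr */
                key->type = BTRFS_SHARED_BLOCK_REF_KEY;
                key->offset = parent;
        } else {
                /* implicit back ref: offset is the owner tree's objectid */
                key->type = BTRFS_TREE_BLOCK_REF_KEY;
                key->offset = root_objectid;
        }
}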
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        btrfs_mark_buffer_dirty(leaf);

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
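/*
 * Editor's note (not part of the original file): the root CRC and the
 * owner/offset CRC are folded as (high << 31) ^ low, so the two halves
 * deliberately overlap by one bit; e.g. high_crc = 0x1, low_crc = 0x2
 * gives 0x80000002.  The result lands in on-disk key offsets, so this
 * layout is effectively part of the disk format.
 */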
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 owner, u64 offset)
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;

        key.objectid = bytenr;
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        key.type = BTRFS_EXTENT_REF_V0_KEY;
        btrfs_release_path(path);
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                        btrfs_release_path(path);

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
        struct btrfs_key key;
        struct extent_buffer *leaf;

        key.objectid = bytenr;
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                size = sizeof(struct btrfs_extent_data_ref);

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)

        leaf = path->nodes[0];
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                        btrfs_release_path(path);
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                        if (ret && ret != -EEXIST)

                        leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                        btrfs_set_extent_data_ref_root(leaf, ref,
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
        btrfs_mark_buffer_dirty(leaf);

        btrfs_release_path(path);

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                btrfs_mark_buffer_dirty(leaf);

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
        struct btrfs_key key;

        key.objectid = bytenr;
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
        struct btrfs_key key;

        key.objectid = bytenr;
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(path);

static inline int extent_ref_type(u64 parent, u64 owner)
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                        type = BTRFS_TREE_BLOCK_REF_KEY;
                        type = BTRFS_SHARED_DATA_REF_KEY;
                        type = BTRFS_EXTENT_DATA_REF_KEY;

static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
/*
 * look for an inline back ref.  if the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int insert)
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        bool skinny_metadata = btrfs_fs_incompat(root->fs_info,

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
                extra_size = btrfs_extent_inline_ref_size(want);
                path->keep_locks = 1;

        /*
         * Owner is our parent level, so we can just add one to get the level
         * for the block we are interested in.
         */
        if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
                key.type = BTRFS_METADATA_ITEM_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);

        /*
         * We may be a newly converted file system which still has the old fat
         * extent entries for metadata, so try and see if we have one of those.
         */
        if (ret > 0 && skinny_metadata) {
                skinny_metadata = false;
                if (path->slots[0]) {
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == num_bytes)
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = num_bytes;
                btrfs_release_path(path);

        if (ret && !insert) {

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                ret = convert_extent_item_v0(trans, root, path, owner,
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);

        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
                ptr += sizeof(struct btrfs_tree_block_info);

                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_extent_inline_ref_type(leaf, iref);
                        ptr += btrfs_extent_inline_ref_size(type);

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                                if (parent == ref_offset) {
                                if (ref_offset < parent)
                                if (root_objectid == ref_offset) {
                                if (ref_offset < root_objectid)
                ptr += btrfs_extent_inline_ref_size(type);

        if (err == -ENOENT && insert) {
                if (item_size + extra_size >=
                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
                /*
                 * To add a new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add a new inline back
                 * ref if there is any kind of item for this block
                 */
                if (find_next_key(path, 0, &key) == 0 &&
                    key.objectid == bytenr &&
                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;

        path->keep_locks = 0;
        btrfs_unlock_up_safe(path, 1);
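/*
 * Illustrative caller pattern (editor's sketch, not part of the original
 * file), matching the contract documented above:
 *
 *	ret = lookup_inline_extent_backref(..., 1);
 *	if (ret == 0)
 *		update the inline ref at *ref_ret in place;
 *	else if (ret == -ENOENT)
 *		splice a new inline ref in at *ref_ret;
 *	else if (ret == -EAGAIN)
 *		fall back to a separate keyed back ref item;
 *
 * insert_inline_extent_backref() below handles the first two cases, and
 * __btrfs_inc_extent_ref() handles the -EAGAIN fallback.
 */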
/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref *iref,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int refs_to_add,
                                 struct btrfs_delayed_extent_op *extent_op)
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        unsigned long item_offset;

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        item_offset = (unsigned long)iref - (unsigned long)ei;

        type = extent_ref_type(parent, owner);
        size = btrfs_extent_inline_ref_size(type);

        btrfs_extend_item(root, path, size);

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        refs += refs_to_add;
        btrfs_set_extent_refs(leaf, ei, refs);
                __run_delayed_extent_op(extent_op, leaf, ei);

        ptr = (unsigned long)ei + item_offset;
        end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
        if (ptr < end - size)
                memmove_extent_buffer(leaf, ptr + size, ptr,

        iref = (struct btrfs_extent_inline_ref *)ptr;
        btrfs_set_extent_inline_ref_type(leaf, iref, type);
        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                struct btrfs_extent_data_ref *dref;
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
                btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
                btrfs_set_extent_data_ref_offset(leaf, dref, offset);
                btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                struct btrfs_shared_data_ref *sref;
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
        btrfs_mark_buffer_dirty(leaf);
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes, u64 parent,
                                 u64 root_objectid, u64 owner, u64 offset)
        ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
                                           bytenr, num_bytes, parent,
                                           root_objectid, owner, offset, 0);

        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
                ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
                                             root_objectid, owner, offset);

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  struct btrfs_extent_inline_ref *iref,
                                  struct btrfs_delayed_extent_op *extent_op)
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_data_ref *dref = NULL;
        struct btrfs_shared_data_ref *sref = NULL;

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
        refs += refs_to_mod;
        btrfs_set_extent_refs(leaf, ei, refs);
                __run_delayed_extent_op(extent_op, leaf, ei);

        type = btrfs_extent_inline_ref_type(leaf, iref);

        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                refs = btrfs_extent_data_ref_count(leaf, dref);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                refs = btrfs_shared_data_ref_count(leaf, sref);
                BUG_ON(refs_to_mod != -1);

        BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
        refs += refs_to_mod;

                if (type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, dref, refs);
                        btrfs_set_shared_data_ref_count(leaf, sref, refs);
                size = btrfs_extent_inline_ref_size(type);
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                ptr = (unsigned long)iref;
                end = (unsigned long)ei + item_size;
                if (ptr + size < end)
                        memmove_extent_buffer(leaf, ptr, ptr + size,
                btrfs_truncate_item(root, path, item_size, 1);
        btrfs_mark_buffer_dirty(leaf);
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 u64 bytenr, u64 num_bytes, u64 parent,
                                 u64 root_objectid, u64 owner,
                                 u64 offset, int refs_to_add,
                                 struct btrfs_delayed_extent_op *extent_op)
        struct btrfs_extent_inline_ref *iref;

        ret = lookup_inline_extent_backref(trans, root, path, &iref,
                                           bytenr, num_bytes, parent,
                                           root_objectid, owner, offset, 1);
                BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
                update_inline_extent_backref(root, path, iref,
                                             refs_to_add, extent_op);
        } else if (ret == -ENOENT) {
                setup_inline_extent_backref(root, path, iref, parent,
                                            root_objectid, owner, offset,
                                            refs_to_add, extent_op);

static int insert_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 u64 bytenr, u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int refs_to_add)
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                BUG_ON(refs_to_add != 1);
                ret = insert_tree_block_ref(trans, root, path, bytenr,
                                            parent, root_objectid);
                ret = insert_extent_data_ref(trans, root, path, bytenr,
                                             parent, root_objectid,
                                             owner, offset, refs_to_add);

static int remove_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref *iref,
                                 int refs_to_drop, int is_data)
        BUG_ON(!is_data && refs_to_drop != 1);
                update_inline_extent_backref(root, path, iref,
                                             -refs_to_drop, NULL);
        } else if (is_data) {
                ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
                ret = btrfs_del_item(trans, root, path);

static int btrfs_issue_discard(struct block_device *bdev,
        return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
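/*
 * Worked example (editor's note, not part of the original file): start
 * and len arrive in bytes and blkdev_issue_discard() wants 512-byte
 * sectors, hence the >> 9.  A 64KiB discard at byte offset 1MiB becomes
 * sector 2048 with a count of 128 sectors.
 */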
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                                u64 num_bytes, u64 *actual_bytes)
        u64 discarded_bytes = 0;
        struct btrfs_bio *bbio = NULL;

        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
                              bytenr, &num_bytes, &bbio, 0);
        /* Error condition is -ENOMEM */
                struct btrfs_bio_stripe *stripe = bbio->stripes;

                for (i = 0; i < bbio->num_stripes; i++, stripe++) {
                        if (!stripe->dev->can_discard)

                        ret = btrfs_issue_discard(stripe->dev->bdev,
                                discarded_bytes += stripe->length;
                        else if (ret != -EOPNOTSUPP)
                                break; /* Logic errors or -ENOMEM, or -EIO,
                                          but I don't know how that could
                                          happen JDM */

                        /*
                         * Just in case we get back EOPNOTSUPP for some reason,
                         * just ignore the return value so we don't screw up
                         * people calling discard_extent.
                         */
                *actual_bytes = discarded_bytes;

        if (ret == -EOPNOTSUPP)

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
                         u64 root_objectid, u64 owner, u64 offset, int for_cow)
        struct btrfs_fs_info *fs_info = root->fs_info;

        BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
               root_objectid == BTRFS_TREE_LOG_OBJECTID);

        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
                                        parent, root_objectid, (int)owner,
                                        BTRFS_ADD_DELAYED_REF, NULL, for_cow);
                ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
                                        parent, root_objectid, owner, offset,
                                        BTRFS_ADD_DELAYED_REF, NULL, for_cow);

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  u64 bytenr, u64 num_bytes,
                                  u64 parent, u64 root_objectid,
                                  u64 owner, u64 offset, int refs_to_add,
                                  struct btrfs_delayed_extent_op *extent_op)
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *item;

        path = btrfs_alloc_path();

        path->leave_spinning = 1;
        /* this will set up the path even if it fails to insert the back ref */
        ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
                                           path, bytenr, num_bytes, parent,
                                           root_objectid, owner, offset,
                                           refs_to_add, extent_op);
        if (ret != -EAGAIN) {

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, item);
        btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
                __run_delayed_extent_op(extent_op, leaf, item);

        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        path->leave_spinning = 1;

        /* now insert the actual backref */
        ret = insert_extent_backref(trans, root->fs_info->extent_root,
                                    path, bytenr, parent, root_objectid,
                                    owner, offset, refs_to_add);
                btrfs_abort_transaction(trans, root, ret);
        btrfs_free_path(path);
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node,
                                struct btrfs_delayed_extent_op *extent_op,
                                int insert_reserved)
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_key ins;

        ins.objectid = node->bytenr;
        ins.offset = node->num_bytes;
        ins.type = BTRFS_EXTENT_ITEM_KEY;

        ref = btrfs_delayed_node_to_data_ref(node);
        if (node->type == BTRFS_SHARED_DATA_REF_KEY)
                parent = ref->parent;
                ref_root = ref->root;

        if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
                        flags |= extent_op->flags_to_set;
                ret = alloc_reserved_file_extent(trans, root,
                                                 parent, ref_root, flags,
                                                 ref->objectid, ref->offset,
                                                 &ins, node->ref_mod);
        } else if (node->action == BTRFS_ADD_DELAYED_REF) {
                ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
                                             node->num_bytes, parent,
                                             ref_root, ref->objectid,
                                             ref->offset, node->ref_mod,
        } else if (node->action == BTRFS_DROP_DELAYED_REF) {
                ret = __btrfs_free_extent(trans, root, node->bytenr,
                                          node->num_bytes, parent,
                                          ref_root, ref->objectid,
                                          ref->offset, node->ref_mod,

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei)
        u64 flags = btrfs_extent_flags(leaf, ei);
        if (extent_op->update_flags) {
                flags |= extent_op->flags_to_set;
                btrfs_set_extent_flags(leaf, ei, flags);

        if (extent_op->update_key) {
                struct btrfs_tree_block_info *bi;
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
                bi = (struct btrfs_tree_block_info *)(ei + 1);
                btrfs_set_tree_block_key(leaf, bi, &extent_op->key);

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_delayed_ref_node *node,
                                 struct btrfs_delayed_extent_op *extent_op)
        struct btrfs_key key;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        int metadata = !extent_op->is_data;

        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))

        path = btrfs_alloc_path();

        key.objectid = node->bytenr;

                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = extent_op->level;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = node->num_bytes;

        path->leave_spinning = 1;
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,

                        btrfs_release_path(path);

                        key.offset = node->num_bytes;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);

        BUG_ON(item_size < sizeof(*ei));
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        __run_delayed_extent_op(extent_op, leaf, ei);

        btrfs_mark_buffer_dirty(leaf);

        btrfs_free_path(path);

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node,
                                struct btrfs_delayed_extent_op *extent_op,
                                int insert_reserved)
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_key ins;
        bool skinny_metadata = btrfs_fs_incompat(root->fs_info,

        ref = btrfs_delayed_node_to_tree_ref(node);
        if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
                parent = ref->parent;
                ref_root = ref->root;

        ins.objectid = node->bytenr;
        if (skinny_metadata) {
                ins.offset = ref->level;
                ins.type = BTRFS_METADATA_ITEM_KEY;
                ins.offset = node->num_bytes;
                ins.type = BTRFS_EXTENT_ITEM_KEY;

        BUG_ON(node->ref_mod != 1);
        if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
                BUG_ON(!extent_op || !extent_op->update_flags);
                ret = alloc_reserved_tree_block(trans, root,
                                                extent_op->flags_to_set,
        } else if (node->action == BTRFS_ADD_DELAYED_REF) {
                ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
                                             node->num_bytes, parent, ref_root,
                                             ref->level, 0, 1, extent_op);
        } else if (node->action == BTRFS_DROP_DELAYED_REF) {
                ret = __btrfs_free_extent(trans, root, node->bytenr,
                                          node->num_bytes, parent, ref_root,
                                          ref->level, 0, 1, extent_op);
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct btrfs_delayed_ref_node *node,
                               struct btrfs_delayed_extent_op *extent_op,
                               int insert_reserved)
        if (btrfs_delayed_ref_is_head(node)) {
                struct btrfs_delayed_ref_head *head;
                /*
                 * we've hit the end of the chain and we were supposed
                 * to insert this extent into the tree.  But, it got
                 * deleted before we ever needed to insert it, so all
                 * we have to do is clean up the accounting
                 */
                head = btrfs_delayed_node_to_head(node);
                if (insert_reserved) {
                        btrfs_pin_extent(root, node->bytenr,
                                         node->num_bytes, 1);
                        if (head->is_data) {
                                ret = btrfs_del_csums(trans, root,

        if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
            node->type == BTRFS_SHARED_BLOCK_REF_KEY)
                ret = run_delayed_tree_ref(trans, root, node, extent_op,
        else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
                 node->type == BTRFS_SHARED_DATA_REF_KEY)
                ret = run_delayed_data_ref(trans, root, node, extent_op,

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
        struct rb_node *node;
        struct btrfs_delayed_ref_node *ref;
        int action = BTRFS_ADD_DELAYED_REF;
        /*
         * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
         * This prevents the ref count from going down to zero while
         * there are still pending delayed refs.
         */
        node = rb_prev(&head->node.rb_node);
                ref = rb_entry(node, struct btrfs_delayed_ref_node,
                if (ref->bytenr != head->node.bytenr)
                if (ref->action == action)
                node = rb_prev(node);
        if (action == BTRFS_ADD_DELAYED_REF) {
                action = BTRFS_DROP_DELAYED_REF;
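/*
 * Worked example (editor's note, not part of the original file): with a
 * pending -1 drop and a pending +1 add queued against the same bytenr,
 * running the add first keeps the ref count from touching zero in
 * between, so the extent is never freed and then resurrected within a
 * single batch.
 */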
2272 * Returns 0 on success or if called with an already aborted transaction.
2273 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2275 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2276 struct btrfs_root *root,
2277 struct list_head *cluster)
2279 struct btrfs_delayed_ref_root *delayed_refs;
2280 struct btrfs_delayed_ref_node *ref;
2281 struct btrfs_delayed_ref_head *locked_ref = NULL;
2282 struct btrfs_delayed_extent_op *extent_op;
2283 struct btrfs_fs_info *fs_info = root->fs_info;
2286 int must_insert_reserved = 0;
2288 delayed_refs = &trans->transaction->delayed_refs;
2291 /* pick a new head ref from the cluster list */
2292 if (list_empty(cluster))
2295 locked_ref = list_entry(cluster->next,
2296 struct btrfs_delayed_ref_head, cluster);
2298 /* grab the lock that says we are going to process
2299 * all the refs for this head */
2300 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2303 * we may have dropped the spin lock to get the head
2304 * mutex lock, and that might have given someone else
2305 * time to free the head. If that's true, it has been
2306 * removed from our list and we can move on.
2308 if (ret == -EAGAIN) {
2316 * We need to try and merge add/drops of the same ref since we
2317 * can run into issues with relocate dropping the implicit ref
2318 * and then it being added back again before the drop can
2319 * finish. If we merged anything we need to re-loop so we can
2322 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2326 * locked_ref is the head node, so we have to go one
2327 * node back for any delayed ref updates
2329 ref = select_delayed_ref(locked_ref);
2331 if (ref && ref->seq &&
2332 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2334 * there are still refs with lower seq numbers in the
2335 * process of being added. Don't run this ref yet.
2337 list_del_init(&locked_ref->cluster);
2338 btrfs_delayed_ref_unlock(locked_ref);
2340 delayed_refs->num_heads_ready++;
2341 spin_unlock(&delayed_refs->lock);
2343 spin_lock(&delayed_refs->lock);
2348 * record the must insert reserved flag before we
2349 * drop the spin lock.
2351 must_insert_reserved = locked_ref->must_insert_reserved;
2352 locked_ref->must_insert_reserved = 0;
2354 extent_op = locked_ref->extent_op;
2355 locked_ref->extent_op = NULL;
2358 /* All delayed refs have been processed, go ahead
2359 * and send the head node to run_one_delayed_ref,
2360 * so that any accounting fixes can happen
2362 ref = &locked_ref->node;
2364 if (extent_op && must_insert_reserved) {
2365 btrfs_free_delayed_extent_op(extent_op);
2370 spin_unlock(&delayed_refs->lock);
2372 ret = run_delayed_extent_op(trans, root,
2374 btrfs_free_delayed_extent_op(extent_op);
2377 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2378 spin_lock(&delayed_refs->lock);
2379 btrfs_delayed_ref_unlock(locked_ref);
2388 rb_erase(&ref->rb_node, &delayed_refs->root);
2389 delayed_refs->num_entries--;
2390 if (!btrfs_delayed_ref_is_head(ref)) {
2392 * when we play the delayed ref, also correct the ref_mod on the head.
2395 switch (ref->action) {
2396 case BTRFS_ADD_DELAYED_REF:
2397 case BTRFS_ADD_DELAYED_EXTENT:
2398 locked_ref->node.ref_mod -= ref->ref_mod;
2400 case BTRFS_DROP_DELAYED_REF:
2401 locked_ref->node.ref_mod += ref->ref_mod;
2407 spin_unlock(&delayed_refs->lock);
2409 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2410 must_insert_reserved);
2412 btrfs_free_delayed_extent_op(extent_op);
2414 btrfs_delayed_ref_unlock(locked_ref);
2415 btrfs_put_delayed_ref(ref);
2416 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2417 spin_lock(&delayed_refs->lock);
2422 * If this node is a head, that means all the refs in this head
2423 * have been dealt with, and we will pick the next head to deal
2424 * with, so we must unlock the head and drop it from the cluster
2425 * list before we release it.
2427 if (btrfs_delayed_ref_is_head(ref)) {
2428 list_del_init(&locked_ref->cluster);
2429 btrfs_delayed_ref_unlock(locked_ref);
2432 btrfs_put_delayed_ref(ref);
2436 spin_lock(&delayed_refs->lock);
2441 #ifdef SCRAMBLE_DELAYED_REFS
2443 * Normally delayed refs get processed in ascending bytenr order. This
2444 * correlates in most cases to the order added. To expose dependencies on this
2445 * order, we start to process the tree in the middle instead of the beginning
2447 static u64 find_middle(struct rb_root *root)
2449 struct rb_node *n = root->rb_node;
2450 struct btrfs_delayed_ref_node *entry;
2453 u64 first = 0, last = 0;
2457 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2458 first = entry->bytenr;
2462 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2463 last = entry->bytenr;
2468 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2469 WARN_ON(!entry->in_tree);
2471 middle = entry->bytenr;
2484 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2485 struct btrfs_fs_info *fs_info)
2487 struct qgroup_update *qgroup_update;
2490 if (list_empty(&trans->qgroup_ref_list) !=
2491 !trans->delayed_ref_elem.seq) {
2492 /* list without seq or seq without list */
2494 "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2495 list_empty(&trans->qgroup_ref_list) ? "" : " not",
2496 (u32)(trans->delayed_ref_elem.seq >> 32),
2497 (u32)trans->delayed_ref_elem.seq);
2501 if (!trans->delayed_ref_elem.seq)
2504 while (!list_empty(&trans->qgroup_ref_list)) {
2505 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2506 struct qgroup_update, list);
2507 list_del(&qgroup_update->list);
2509 ret = btrfs_qgroup_account_ref(
2510 trans, fs_info, qgroup_update->node,
2511 qgroup_update->extent_op);
2512 kfree(qgroup_update);
2515 btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2520 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2523 int val = atomic_read(&delayed_refs->ref_seq);
2525 if (val < seq || val >= seq + count)
2530 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2534 num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2535 sizeof(struct btrfs_extent_inline_ref));
2536 if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2537 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2540 * We don't ever fill up leaves all the way so multiply by 2 just to be
2541 * closer to what we're really going to want to use.
2543 return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
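/*
 * Ballpark figures (an illustrative sketch, assuming 4KiB leaves and a
 * filesystem without SKINNY_METADATA): each head costs roughly 50 bytes of
 * leaf space (24-byte extent item + 9-byte inline ref + 18-byte tree block
 * info), so on the order of 80 heads map to a single leaf of expected churn.
 */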
2546 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2547 struct btrfs_root *root)
2549 struct btrfs_block_rsv *global_rsv;
2550 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2554 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2555 num_heads = heads_to_leaves(root, num_heads);
2557 num_bytes += (num_heads - 1) * root->leafsize;
2559 global_rsv = &root->fs_info->global_block_rsv;
2562 * If we can't allocate any more chunks let's make sure we have _lots_ of
2563 * wiggle room since running delayed refs can create more delayed refs.
2565 if (global_rsv->space_info->full)
2568 spin_lock(&global_rsv->lock);
2569 if (global_rsv->reserved <= num_bytes)
2571 spin_unlock(&global_rsv->lock);
2576 * this starts processing the delayed reference count updates and
2577 * extent insertions we have queued up so far. count can be
2578 * 0, which means to process everything in the tree at the start
2579 * of the run (but not newly added entries), or it can be some target
2580 * number you'd like to process.
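*
* For example (illustrative): a caller that must drain every queued update
* passes (unsigned long)-1, which turns on run_all below, while routine
* throttling passes a finite count and stops once that many refs have run.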
2582 * Returns 0 on success or if called with an aborted transaction
2583 * Returns <0 on error and aborts the transaction
2585 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2586 struct btrfs_root *root, unsigned long count)
2588 struct rb_node *node;
2589 struct btrfs_delayed_ref_root *delayed_refs;
2590 struct btrfs_delayed_ref_node *ref;
2591 struct list_head cluster;
2594 int run_all = count == (unsigned long)-1;
2598 /* We'll clean this up in btrfs_cleanup_transaction */
2602 if (root == root->fs_info->extent_root)
2603 root = root->fs_info->tree_root;
2605 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2607 delayed_refs = &trans->transaction->delayed_refs;
2608 INIT_LIST_HEAD(&cluster);
2610 count = delayed_refs->num_entries * 2;
2614 if (!run_all && !run_most) {
2616 int seq = atomic_read(&delayed_refs->ref_seq);
2619 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2621 DEFINE_WAIT(__wait);
2622 if (delayed_refs->flushing ||
2623 !btrfs_should_throttle_delayed_refs(trans, root))
2626 prepare_to_wait(&delayed_refs->wait, &__wait,
2627 TASK_UNINTERRUPTIBLE);
2629 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2632 finish_wait(&delayed_refs->wait, &__wait);
2634 if (!refs_newer(delayed_refs, seq, 256))
2639 finish_wait(&delayed_refs->wait, &__wait);
2645 atomic_inc(&delayed_refs->procs_running_refs);
2650 spin_lock(&delayed_refs->lock);
2652 #ifdef SCRAMBLE_DELAYED_REFS
2653 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2657 if (!(run_all || run_most) &&
2658 !btrfs_should_throttle_delayed_refs(trans, root))
2662 * go find something we can process in the rbtree. We start at
2663 * the beginning of the tree, and then build a cluster
2664 * of refs to process starting at the first one we are able to lock.
2667 delayed_start = delayed_refs->run_delayed_start;
2668 ret = btrfs_find_ref_cluster(trans, &cluster,
2669 delayed_refs->run_delayed_start);
2673 ret = run_clustered_refs(trans, root, &cluster);
2675 btrfs_release_ref_cluster(&cluster);
2676 spin_unlock(&delayed_refs->lock);
2677 btrfs_abort_transaction(trans, root, ret);
2678 atomic_dec(&delayed_refs->procs_running_refs);
2679 wake_up(&delayed_refs->wait);
2683 atomic_add(ret, &delayed_refs->ref_seq);
2685 count -= min_t(unsigned long, ret, count);
2690 if (delayed_start >= delayed_refs->run_delayed_start) {
2693 * btrfs_find_ref_cluster looped. let's do one
2694 * more cycle. if we don't run any delayed ref
2695 * during that cycle (because we can't because
2696 * all of them are blocked), bail out.
2701 * no runnable refs left, stop trying
2708 /* refs were run, let's reset staleness detection */
2714 if (!list_empty(&trans->new_bgs)) {
2715 spin_unlock(&delayed_refs->lock);
2716 btrfs_create_pending_block_groups(trans, root);
2717 spin_lock(&delayed_refs->lock);
2720 node = rb_first(&delayed_refs->root);
2723 count = (unsigned long)-1;
2726 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2728 if (btrfs_delayed_ref_is_head(ref)) {
2729 struct btrfs_delayed_ref_head *head;
2731 head = btrfs_delayed_node_to_head(ref);
2732 atomic_inc(&ref->refs);
2734 spin_unlock(&delayed_refs->lock);
2736 * Mutex was contended, block until it's
2737 * released and try again
2739 mutex_lock(&head->mutex);
2740 mutex_unlock(&head->mutex);
2742 btrfs_put_delayed_ref(ref);
2746 node = rb_next(node);
2748 spin_unlock(&delayed_refs->lock);
2749 schedule_timeout(1);
2753 atomic_dec(&delayed_refs->procs_running_refs);
2755 if (waitqueue_active(&delayed_refs->wait))
2756 wake_up(&delayed_refs->wait);
2758 spin_unlock(&delayed_refs->lock);
2759 assert_qgroups_uptodate(trans);
2763 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2764 struct btrfs_root *root,
2765 u64 bytenr, u64 num_bytes, u64 flags,
2766 int level, int is_data)
2768 struct btrfs_delayed_extent_op *extent_op;
2771 extent_op = btrfs_alloc_delayed_extent_op();
2775 extent_op->flags_to_set = flags;
2776 extent_op->update_flags = 1;
2777 extent_op->update_key = 0;
2778 extent_op->is_data = is_data ? 1 : 0;
2779 extent_op->level = level;
2781 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2782 num_bytes, extent_op);
2784 btrfs_free_delayed_extent_op(extent_op);
2788 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2789 struct btrfs_root *root,
2790 struct btrfs_path *path,
2791 u64 objectid, u64 offset, u64 bytenr)
2793 struct btrfs_delayed_ref_head *head;
2794 struct btrfs_delayed_ref_node *ref;
2795 struct btrfs_delayed_data_ref *data_ref;
2796 struct btrfs_delayed_ref_root *delayed_refs;
2797 struct rb_node *node;
2801 delayed_refs = &trans->transaction->delayed_refs;
2802 spin_lock(&delayed_refs->lock);
2803 head = btrfs_find_delayed_ref_head(trans, bytenr);
2807 if (!mutex_trylock(&head->mutex)) {
2808 atomic_inc(&head->node.refs);
2809 spin_unlock(&delayed_refs->lock);
2811 btrfs_release_path(path);
2814 * Mutex was contended, block until it's released and let the caller try again.
2817 mutex_lock(&head->mutex);
2818 mutex_unlock(&head->mutex);
2819 btrfs_put_delayed_ref(&head->node);
2823 node = rb_prev(&head->node.rb_node);
2827 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2829 if (ref->bytenr != bytenr)
2833 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2836 data_ref = btrfs_delayed_node_to_data_ref(ref);
2838 node = rb_prev(node);
2842 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2843 if (ref->bytenr == bytenr && ref->seq == seq)
2847 if (data_ref->root != root->root_key.objectid ||
2848 data_ref->objectid != objectid || data_ref->offset != offset)
2853 mutex_unlock(&head->mutex);
2855 spin_unlock(&delayed_refs->lock);
2859 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2860 struct btrfs_root *root,
2861 struct btrfs_path *path,
2862 u64 objectid, u64 offset, u64 bytenr)
2864 struct btrfs_root *extent_root = root->fs_info->extent_root;
2865 struct extent_buffer *leaf;
2866 struct btrfs_extent_data_ref *ref;
2867 struct btrfs_extent_inline_ref *iref;
2868 struct btrfs_extent_item *ei;
2869 struct btrfs_key key;
2873 key.objectid = bytenr;
2874 key.offset = (u64)-1;
2875 key.type = BTRFS_EXTENT_ITEM_KEY;
2877 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2880 BUG_ON(ret == 0); /* Corruption */
2883 if (path->slots[0] == 0)
2887 leaf = path->nodes[0];
2888 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2890 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2894 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2895 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2896 if (item_size < sizeof(*ei)) {
2897 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2901 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2903 if (item_size != sizeof(*ei) +
2904 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2907 if (btrfs_extent_generation(leaf, ei) <=
2908 btrfs_root_last_snapshot(&root->root_item))
2911 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2912 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2913 BTRFS_EXTENT_DATA_REF_KEY)
2916 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2917 if (btrfs_extent_refs(leaf, ei) !=
2918 btrfs_extent_data_ref_count(leaf, ref) ||
2919 btrfs_extent_data_ref_root(leaf, ref) !=
2920 root->root_key.objectid ||
2921 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2922 btrfs_extent_data_ref_offset(leaf, ref) != offset)
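/*
 * btrfs_cross_ref_exist() below combines the two checks above: it first
 * scans the committed extent tree via check_committed_ref(), then re-checks
 * the delayed refs via check_delayed_ref(), retrying while the latter
 * returns -EAGAIN because the head mutex was contended.
 */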
2930 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2931 struct btrfs_root *root,
2932 u64 objectid, u64 offset, u64 bytenr)
2934 struct btrfs_path *path;
2938 path = btrfs_alloc_path();
2943 ret = check_committed_ref(trans, root, path, objectid,
2945 if (ret && ret != -ENOENT)
2948 ret2 = check_delayed_ref(trans, root, path, objectid,
2950 } while (ret2 == -EAGAIN);
2952 if (ret2 && ret2 != -ENOENT) {
2957 if (ret != -ENOENT || ret2 != -ENOENT)
2960 btrfs_free_path(path);
2961 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
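/*
 * __btrfs_mod_ref() below walks every extent pointer in @buf and applies a
 * single operation to each: btrfs_inc_extent_ref() when @inc is set,
 * btrfs_free_extent() otherwise. btrfs_inc_ref() and btrfs_dec_ref() further
 * down are the thin wrappers callers actually use.
 */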
2966 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2967 struct btrfs_root *root,
2968 struct extent_buffer *buf,
2969 int full_backref, int inc, int for_cow)
2976 struct btrfs_key key;
2977 struct btrfs_file_extent_item *fi;
2981 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2982 u64, u64, u64, u64, u64, u64, int);
2984 ref_root = btrfs_header_owner(buf);
2985 nritems = btrfs_header_nritems(buf);
2986 level = btrfs_header_level(buf);
2988 if (!root->ref_cows && level == 0)
2992 process_func = btrfs_inc_extent_ref;
2994 process_func = btrfs_free_extent;
2997 parent = buf->start;
3001 for (i = 0; i < nritems; i++) {
3003 btrfs_item_key_to_cpu(buf, &key, i);
3004 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3006 fi = btrfs_item_ptr(buf, i,
3007 struct btrfs_file_extent_item);
3008 if (btrfs_file_extent_type(buf, fi) ==
3009 BTRFS_FILE_EXTENT_INLINE)
3011 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3015 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3016 key.offset -= btrfs_file_extent_offset(buf, fi);
3017 ret = process_func(trans, root, bytenr, num_bytes,
3018 parent, ref_root, key.objectid,
3019 key.offset, for_cow);
3023 bytenr = btrfs_node_blockptr(buf, i);
3024 num_bytes = btrfs_level_size(root, level - 1);
3025 ret = process_func(trans, root, bytenr, num_bytes,
3026 parent, ref_root, level - 1, 0,
3037 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3038 struct extent_buffer *buf, int full_backref, int for_cow)
3040 return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3043 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3044 struct extent_buffer *buf, int full_backref, int for_cow)
3046 return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3049 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3050 struct btrfs_root *root,
3051 struct btrfs_path *path,
3052 struct btrfs_block_group_cache *cache)
3055 struct btrfs_root *extent_root = root->fs_info->extent_root;
3057 struct extent_buffer *leaf;
3059 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3062 BUG_ON(ret); /* Corruption */
3064 leaf = path->nodes[0];
3065 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3066 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3067 btrfs_mark_buffer_dirty(leaf);
3068 btrfs_release_path(path);
3071 btrfs_abort_transaction(trans, root, ret);
3078 static struct btrfs_block_group_cache *
3079 next_block_group(struct btrfs_root *root,
3080 struct btrfs_block_group_cache *cache)
3082 struct rb_node *node;
3083 spin_lock(&root->fs_info->block_group_cache_lock);
3084 node = rb_next(&cache->cache_node);
3085 btrfs_put_block_group(cache);
3087 cache = rb_entry(node, struct btrfs_block_group_cache,
3089 btrfs_get_block_group(cache);
3092 spin_unlock(&root->fs_info->block_group_cache_lock);
3096 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3097 struct btrfs_trans_handle *trans,
3098 struct btrfs_path *path)
3100 struct btrfs_root *root = block_group->fs_info->tree_root;
3101 struct inode *inode = NULL;
3103 int dcs = BTRFS_DC_ERROR;
3109 * If this block group is smaller than 100 megs don't bother caching the block group.
3112 if (block_group->key.offset < (100 * 1024 * 1024)) {
3113 spin_lock(&block_group->lock);
3114 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3115 spin_unlock(&block_group->lock);
3120 inode = lookup_free_space_inode(root, block_group, path);
3121 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3122 ret = PTR_ERR(inode);
3123 btrfs_release_path(path);
3127 if (IS_ERR(inode)) {
3131 if (block_group->ro)
3134 ret = create_free_space_inode(root, trans, block_group, path);
3140 /* We've already set up this transaction, go ahead and exit */
3141 if (block_group->cache_generation == trans->transid &&
3142 i_size_read(inode)) {
3143 dcs = BTRFS_DC_SETUP;
3148 * We want to set the generation to 0, that way if anything goes wrong
3149 * from here on out we know not to trust this cache when we load up next time.
3152 BTRFS_I(inode)->generation = 0;
3153 ret = btrfs_update_inode(trans, root, inode);
3156 if (i_size_read(inode) > 0) {
3157 ret = btrfs_check_trunc_cache_free_space(root,
3158 &root->fs_info->global_block_rsv);
3162 ret = btrfs_truncate_free_space_cache(root, trans, path,
3168 spin_lock(&block_group->lock);
3169 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3170 !btrfs_test_opt(root, SPACE_CACHE)) {
3172 * don't bother trying to write stuff out _if_
3173 * a) we're not cached,
3174 * b) we were mounted with the nospace_cache option.
3176 dcs = BTRFS_DC_WRITTEN;
3177 spin_unlock(&block_group->lock);
3180 spin_unlock(&block_group->lock);
3183 * Try to preallocate enough space based on how big the block group is.
3184 * Keep in mind this has to include any pinned space which could end up
3185 * taking up quite a bit since it's not folded into the other space counters.
3188 num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3193 num_pages *= PAGE_CACHE_SIZE;
3195 ret = btrfs_check_data_free_space(inode, num_pages);
3199 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3200 num_pages, num_pages,
3203 dcs = BTRFS_DC_SETUP;
3204 btrfs_free_reserved_data_space(inode, num_pages);
3209 btrfs_release_path(path);
3211 spin_lock(&block_group->lock);
3212 if (!ret && dcs == BTRFS_DC_SETUP)
3213 block_group->cache_generation = trans->transid;
3214 block_group->disk_cache_state = dcs;
3215 spin_unlock(&block_group->lock);
3220 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3221 struct btrfs_root *root)
3223 struct btrfs_block_group_cache *cache;
3225 struct btrfs_path *path;
3228 path = btrfs_alloc_path();
3234 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3236 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3238 cache = next_block_group(root, cache);
3246 err = cache_save_setup(cache, trans, path);
3247 last = cache->key.objectid + cache->key.offset;
3248 btrfs_put_block_group(cache);
3253 err = btrfs_run_delayed_refs(trans, root,
3255 if (err) /* File system offline */
3259 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3261 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3262 btrfs_put_block_group(cache);
3268 cache = next_block_group(root, cache);
3277 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3278 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3280 last = cache->key.objectid + cache->key.offset;
3282 err = write_one_cache_group(trans, root, path, cache);
3283 if (err) /* File system offline */
3286 btrfs_put_block_group(cache);
3291 * I don't think this is needed since we're just marking our
3292 * preallocated extent as written, but just in case it can't hurt.
3296 err = btrfs_run_delayed_refs(trans, root,
3298 if (err) /* File system offline */
3302 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3305 * Really this shouldn't happen, but it could if we
3306 * couldn't write the entire preallocated extent and
3307 * splitting the extent resulted in a new block.
3310 btrfs_put_block_group(cache);
3313 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3315 cache = next_block_group(root, cache);
3324 err = btrfs_write_out_cache(root, trans, cache, path);
3327 * If we didn't have an error then the cache state is still
3328 * NEED_WRITE, so we can set it to WRITTEN.
3330 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3331 cache->disk_cache_state = BTRFS_DC_WRITTEN;
3332 last = cache->key.objectid + cache->key.offset;
3333 btrfs_put_block_group(cache);
3337 btrfs_free_path(path);
3341 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3343 struct btrfs_block_group_cache *block_group;
3346 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3347 if (!block_group || block_group->ro)
3350 btrfs_put_block_group(block_group);
3354 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3355 u64 total_bytes, u64 bytes_used,
3356 struct btrfs_space_info **space_info)
3358 struct btrfs_space_info *found;
3363 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3364 BTRFS_BLOCK_GROUP_RAID10))
3369 found = __find_space_info(info, flags);
3371 spin_lock(&found->lock);
3372 found->total_bytes += total_bytes;
3373 found->disk_total += total_bytes * factor;
3374 found->bytes_used += bytes_used;
3375 found->disk_used += bytes_used * factor;
3377 spin_unlock(&found->lock);
3378 *space_info = found;
3381 found = kzalloc(sizeof(*found), GFP_NOFS);
3385 ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3391 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3392 INIT_LIST_HEAD(&found->block_groups[i]);
3393 init_rwsem(&found->groups_sem);
3394 spin_lock_init(&found->lock);
3395 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3396 found->total_bytes = total_bytes;
3397 found->disk_total = total_bytes * factor;
3398 found->bytes_used = bytes_used;
3399 found->disk_used = bytes_used * factor;
3400 found->bytes_pinned = 0;
3401 found->bytes_reserved = 0;
3402 found->bytes_readonly = 0;
3403 found->bytes_may_use = 0;
3405 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3406 found->chunk_alloc = 0;
3408 init_waitqueue_head(&found->wait);
3409 *space_info = found;
3410 list_add_rcu(&found->list, &info->space_info);
3411 if (flags & BTRFS_BLOCK_GROUP_DATA)
3412 info->data_sinfo = found;
3416 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3418 u64 extra_flags = chunk_to_extended(flags) &
3419 BTRFS_EXTENDED_PROFILE_MASK;
3421 write_seqlock(&fs_info->profiles_lock);
3422 if (flags & BTRFS_BLOCK_GROUP_DATA)
3423 fs_info->avail_data_alloc_bits |= extra_flags;
3424 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3425 fs_info->avail_metadata_alloc_bits |= extra_flags;
3426 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3427 fs_info->avail_system_alloc_bits |= extra_flags;
3428 write_sequnlock(&fs_info->profiles_lock);
3432 * returns target flags in extended format or 0 if restripe for this
3433 * chunk_type is not in progress
3435 * should be called with either volume_mutex or balance_lock held
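*
* For example (illustrative): while a balance is converting data chunks to
* RAID1, querying with BTRFS_BLOCK_GROUP_DATA set returns
* BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 in extended format.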
3437 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3439 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3445 if (flags & BTRFS_BLOCK_GROUP_DATA &&
3446 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3447 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3448 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3449 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3450 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3451 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3452 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3453 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3460 * @flags: available profiles in extended format (see ctree.h)
3462 * Returns reduced profile in chunk format. If profile changing is in
3463 * progress (either running or paused) picks the target profile (if it's
3464 * already available), otherwise falls back to plain reducing.
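*
* For example (illustrative): on a two-device filesystem with no restripe in
* progress, (RAID1 | RAID0) reduces to RAID1: neither profile is masked out
* for two devices and RAID1 outranks RAID0 in the preference chain below.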
3466 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3469 * we add in the count of missing devices because we want
3470 * to make sure that any RAID levels on a degraded FS
3471 * continue to be honored.
3473 u64 num_devices = root->fs_info->fs_devices->rw_devices +
3474 root->fs_info->fs_devices->missing_devices;
3479 * see if restripe for this chunk_type is in progress, if so
3480 * try to reduce to the target profile
3482 spin_lock(&root->fs_info->balance_lock);
3483 target = get_restripe_target(root->fs_info, flags);
3485 /* pick target profile only if it's already available */
3486 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3487 spin_unlock(&root->fs_info->balance_lock);
3488 return extended_to_chunk(target);
3491 spin_unlock(&root->fs_info->balance_lock);
3493 /* First, mask out the RAID levels which aren't possible */
3494 if (num_devices == 1)
3495 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3496 BTRFS_BLOCK_GROUP_RAID5);
3497 if (num_devices < 3)
3498 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3499 if (num_devices < 4)
3500 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3502 tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3503 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3504 BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3507 if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3508 tmp = BTRFS_BLOCK_GROUP_RAID6;
3509 else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3510 tmp = BTRFS_BLOCK_GROUP_RAID5;
3511 else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3512 tmp = BTRFS_BLOCK_GROUP_RAID10;
3513 else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3514 tmp = BTRFS_BLOCK_GROUP_RAID1;
3515 else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3516 tmp = BTRFS_BLOCK_GROUP_RAID0;
3518 return extended_to_chunk(flags | tmp);
3521 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3526 seq = read_seqbegin(&root->fs_info->profiles_lock);
3528 if (flags & BTRFS_BLOCK_GROUP_DATA)
3529 flags |= root->fs_info->avail_data_alloc_bits;
3530 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3531 flags |= root->fs_info->avail_system_alloc_bits;
3532 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3533 flags |= root->fs_info->avail_metadata_alloc_bits;
3534 } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3536 return btrfs_reduce_alloc_profile(root, flags);
3539 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3545 flags = BTRFS_BLOCK_GROUP_DATA;
3546 else if (root == root->fs_info->chunk_root)
3547 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3549 flags = BTRFS_BLOCK_GROUP_METADATA;
3551 ret = get_alloc_profile(root, flags);
3556 * This will check the space that the inode allocates from to make sure we have
3557 * enough space for bytes.
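*
* Typical usage (an illustrative sketch): a write path reserves the data
* space up front and gives it back on failure:
*
*	ret = btrfs_check_data_free_space(inode, num_bytes);
*	if (ret)
*		return ret;
*	...
*	btrfs_free_reserved_data_space(inode, num_bytes);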
3559 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3561 struct btrfs_space_info *data_sinfo;
3562 struct btrfs_root *root = BTRFS_I(inode)->root;
3563 struct btrfs_fs_info *fs_info = root->fs_info;
3565 int ret = 0, committed = 0, alloc_chunk = 1;
3567 /* make sure bytes are sectorsize aligned */
3568 bytes = ALIGN(bytes, root->sectorsize);
3570 if (root == root->fs_info->tree_root ||
3571 BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3576 data_sinfo = fs_info->data_sinfo;
3581 /* make sure we have enough space to handle the data first */
3582 spin_lock(&data_sinfo->lock);
3583 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3584 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3585 data_sinfo->bytes_may_use;
3587 if (used + bytes > data_sinfo->total_bytes) {
3588 struct btrfs_trans_handle *trans;
3591 * if we don't have enough free bytes in this space then we need
3592 * to alloc a new chunk.
3594 if (!data_sinfo->full && alloc_chunk) {
3597 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3598 spin_unlock(&data_sinfo->lock);
3600 alloc_target = btrfs_get_alloc_profile(root, 1);
3601 trans = btrfs_join_transaction(root);
3603 return PTR_ERR(trans);
3605 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3607 CHUNK_ALLOC_NO_FORCE);
3608 btrfs_end_transaction(trans, root);
3617 data_sinfo = fs_info->data_sinfo;
3623 * If we don't have enough pinned space to deal with this
3624 * allocation don't bother committing the transaction.
3626 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3629 spin_unlock(&data_sinfo->lock);
3631 /* commit the current transaction and try again */
3634 !atomic_read(&root->fs_info->open_ioctl_trans)) {
3637 trans = btrfs_join_transaction(root);
3639 return PTR_ERR(trans);
3640 ret = btrfs_commit_transaction(trans, root);
3648 data_sinfo->bytes_may_use += bytes;
3649 trace_btrfs_space_reservation(root->fs_info, "space_info",
3650 data_sinfo->flags, bytes, 1);
3651 spin_unlock(&data_sinfo->lock);
3657 * Called if we need to clear a data reservation for this inode.
3659 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3661 struct btrfs_root *root = BTRFS_I(inode)->root;
3662 struct btrfs_space_info *data_sinfo;
3664 /* make sure bytes are sectorsize aligned */
3665 bytes = ALIGN(bytes, root->sectorsize);
3667 data_sinfo = root->fs_info->data_sinfo;
3668 spin_lock(&data_sinfo->lock);
3669 WARN_ON(data_sinfo->bytes_may_use < bytes);
3670 data_sinfo->bytes_may_use -= bytes;
3671 trace_btrfs_space_reservation(root->fs_info, "space_info",
3672 data_sinfo->flags, bytes, 0);
3673 spin_unlock(&data_sinfo->lock);
3676 static void force_metadata_allocation(struct btrfs_fs_info *info)
3678 struct list_head *head = &info->space_info;
3679 struct btrfs_space_info *found;
3682 list_for_each_entry_rcu(found, head, list) {
3683 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3684 found->force_alloc = CHUNK_ALLOC_FORCE;
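/*
 * The helper below leaves room for twice the current size of the global
 * reserve; should_alloc_chunk() and can_overcommit() count that doubled
 * figure as space that is effectively already used.
 */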
3689 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3691 return (global->size << 1);
3694 static int should_alloc_chunk(struct btrfs_root *root,
3695 struct btrfs_space_info *sinfo, int force)
3697 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3698 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3699 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3702 if (force == CHUNK_ALLOC_FORCE)
3706 * We need to take into account the global rsv because for all intents
3707 * and purposes it's used space. Don't worry about locking the
3708 * global_rsv, it doesn't change except when the transaction commits.
3710 if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3711 num_allocated += calc_global_rsv_need_space(global_rsv);
3714 * in limited mode, we want to have some free space up to
3715 * about 1% of the FS size.
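*
* Worked example (illustrative): on a 1TiB filesystem the limited threshold
* below works out to max(64MiB, ~1% of 1TiB) = ~10GiB; a chunk is allocated
* once this space_info has less unallocated room than that.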
3717 if (force == CHUNK_ALLOC_LIMITED) {
3718 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3719 thresh = max_t(u64, 64 * 1024 * 1024,
3720 div_factor_fine(thresh, 1));
3722 if (num_bytes - num_allocated < thresh)
3726 if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3731 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3735 if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3736 BTRFS_BLOCK_GROUP_RAID0 |
3737 BTRFS_BLOCK_GROUP_RAID5 |
3738 BTRFS_BLOCK_GROUP_RAID6))
3739 num_dev = root->fs_info->fs_devices->rw_devices;
3740 else if (type & BTRFS_BLOCK_GROUP_RAID1)
3743 num_dev = 1; /* DUP or single */
3745 /* metadata for updating devices and chunk tree */
3746 return btrfs_calc_trans_metadata_size(root, num_dev + 1);
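/*
 * Example (illustrative): allocating a striped chunk on a filesystem with
 * four rw devices reserves metadata room for num_dev + 1 = 5 items: one
 * device item update per device plus one chunk tree update.
 */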
3749 static void check_system_chunk(struct btrfs_trans_handle *trans,
3750 struct btrfs_root *root, u64 type)
3752 struct btrfs_space_info *info;
3756 info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3757 spin_lock(&info->lock);
3758 left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3759 info->bytes_reserved - info->bytes_readonly;
3760 spin_unlock(&info->lock);
3762 thresh = get_system_chunk_thresh(root, type);
3763 if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3764 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3765 left, thresh, type);
3766 dump_space_info(info, 0, 0);
3769 if (left < thresh) {
3772 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3773 btrfs_alloc_chunk(trans, root, flags);
3777 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3778 struct btrfs_root *extent_root, u64 flags, int force)
3780 struct btrfs_space_info *space_info;
3781 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3782 int wait_for_alloc = 0;
3785 /* Don't re-enter if we're already allocating a chunk */
3786 if (trans->allocating_chunk)
3789 space_info = __find_space_info(extent_root->fs_info, flags);
3791 ret = update_space_info(extent_root->fs_info, flags,
3793 BUG_ON(ret); /* -ENOMEM */
3795 BUG_ON(!space_info); /* Logic error */
3798 spin_lock(&space_info->lock);
3799 if (force < space_info->force_alloc)
3800 force = space_info->force_alloc;
3801 if (space_info->full) {
3802 spin_unlock(&space_info->lock);
3806 if (!should_alloc_chunk(extent_root, space_info, force)) {
3807 spin_unlock(&space_info->lock);
3809 } else if (space_info->chunk_alloc) {
3812 space_info->chunk_alloc = 1;
3815 spin_unlock(&space_info->lock);
3817 mutex_lock(&fs_info->chunk_mutex);
3820 * The chunk_mutex is held throughout the entirety of a chunk
3821 * allocation, so once we've acquired the chunk_mutex we know that the
3822 * other guy is done and we need to recheck and see if we should allocate.
3825 if (wait_for_alloc) {
3826 mutex_unlock(&fs_info->chunk_mutex);
3831 trans->allocating_chunk = true;
3834 * If we have mixed data/metadata chunks we want to make sure we keep
3835 * allocating mixed chunks instead of individual chunks.
3837 if (btrfs_mixed_space_info(space_info))
3838 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3841 * if we're doing a data chunk, go ahead and make sure that
3842 * we keep a reasonable number of metadata chunks allocated in the FS as well.
3845 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3846 fs_info->data_chunk_allocations++;
3847 if (!(fs_info->data_chunk_allocations %
3848 fs_info->metadata_ratio))
3849 force_metadata_allocation(fs_info);
3853 * Check if we have enough space in SYSTEM chunk because we may need
3854 * to update devices.
3856 check_system_chunk(trans, extent_root, flags);
3858 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3859 trans->allocating_chunk = false;
3861 spin_lock(&space_info->lock);
3862 if (ret < 0 && ret != -ENOSPC)
3865 space_info->full = 1;
3869 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3871 space_info->chunk_alloc = 0;
3872 spin_unlock(&space_info->lock);
3873 mutex_unlock(&fs_info->chunk_mutex);
3877 static int can_overcommit(struct btrfs_root *root,
3878 struct btrfs_space_info *space_info, u64 bytes,
3879 enum btrfs_reserve_flush_enum flush)
3881 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3882 u64 profile = btrfs_get_alloc_profile(root, 0);
3888 used = space_info->bytes_used + space_info->bytes_reserved +
3889 space_info->bytes_pinned + space_info->bytes_readonly;
3892 * We only want to allow over committing if we have lots of actual space
3893 * free, but if we don't have enough space to handle the global reserve
3894 * space then we could end up having a real enospc problem when trying
3895 * to allocate a chunk or some other such important allocation.
3897 spin_lock(&global_rsv->lock);
3898 space_size = calc_global_rsv_need_space(global_rsv);
3899 spin_unlock(&global_rsv->lock);
3900 if (used + space_size >= space_info->total_bytes)
3903 used += space_info->bytes_may_use;
3905 spin_lock(&root->fs_info->free_chunk_lock);
3906 avail = root->fs_info->free_chunk_space;
3907 spin_unlock(&root->fs_info->free_chunk_lock);
3910 * If we have dup, raid1 or raid10 then only half of the free
3911 * space is actually usable. For raid56, the space info used
3912 * doesn't include the parity drive, so we don't have to account for it.
3915 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3916 BTRFS_BLOCK_GROUP_RAID1 |
3917 BTRFS_BLOCK_GROUP_RAID10))
3920 to_add = space_info->total_bytes;
3923 * If we aren't flushing all things, let us overcommit up to
3924 * 1/2 of the space. If we can flush, don't let us overcommit
3925 * too much, let it overcommit up to 1/8 of the space.
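*
* Worked example (illustrative): with 8GiB of total_bytes here, a
* BTRFS_RESERVE_FLUSH_ALL caller may overcommit by up to 1GiB while a
* non-flushing caller may go up to 4GiB, both clamped to 'avail' below.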
3927 if (flush == BTRFS_RESERVE_FLUSH_ALL)
3933 * Limit the overcommit to the amount of free space we could possibly
3934 * allocate for chunks.
3936 to_add = min(avail, to_add);
3938 if (used + bytes < space_info->total_bytes + to_add)
3943 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3944 unsigned long nr_pages)
3946 struct super_block *sb = root->fs_info->sb;
3948 if (down_read_trylock(&sb->s_umount)) {
3949 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3950 up_read(&sb->s_umount);
3953 * We needn't worry about the filesystem going from r/w to r/o: though
3954 * we don't acquire the ->s_umount mutex, the filesystem should
3955 * guarantee that the delalloc inode list is empty after the filesystem
3956 * becomes read-only (all dirty pages are written to disk).
3959 btrfs_start_all_delalloc_inodes(root->fs_info, 0);
3960 if (!current->journal_info)
3961 btrfs_wait_all_ordered_extents(root->fs_info, 0);
3966 * shrink metadata reservation for delalloc
3968 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3971 struct btrfs_block_rsv *block_rsv;
3972 struct btrfs_space_info *space_info;
3973 struct btrfs_trans_handle *trans;
3977 unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3979 enum btrfs_reserve_flush_enum flush;
3981 trans = (struct btrfs_trans_handle *)current->journal_info;
3982 block_rsv = &root->fs_info->delalloc_block_rsv;
3983 space_info = block_rsv->space_info;
3986 delalloc_bytes = percpu_counter_sum_positive(
3987 &root->fs_info->delalloc_bytes);
3988 if (delalloc_bytes == 0) {
3991 btrfs_wait_all_ordered_extents(root->fs_info, 0);
3995 while (delalloc_bytes && loops < 3) {
3996 max_reclaim = min(delalloc_bytes, to_reclaim);
3997 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3998 btrfs_writeback_inodes_sb_nr(root, nr_pages);
4000 * We need to wait for the async pages to actually start before we do anything.
4003 wait_event(root->fs_info->async_submit_wait,
4004 !atomic_read(&root->fs_info->async_delalloc_pages));
4007 flush = BTRFS_RESERVE_FLUSH_ALL;
4009 flush = BTRFS_RESERVE_NO_FLUSH;
4010 spin_lock(&space_info->lock);
4011 if (can_overcommit(root, space_info, orig, flush)) {
4012 spin_unlock(&space_info->lock);
4015 spin_unlock(&space_info->lock);
4018 if (wait_ordered && !trans) {
4019 btrfs_wait_all_ordered_extents(root->fs_info, 0);
4021 time_left = schedule_timeout_killable(1);
4026 delalloc_bytes = percpu_counter_sum_positive(
4027 &root->fs_info->delalloc_bytes);
4032 * may_commit_transaction - possibly commit the transaction if it's ok to
4033 * @root - the root we're allocating for
4034 * @bytes - the number of bytes we want to reserve
4035 * @force - force the commit
4037 * This will check to make sure that committing the transaction will actually
4038 * get us somewhere and then commit the transaction if it does. Otherwise it
4039 * will return -ENOSPC.
4041 static int may_commit_transaction(struct btrfs_root *root,
4042 struct btrfs_space_info *space_info,
4043 u64 bytes, int force)
4045 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4046 struct btrfs_trans_handle *trans;
4048 trans = (struct btrfs_trans_handle *)current->journal_info;
4055 /* See if there is enough pinned space to make this reservation */
4056 spin_lock(&space_info->lock);
4057 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4059 spin_unlock(&space_info->lock);
4062 spin_unlock(&space_info->lock);
4065 * See if the delayed insertion reservation has some space we can use for this reservation.
4068 if (space_info != delayed_rsv->space_info)
4071 spin_lock(&space_info->lock);
4072 spin_lock(&delayed_rsv->lock);
4073 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4074 bytes - delayed_rsv->size) >= 0) {
4075 spin_unlock(&delayed_rsv->lock);
4076 spin_unlock(&space_info->lock);
4079 spin_unlock(&delayed_rsv->lock);
4080 spin_unlock(&space_info->lock);
4083 trans = btrfs_join_transaction(root);
4087 return btrfs_commit_transaction(trans, root);
4091 FLUSH_DELAYED_ITEMS_NR = 1,
4092 FLUSH_DELAYED_ITEMS = 2,
4094 FLUSH_DELALLOC_WAIT = 4,
4099 static int flush_space(struct btrfs_root *root,
4100 struct btrfs_space_info *space_info, u64 num_bytes,
4101 u64 orig_bytes, int state)
4103 struct btrfs_trans_handle *trans;
4108 case FLUSH_DELAYED_ITEMS_NR:
4109 case FLUSH_DELAYED_ITEMS:
4110 if (state == FLUSH_DELAYED_ITEMS_NR) {
4111 u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
4113 nr = (int)div64_u64(num_bytes, bytes);
4120 trans = btrfs_join_transaction(root);
4121 if (IS_ERR(trans)) {
4122 ret = PTR_ERR(trans);
4125 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4126 btrfs_end_transaction(trans, root);
4128 case FLUSH_DELALLOC:
4129 case FLUSH_DELALLOC_WAIT:
4130 shrink_delalloc(root, num_bytes, orig_bytes,
4131 state == FLUSH_DELALLOC_WAIT);
4134 trans = btrfs_join_transaction(root);
4135 if (IS_ERR(trans)) {
4136 ret = PTR_ERR(trans);
4139 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4140 btrfs_get_alloc_profile(root, 0),
4141 CHUNK_ALLOC_NO_FORCE);
4142 btrfs_end_transaction(trans, root);
4147 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4157 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4158 * @root - the root we're allocating for
4159 * @block_rsv - the block_rsv we're allocating for
4160 * @orig_bytes - the number of bytes we want
4161 * @flush - whether or not we can flush to make our reservation
4163 * This will reserve orig_bytes number of bytes from the space info associated
4164 * with the block_rsv. If there is not enough space it will make an attempt to
4165 * flush out space to make room. It will do this by flushing delalloc if
4166 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
4167 * then no attempts to regain reservations will be made and this will fail if there is not enough space already.
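*
* Illustrative usage (a sketch of the pattern the wrappers further down,
* e.g. btrfs_block_rsv_add(), follow):
*
*	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
*	if (!ret)
*		block_rsv_add_bytes(block_rsv, num_bytes, 1);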
4170 static int reserve_metadata_bytes(struct btrfs_root *root,
4171 struct btrfs_block_rsv *block_rsv,
4173 enum btrfs_reserve_flush_enum flush)
4175 struct btrfs_space_info *space_info = block_rsv->space_info;
4177 u64 num_bytes = orig_bytes;
4178 int flush_state = FLUSH_DELAYED_ITEMS_NR;
4180 bool flushing = false;
4184 spin_lock(&space_info->lock);
4186 * We only want to wait if somebody other than us is flushing and we
4187 * are actually allowed to flush all things.
4189 while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4190 space_info->flush) {
4191 spin_unlock(&space_info->lock);
4193 * If we have a trans handle we can't wait because the flusher
4194 * may have to commit the transaction, which would mean we would
4195 * deadlock since we are waiting for the flusher to finish, but
4196 * hold the current transaction open.
4198 if (current->journal_info)
4200 ret = wait_event_killable(space_info->wait, !space_info->flush);
4201 /* Must have been killed, return */
4205 spin_lock(&space_info->lock);
4209 used = space_info->bytes_used + space_info->bytes_reserved +
4210 space_info->bytes_pinned + space_info->bytes_readonly +
4211 space_info->bytes_may_use;
4214 * The idea here is that if we've not already over-reserved the block group
4215 * then we can go ahead and save our reservation first and then start
4216 * flushing if we need to. Otherwise, if we've already overcommitted,
4217 * let's start flushing stuff first and then come back and try to make our reservation.
4220 if (used <= space_info->total_bytes) {
4221 if (used + orig_bytes <= space_info->total_bytes) {
4222 space_info->bytes_may_use += orig_bytes;
4223 trace_btrfs_space_reservation(root->fs_info,
4224 "space_info", space_info->flags, orig_bytes, 1);
4228 * Ok set num_bytes to orig_bytes since we aren't
4229 * overcommitted, this way we only try and reclaim what we need.
4232 num_bytes = orig_bytes;
4236 * Ok we're over committed, set num_bytes to the overcommitted
4237 * amount plus the amount of bytes that we need for this reservation.
4240 num_bytes = used - space_info->total_bytes +
4244 if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4245 space_info->bytes_may_use += orig_bytes;
4246 trace_btrfs_space_reservation(root->fs_info, "space_info",
4247 space_info->flags, orig_bytes,
4253 * Couldn't make our reservation, save our place so while we're trying
4254 * to reclaim space we can actually use it instead of somebody else
4255 * stealing it from us.
4257 * We make the other tasks wait for the flush only when we can flush all things.
4260 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4262 space_info->flush = 1;
4265 spin_unlock(&space_info->lock);
4267 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4270 ret = flush_space(root, space_info, num_bytes, orig_bytes,
4275 * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4276 * would happen. So skip the delalloc flush.
4278 if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4279 (flush_state == FLUSH_DELALLOC ||
4280 flush_state == FLUSH_DELALLOC_WAIT))
4281 flush_state = ALLOC_CHUNK;
4285 else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4286 flush_state < COMMIT_TRANS)
4288 else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4289 flush_state <= COMMIT_TRANS)
4293 if (ret == -ENOSPC &&
4294 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4295 struct btrfs_block_rsv *global_rsv =
4296 &root->fs_info->global_block_rsv;
4298 if (block_rsv != global_rsv &&
4299 !block_rsv_use_bytes(global_rsv, orig_bytes))
4303 spin_lock(&space_info->lock);
4304 space_info->flush = 0;
4305 wake_up_all(&space_info->wait);
4306 spin_unlock(&space_info->lock);
4311 static struct btrfs_block_rsv *get_block_rsv(
4312 const struct btrfs_trans_handle *trans,
4313 const struct btrfs_root *root)
4315 struct btrfs_block_rsv *block_rsv = NULL;
4318 block_rsv = trans->block_rsv;
4320 if (root == root->fs_info->csum_root && trans->adding_csums)
4321 block_rsv = trans->block_rsv;
4324 block_rsv = root->block_rsv;
4327 block_rsv = &root->fs_info->empty_block_rsv;
4332 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4336 spin_lock(&block_rsv->lock);
4337 if (block_rsv->reserved >= num_bytes) {
4338 block_rsv->reserved -= num_bytes;
4339 if (block_rsv->reserved < block_rsv->size)
4340 block_rsv->full = 0;
4343 spin_unlock(&block_rsv->lock);
4347 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4348 u64 num_bytes, int update_size)
4350 spin_lock(&block_rsv->lock);
4351 block_rsv->reserved += num_bytes;
4353 block_rsv->size += num_bytes;
4354 else if (block_rsv->reserved >= block_rsv->size)
4355 block_rsv->full = 1;
4356 spin_unlock(&block_rsv->lock);
4359 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4360 struct btrfs_block_rsv *dest, u64 num_bytes,
4363 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4366 if (global_rsv->space_info != dest->space_info)
4369 spin_lock(&global_rsv->lock);
4370 min_bytes = div_factor(global_rsv->size, min_factor);
4371 if (global_rsv->reserved < min_bytes + num_bytes) {
4372 spin_unlock(&global_rsv->lock);
4375 global_rsv->reserved -= num_bytes;
4376 if (global_rsv->reserved < global_rsv->size)
4377 global_rsv->full = 0;
4378 spin_unlock(&global_rsv->lock);
4380 block_rsv_add_bytes(dest, num_bytes, 1);
4384 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4385 struct btrfs_block_rsv *block_rsv,
4386 struct btrfs_block_rsv *dest, u64 num_bytes)
4388 struct btrfs_space_info *space_info = block_rsv->space_info;
4390 spin_lock(&block_rsv->lock);
4391 if (num_bytes == (u64)-1)
4392 num_bytes = block_rsv->size;
4393 block_rsv->size -= num_bytes;
4394 if (block_rsv->reserved >= block_rsv->size) {
4395 num_bytes = block_rsv->reserved - block_rsv->size;
4396 block_rsv->reserved = block_rsv->size;
4397 block_rsv->full = 1;
4401 spin_unlock(&block_rsv->lock);
4403 if (num_bytes > 0) {
4405 spin_lock(&dest->lock);
4409 bytes_to_add = dest->size - dest->reserved;
4410 bytes_to_add = min(num_bytes, bytes_to_add);
4411 dest->reserved += bytes_to_add;
4412 if (dest->reserved >= dest->size)
4414 num_bytes -= bytes_to_add;
4416 spin_unlock(&dest->lock);
4419 spin_lock(&space_info->lock);
4420 space_info->bytes_may_use -= num_bytes;
4421 trace_btrfs_space_reservation(fs_info, "space_info",
4422 space_info->flags, num_bytes, 0);
4423 space_info->reservation_progress++;
4424 spin_unlock(&space_info->lock);
4429 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4430 struct btrfs_block_rsv *dst, u64 num_bytes)
4434 ret = block_rsv_use_bytes(src, num_bytes);
4438 block_rsv_add_bytes(dst, num_bytes, 1);
4442 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4444 memset(rsv, 0, sizeof(*rsv));
4445 spin_lock_init(&rsv->lock);
4449 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4450 unsigned short type)
4452 struct btrfs_block_rsv *block_rsv;
4453 struct btrfs_fs_info *fs_info = root->fs_info;
4455 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4459 btrfs_init_block_rsv(block_rsv, type);
4460 block_rsv->space_info = __find_space_info(fs_info,
4461 BTRFS_BLOCK_GROUP_METADATA);
4465 void btrfs_free_block_rsv(struct btrfs_root *root,
4466 struct btrfs_block_rsv *rsv)
4470 btrfs_block_rsv_release(root, rsv, (u64)-1);
4474 int btrfs_block_rsv_add(struct btrfs_root *root,
4475 struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4476 enum btrfs_reserve_flush_enum flush)
4483 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4485 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4492 int btrfs_block_rsv_check(struct btrfs_root *root,
4493 struct btrfs_block_rsv *block_rsv, int min_factor)
4501 spin_lock(&block_rsv->lock);
4502 num_bytes = div_factor(block_rsv->size, min_factor);
4503 if (block_rsv->reserved >= num_bytes)
4505 spin_unlock(&block_rsv->lock);
4510 int btrfs_block_rsv_refill(struct btrfs_root *root,
4511 struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4512 enum btrfs_reserve_flush_enum flush)
4520 spin_lock(&block_rsv->lock);
4521 num_bytes = min_reserved;
4522 if (block_rsv->reserved >= num_bytes)
4525 num_bytes -= block_rsv->reserved;
4526 spin_unlock(&block_rsv->lock);
4531 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4533 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4540 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4541 struct btrfs_block_rsv *dst_rsv,
4544 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4547 void btrfs_block_rsv_release(struct btrfs_root *root,
4548 struct btrfs_block_rsv *block_rsv,
4551 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4552 if (global_rsv->full || global_rsv == block_rsv ||
4553 block_rsv->space_info != global_rsv->space_info)
4555 block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4560 * helper to calculate size of global block reservation.
4561 * the desired value is sum of space used by extent tree,
4562 * checksum tree and root tree
4564 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4566 struct btrfs_space_info *sinfo;
4570 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4572 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4573 spin_lock(&sinfo->lock);
4574 data_used = sinfo->bytes_used;
4575 spin_unlock(&sinfo->lock);
4577 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4578 spin_lock(&sinfo->lock);
4579 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4581 meta_used = sinfo->bytes_used;
4582 spin_unlock(&sinfo->lock);
4584 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4586 num_bytes += div64_u64(data_used + meta_used, 50);
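/* the divide by 50 above folds in ~2% of all used space as headroom */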
4588 if (num_bytes * 3 > meta_used)
4589 num_bytes = div64_u64(meta_used, 3);
4591 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4594 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4596 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4597 struct btrfs_space_info *sinfo = block_rsv->space_info;
4600 num_bytes = calc_global_metadata_size(fs_info);
4602 spin_lock(&sinfo->lock);
4603 spin_lock(&block_rsv->lock);
4605 block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4607 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4608 sinfo->bytes_reserved + sinfo->bytes_readonly +
4609 sinfo->bytes_may_use;
4611 if (sinfo->total_bytes > num_bytes) {
4612 num_bytes = sinfo->total_bytes - num_bytes;
4613 block_rsv->reserved += num_bytes;
4614 sinfo->bytes_may_use += num_bytes;
4615 trace_btrfs_space_reservation(fs_info, "space_info",
4616 sinfo->flags, num_bytes, 1);
4619 if (block_rsv->reserved >= block_rsv->size) {
4620 num_bytes = block_rsv->reserved - block_rsv->size;
4621 sinfo->bytes_may_use -= num_bytes;
4622 trace_btrfs_space_reservation(fs_info, "space_info",
4623 sinfo->flags, num_bytes, 0);
4624 sinfo->reservation_progress++;
4625 block_rsv->reserved = block_rsv->size;
4626 block_rsv->full = 1;
4629 spin_unlock(&block_rsv->lock);
4630 spin_unlock(&sinfo->lock);
4633 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4635 struct btrfs_space_info *space_info;
4637 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4638 fs_info->chunk_block_rsv.space_info = space_info;
4640 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4641 fs_info->global_block_rsv.space_info = space_info;
4642 fs_info->delalloc_block_rsv.space_info = space_info;
4643 fs_info->trans_block_rsv.space_info = space_info;
4644 fs_info->empty_block_rsv.space_info = space_info;
4645 fs_info->delayed_block_rsv.space_info = space_info;
4647 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4648 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4649 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4650 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4651 if (fs_info->quota_root)
4652 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4653 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4655 update_global_block_rsv(fs_info);
4658 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4660 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4662 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4663 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4664 WARN_ON(fs_info->trans_block_rsv.size > 0);
4665 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4666 WARN_ON(fs_info->chunk_block_rsv.size > 0);
4667 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4668 WARN_ON(fs_info->delayed_block_rsv.size > 0);
4669 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4672 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4673 struct btrfs_root *root)
4675 if (!trans->block_rsv)
4678 if (!trans->bytes_reserved)
4681 trace_btrfs_space_reservation(root->fs_info, "transaction",
4682 trans->transid, trans->bytes_reserved, 0);
4683 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4684 trans->bytes_reserved = 0;
4687 /* Can only return 0 or -ENOSPC */
4688 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4689 struct inode *inode)
4691 struct btrfs_root *root = BTRFS_I(inode)->root;
4692 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4693 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4696 * We need to hold space in order to delete our orphan item once we've
4697 * added it, so this takes the reservation so we can release it later
4698 * when we are truly done with the orphan item.
4700 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4701 trace_btrfs_space_reservation(root->fs_info, "orphan",
4702 btrfs_ino(inode), num_bytes, 1);
4703 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4706 void btrfs_orphan_release_metadata(struct inode *inode)
4708 struct btrfs_root *root = BTRFS_I(inode)->root;
4709 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4710 trace_btrfs_space_reservation(root->fs_info, "orphan",
4711 btrfs_ino(inode), num_bytes, 0);
4712 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4716 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4717 * root: the root of the parent directory
4718 * rsv: block reservation
4719 * items: the number of items that we need to reserve space for
4720 * qgroup_reserved: used to return the size reserved in the qgroup
4722 * This function is used to reserve the space for snapshot/subvolume
4723 * creation and deletion. Those operations differ from the common
4724 * file/directory operations: they change two fs/file trees and the
4725 * root tree, and the number of items that the qgroup reserves is
4726 * different from the free space reservation. So we can not use
4727 * the space reservation mechanism in start_transaction().
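 *
 * Illustrative usage (a hedged sketch; the real callers are the
 * snapshot/subvolume creation and deletion paths, and "items" stands in
 * for whatever item count the caller computed):
 *
 *	u64 qgroup_reserved = 0;
 *	ret = btrfs_subvolume_reserve_metadata(root, rsv, items,
 *					       &qgroup_reserved);
 *	if (ret)
 *		return ret;
 *	... perform the snapshot/subvolume operation ...
 *	btrfs_subvolume_release_metadata(root, rsv, qgroup_reserved);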
4729 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4730 struct btrfs_block_rsv *rsv,
4732 u64 *qgroup_reserved)
4737 if (root->fs_info->quota_enabled) {
4738 /* One for parent inode, two for dir entries */
4739 num_bytes = 3 * root->leafsize;
4740 ret = btrfs_qgroup_reserve(root, num_bytes);
4747 *qgroup_reserved = num_bytes;
4749 num_bytes = btrfs_calc_trans_metadata_size(root, items);
4750 rsv->space_info = __find_space_info(root->fs_info,
4751 BTRFS_BLOCK_GROUP_METADATA);
4752 ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4753 BTRFS_RESERVE_FLUSH_ALL);
4755 if (*qgroup_reserved)
4756 btrfs_qgroup_free(root, *qgroup_reserved);
4762 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4763 struct btrfs_block_rsv *rsv,
4764 u64 qgroup_reserved)
4766 btrfs_block_rsv_release(root, rsv, (u64)-1);
4767 if (qgroup_reserved)
4768 btrfs_qgroup_free(root, qgroup_reserved);
4772 * drop_outstanding_extent - drop an outstanding extent
4773 * @inode: the inode we're dropping the extent for
4775 * This is called when we are freeing up an outstanding extent, either after
4776 * an error or after the extent has been written. This will return the number of
4777 * reserved extents that need to be freed. This must be called with
4778 * BTRFS_I(inode)->lock held.
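 *
 * Worked example (illustrative numbers only): with reserved_extents = 5
 * and outstanding_extents going from 3 to 2 here, 2 < 5, so we trim
 * reserved_extents down to 2 and return dropped_extents = 5 - 2 = 3;
 * one extra is returned only when the count hits zero and the
 * DELALLOC_META_RESERVED inode-update reservation is dropped with it.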
4780 static unsigned drop_outstanding_extent(struct inode *inode)
4782 unsigned drop_inode_space = 0;
4783 unsigned dropped_extents = 0;
4785 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4786 BTRFS_I(inode)->outstanding_extents--;
4788 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4789 test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4790 &BTRFS_I(inode)->runtime_flags))
4791 drop_inode_space = 1;
4794 * If we have as many or more outstanding extents than we have
4795 * reserved then we need to leave the reserved extents count alone.
4797 if (BTRFS_I(inode)->outstanding_extents >=
4798 BTRFS_I(inode)->reserved_extents)
4799 return drop_inode_space;
4801 dropped_extents = BTRFS_I(inode)->reserved_extents -
4802 BTRFS_I(inode)->outstanding_extents;
4803 BTRFS_I(inode)->reserved_extents -= dropped_extents;
4804 return dropped_extents + drop_inode_space;
4808 * calc_csum_metadata_size - return the amount of metadata space that must be
4809 * reserved/freed for the given bytes.
4810 * @inode: the inode we're manipulating
4811 * @num_bytes: the number of bytes in question
4812 * @reserve: 1 if we are reserving space, 0 if we are freeing space
4814 * This adjusts the number of csum_bytes in the inode and then returns the
4815 * correct amount of metadata that must either be reserved or freed. We
4816 * calculate how many checksums we can fit into one leaf and then divide the
4817 * number of bytes that will need to be checksummed by this value to figure out
4818 * how many checksums will be required. If we are adding bytes then the number
4819 * may go up and we will return the number of additional bytes that must be
4820 * reserved. If it is going down we will return the number of bytes that must
4821 * be freed.
4823 * This must be called with BTRFS_I(inode)->lock held.
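 *
 * Worked example (illustrative numbers, not real on-disk values): assume
 * a 4K sectorsize and a leaf that holds 1000 checksums. Growing
 * csum_bytes from 3900 * 4K to 4100 * 4K moves us from ceil(3900/1000) = 4
 * to ceil(4100/1000) = 5 leaves worth of checksums, so we return
 * btrfs_calc_trans_metadata_size(root, 1); shrinking by the same amount
 * returns the same value as the number of bytes to free.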
4825 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4828 struct btrfs_root *root = BTRFS_I(inode)->root;
4830 int num_csums_per_leaf;
4834 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4835 BTRFS_I(inode)->csum_bytes == 0)
4838 old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4840 BTRFS_I(inode)->csum_bytes += num_bytes;
4842 BTRFS_I(inode)->csum_bytes -= num_bytes;
4843 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4844 num_csums_per_leaf = (int)div64_u64(csum_size,
4845 sizeof(struct btrfs_csum_item) +
4846 sizeof(struct btrfs_disk_key));
4847 num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4848 num_csums = num_csums + num_csums_per_leaf - 1;
4849 num_csums = num_csums / num_csums_per_leaf;
4851 old_csums = old_csums + num_csums_per_leaf - 1;
4852 old_csums = old_csums / num_csums_per_leaf;
4854 /* No change, no need to reserve more */
4855 if (old_csums == num_csums)
4859 return btrfs_calc_trans_metadata_size(root,
4860 num_csums - old_csums);
4862 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4865 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4867 struct btrfs_root *root = BTRFS_I(inode)->root;
4868 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4871 unsigned nr_extents = 0;
4872 int extra_reserve = 0;
4873 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4875 bool delalloc_lock = true;
4879 /* If we are a free space inode we must not flush, since we will be in
4880 * the middle of a transaction commit. We also don't need the delalloc
4881 * mutex since we won't race with anybody. We need this mostly to make
4882 * lockdep shut its filthy mouth.
4884 if (btrfs_is_free_space_inode(inode)) {
4885 flush = BTRFS_RESERVE_NO_FLUSH;
4886 delalloc_lock = false;
4889 if (flush != BTRFS_RESERVE_NO_FLUSH &&
4890 btrfs_transaction_in_commit(root->fs_info))
4891 schedule_timeout(1);
4894 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4896 num_bytes = ALIGN(num_bytes, root->sectorsize);
4898 spin_lock(&BTRFS_I(inode)->lock);
4899 BTRFS_I(inode)->outstanding_extents++;
4901 if (BTRFS_I(inode)->outstanding_extents >
4902 BTRFS_I(inode)->reserved_extents)
4903 nr_extents = BTRFS_I(inode)->outstanding_extents -
4904 BTRFS_I(inode)->reserved_extents;
4907 * Add an item to reserve for updating the inode when we complete the
4908 * delalloc io.
4910 if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4911 &BTRFS_I(inode)->runtime_flags)) {
4916 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4917 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4918 csum_bytes = BTRFS_I(inode)->csum_bytes;
4919 spin_unlock(&BTRFS_I(inode)->lock);
4921 if (root->fs_info->quota_enabled) {
4922 ret = btrfs_qgroup_reserve(root, num_bytes +
4923 nr_extents * root->leafsize);
4928 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4929 if (unlikely(ret)) {
4930 if (root->fs_info->quota_enabled)
4931 btrfs_qgroup_free(root, num_bytes +
4932 nr_extents * root->leafsize);
4936 spin_lock(&BTRFS_I(inode)->lock);
4937 if (extra_reserve) {
4938 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4939 &BTRFS_I(inode)->runtime_flags);
4942 BTRFS_I(inode)->reserved_extents += nr_extents;
4943 spin_unlock(&BTRFS_I(inode)->lock);
4946 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4949 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4950 btrfs_ino(inode), to_reserve, 1);
4951 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4956 spin_lock(&BTRFS_I(inode)->lock);
4957 dropped = drop_outstanding_extent(inode);
4959 * If the inode's csum_bytes is the same as the original
4960 * csum_bytes then we know we haven't raced with any free()ers
4961 * so we can just reduce our inode's csum bytes and carry on.
4963 if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
4964 calc_csum_metadata_size(inode, num_bytes, 0);
4966 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
4970 * This is tricky, but first we need to figure out how much we
4971 * freed from any free-ers that occurred during this
4972 * reservation, so we reset ->csum_bytes to the csum_bytes
4973 * before we dropped our lock, and then call the free for the
4974 * number of bytes that were freed while we were trying our
4975 * reservation.
4977 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
4978 BTRFS_I(inode)->csum_bytes = csum_bytes;
4979 to_free = calc_csum_metadata_size(inode, bytes, 0);
4983 * Now we need to see how much we would have freed had we not
4984 * been making this reservation and our ->csum_bytes were not
4985 * artificially inflated.
4987 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
4988 bytes = csum_bytes - orig_csum_bytes;
4989 bytes = calc_csum_metadata_size(inode, bytes, 0);
4992 * Now reset ->csum_bytes to what it should be. If bytes is
4993 * more than to_free then we would have freed more space had we
4994 * not had an artificially high ->csum_bytes, so we need to free
4995 * the remainder. If bytes is the same or less then we don't
4996 * need to do anything, the other free-ers did the correct
4997 * thing.
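 *
 * Worked example (illustrative): we snapshotted csum_bytes = 100 with
 * num_bytes = 20, and free-ers dropped ->csum_bytes to 70 while we
 * slept. We first charge the 30 freed bytes against the inflated 100
 * (to_free), then against the uninflated 80 (bytes), and if the
 * uninflated accounting would have freed more, the difference is what
 * we still need to free.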
4999 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5000 if (bytes > to_free)
5001 to_free = bytes - to_free;
5005 spin_unlock(&BTRFS_I(inode)->lock);
5007 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5010 btrfs_block_rsv_release(root, block_rsv, to_free);
5011 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5012 btrfs_ino(inode), to_free, 0);
5015 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5020 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5021 * @inode: the inode to release the reservation for
5022 * @num_bytes: the number of bytes we're releasing
5024 * This will release the metadata reservation for an inode. This can be called
5025 * once we complete IO for a given set of bytes to release their metadata
5026 * reservations.
5028 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5030 struct btrfs_root *root = BTRFS_I(inode)->root;
5034 num_bytes = ALIGN(num_bytes, root->sectorsize);
5035 spin_lock(&BTRFS_I(inode)->lock);
5036 dropped = drop_outstanding_extent(inode);
5039 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5040 spin_unlock(&BTRFS_I(inode)->lock);
5042 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5044 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5045 btrfs_ino(inode), to_free, 0);
5046 if (root->fs_info->quota_enabled) {
5047 btrfs_qgroup_free(root, num_bytes +
5048 dropped * root->leafsize);
5051 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5056 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5057 * @inode: inode we're writing to
5058 * @num_bytes: the number of bytes we want to allocate
5060 * This will do the following things
5062 * o reserve space in the data space info for num_bytes
5063 * o reserve space in the metadata space info based on number of outstanding
5064 * extents and how much csums will be needed
5065 * o add to the inode's ->delalloc_bytes
5066 * o add it to the fs_info's delalloc inodes list.
5068 * This will return 0 for success and -ENOSPC if there is no space left.
5070 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5074 ret = btrfs_check_data_free_space(inode, num_bytes);
5078 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5080 btrfs_free_reserved_data_space(inode, num_bytes);
5088 * btrfs_delalloc_release_space - release data and metadata space for delalloc
5089 * @inode: inode we're releasing space for
5090 * @num_bytes: the number of bytes we want to free up
5092 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
5093 * called in the case that we don't need the metadata AND data reservations
5094 * anymore, e.g. if there is an error or we insert an inline extent.
5096 * This function will release the metadata space that was not used and will
5097 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5098 * list if there are no delalloc bytes left.
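 *
 * Illustrative pairing with the reserve side (a sketch with error
 * handling trimmed; real callers are e.g. the buffered write path):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	... dirty the pages; on failure or an inline extent: ...
 *	btrfs_delalloc_release_space(inode, num_bytes);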
5100 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5102 btrfs_delalloc_release_metadata(inode, num_bytes);
5103 btrfs_free_reserved_data_space(inode, num_bytes);
5106 static int update_block_group(struct btrfs_root *root,
5107 u64 bytenr, u64 num_bytes, int alloc)
5109 struct btrfs_block_group_cache *cache = NULL;
5110 struct btrfs_fs_info *info = root->fs_info;
5111 u64 total = num_bytes;
5116 /* block accounting for super block */
5117 spin_lock(&info->delalloc_root_lock);
5118 old_val = btrfs_super_bytes_used(info->super_copy);
5120 old_val += num_bytes;
5122 old_val -= num_bytes;
5123 btrfs_set_super_bytes_used(info->super_copy, old_val);
5124 spin_unlock(&info->delalloc_root_lock);
5127 cache = btrfs_lookup_block_group(info, bytenr);
5130 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5131 BTRFS_BLOCK_GROUP_RAID1 |
5132 BTRFS_BLOCK_GROUP_RAID10))
5137 * If this block group has free space cache written out, we
5138 * need to make sure to load it if we are removing space. This
5139 * is because we need the unpinning stage to actually add the
5140 * space back to the block group, otherwise we will leak space.
5142 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5143 cache_block_group(cache, 1);
5145 byte_in_group = bytenr - cache->key.objectid;
5146 WARN_ON(byte_in_group > cache->key.offset);
5148 spin_lock(&cache->space_info->lock);
5149 spin_lock(&cache->lock);
5151 if (btrfs_test_opt(root, SPACE_CACHE) &&
5152 cache->disk_cache_state < BTRFS_DC_CLEAR)
5153 cache->disk_cache_state = BTRFS_DC_CLEAR;
5156 old_val = btrfs_block_group_used(&cache->item);
5157 num_bytes = min(total, cache->key.offset - byte_in_group);
5159 old_val += num_bytes;
5160 btrfs_set_block_group_used(&cache->item, old_val);
5161 cache->reserved -= num_bytes;
5162 cache->space_info->bytes_reserved -= num_bytes;
5163 cache->space_info->bytes_used += num_bytes;
5164 cache->space_info->disk_used += num_bytes * factor;
5165 spin_unlock(&cache->lock);
5166 spin_unlock(&cache->space_info->lock);
5168 old_val -= num_bytes;
5169 btrfs_set_block_group_used(&cache->item, old_val);
5170 cache->pinned += num_bytes;
5171 cache->space_info->bytes_pinned += num_bytes;
5172 cache->space_info->bytes_used -= num_bytes;
5173 cache->space_info->disk_used -= num_bytes * factor;
5174 spin_unlock(&cache->lock);
5175 spin_unlock(&cache->space_info->lock);
5177 set_extent_dirty(info->pinned_extents,
5178 bytenr, bytenr + num_bytes - 1,
5179 GFP_NOFS | __GFP_NOFAIL);
5181 btrfs_put_block_group(cache);
5183 bytenr += num_bytes;
5188 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5190 struct btrfs_block_group_cache *cache;
5193 spin_lock(&root->fs_info->block_group_cache_lock);
5194 bytenr = root->fs_info->first_logical_byte;
5195 spin_unlock(&root->fs_info->block_group_cache_lock);
5197 if (bytenr < (u64)-1)
5200 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5204 bytenr = cache->key.objectid;
5205 btrfs_put_block_group(cache);
5210 static int pin_down_extent(struct btrfs_root *root,
5211 struct btrfs_block_group_cache *cache,
5212 u64 bytenr, u64 num_bytes, int reserved)
5214 spin_lock(&cache->space_info->lock);
5215 spin_lock(&cache->lock);
5216 cache->pinned += num_bytes;
5217 cache->space_info->bytes_pinned += num_bytes;
5219 cache->reserved -= num_bytes;
5220 cache->space_info->bytes_reserved -= num_bytes;
5222 spin_unlock(&cache->lock);
5223 spin_unlock(&cache->space_info->lock);
5225 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5226 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5231 * this function must be called within a transaction
5233 int btrfs_pin_extent(struct btrfs_root *root,
5234 u64 bytenr, u64 num_bytes, int reserved)
5236 struct btrfs_block_group_cache *cache;
5238 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5239 BUG_ON(!cache); /* Logic error */
5241 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5243 btrfs_put_block_group(cache);
5248 * this function must be called within a transaction
5250 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5251 u64 bytenr, u64 num_bytes)
5253 struct btrfs_block_group_cache *cache;
5256 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5261 * pull in the free space cache (if any) so that our pin
5262 * removes the free space from the cache. We have load_only set
5263 * to one because the slow code to read in the free extents does check
5264 * the pinned extents.
5266 cache_block_group(cache, 1);
5268 pin_down_extent(root, cache, bytenr, num_bytes, 0);
5270 /* remove us from the free space cache (if we're there at all) */
5271 ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5272 btrfs_put_block_group(cache);
5276 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5279 struct btrfs_block_group_cache *block_group;
5280 struct btrfs_caching_control *caching_ctl;
5282 block_group = btrfs_lookup_block_group(root->fs_info, start);
5286 cache_block_group(block_group, 0);
5287 caching_ctl = get_caching_control(block_group);
5291 BUG_ON(!block_group_cache_done(block_group));
5292 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5294 mutex_lock(&caching_ctl->mutex);
5296 if (start >= caching_ctl->progress) {
5297 ret = add_excluded_extent(root, start, num_bytes);
5298 } else if (start + num_bytes <= caching_ctl->progress) {
5299 ret = btrfs_remove_free_space(block_group,
5302 num_bytes = caching_ctl->progress - start;
5303 ret = btrfs_remove_free_space(block_group,
5308 num_bytes = (start + num_bytes) -
5309 caching_ctl->progress;
5310 start = caching_ctl->progress;
5311 ret = add_excluded_extent(root, start, num_bytes);
5314 mutex_unlock(&caching_ctl->mutex);
5315 put_caching_control(caching_ctl);
5317 btrfs_put_block_group(block_group);
5321 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5322 struct extent_buffer *eb)
5324 struct btrfs_file_extent_item *item;
5325 struct btrfs_key key;
5329 if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5332 for (i = 0; i < btrfs_header_nritems(eb); i++) {
5333 btrfs_item_key_to_cpu(eb, &key, i);
5334 if (key.type != BTRFS_EXTENT_DATA_KEY)
5336 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5337 found_type = btrfs_file_extent_type(eb, item);
5338 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5340 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5342 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5343 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5344 __exclude_logged_extent(log, key.objectid, key.offset);
5351 * btrfs_update_reserved_bytes - update the block_group and space info counters
5352 * @cache: The cache we are manipulating
5353 * @num_bytes: The number of bytes in question
5354 * @reserve: One of the reservation enums
5356 * This is called by the allocator when it reserves space, or by somebody who is
5357 * freeing space that was never actually used on disk. For example if you
5358 * reserve some space for a new leaf in transaction A and before transaction A
5359 * commits you free that leaf, you call this with reserve set to 0 in order to
5360 * clear the reservation.
5362 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5363 * ENOSPC accounting. For data we handle the reservation through clearing the
5364 * delalloc bits in the io_tree. We have to do this since we could end up
5365 * allocating less disk space for the amount of data we have reserved in the
5366 * case of compression.
5368 * If this is a reservation and the block group has become read only we cannot
5369 * make the reservation and return -EAGAIN, otherwise this function always
5370 * succeeds.
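 *
 * For example (illustrative flow): a metadata allocation calls this with
 * RESERVE_ALLOC, moving num_bytes from bytes_may_use into bytes_reserved;
 * a later RESERVE_FREE for the same range takes them back out of
 * bytes_reserved. Neither step touches bytes_used, which only changes in
 * update_block_group().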
5372 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5373 u64 num_bytes, int reserve)
5375 struct btrfs_space_info *space_info = cache->space_info;
5378 spin_lock(&space_info->lock);
5379 spin_lock(&cache->lock);
5380 if (reserve != RESERVE_FREE) {
5384 cache->reserved += num_bytes;
5385 space_info->bytes_reserved += num_bytes;
5386 if (reserve == RESERVE_ALLOC) {
5387 trace_btrfs_space_reservation(cache->fs_info,
5388 "space_info", space_info->flags,
5390 space_info->bytes_may_use -= num_bytes;
5395 space_info->bytes_readonly += num_bytes;
5396 cache->reserved -= num_bytes;
5397 space_info->bytes_reserved -= num_bytes;
5398 space_info->reservation_progress++;
5400 spin_unlock(&cache->lock);
5401 spin_unlock(&space_info->lock);
5405 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5406 struct btrfs_root *root)
5408 struct btrfs_fs_info *fs_info = root->fs_info;
5409 struct btrfs_caching_control *next;
5410 struct btrfs_caching_control *caching_ctl;
5411 struct btrfs_block_group_cache *cache;
5412 struct btrfs_space_info *space_info;
5414 down_write(&fs_info->extent_commit_sem);
5416 list_for_each_entry_safe(caching_ctl, next,
5417 &fs_info->caching_block_groups, list) {
5418 cache = caching_ctl->block_group;
5419 if (block_group_cache_done(cache)) {
5420 cache->last_byte_to_unpin = (u64)-1;
5421 list_del_init(&caching_ctl->list);
5422 put_caching_control(caching_ctl);
5424 cache->last_byte_to_unpin = caching_ctl->progress;
5428 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5429 fs_info->pinned_extents = &fs_info->freed_extents[1];
5431 fs_info->pinned_extents = &fs_info->freed_extents[0];
5433 up_write(&fs_info->extent_commit_sem);
5435 list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5436 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5438 update_global_block_rsv(fs_info);
5441 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5443 struct btrfs_fs_info *fs_info = root->fs_info;
5444 struct btrfs_block_group_cache *cache = NULL;
5445 struct btrfs_space_info *space_info;
5446 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5450 while (start <= end) {
5453 start >= cache->key.objectid + cache->key.offset) {
5455 btrfs_put_block_group(cache);
5456 cache = btrfs_lookup_block_group(fs_info, start);
5457 BUG_ON(!cache); /* Logic error */
5460 len = cache->key.objectid + cache->key.offset - start;
5461 len = min(len, end + 1 - start);
5463 if (start < cache->last_byte_to_unpin) {
5464 len = min(len, cache->last_byte_to_unpin - start);
5465 btrfs_add_free_space(cache, start, len);
5469 space_info = cache->space_info;
5471 spin_lock(&space_info->lock);
5472 spin_lock(&cache->lock);
5473 cache->pinned -= len;
5474 space_info->bytes_pinned -= len;
5476 space_info->bytes_readonly += len;
5479 spin_unlock(&cache->lock);
5480 if (!readonly && global_rsv->space_info == space_info) {
5481 spin_lock(&global_rsv->lock);
5482 if (!global_rsv->full) {
5483 len = min(len, global_rsv->size -
5484 global_rsv->reserved);
5485 global_rsv->reserved += len;
5486 space_info->bytes_may_use += len;
5487 if (global_rsv->reserved >= global_rsv->size)
5488 global_rsv->full = 1;
5490 spin_unlock(&global_rsv->lock);
5492 spin_unlock(&space_info->lock);
5496 btrfs_put_block_group(cache);
5500 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5501 struct btrfs_root *root)
5503 struct btrfs_fs_info *fs_info = root->fs_info;
5504 struct extent_io_tree *unpin;
5512 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5513 unpin = &fs_info->freed_extents[1];
5515 unpin = &fs_info->freed_extents[0];
5518 ret = find_first_extent_bit(unpin, 0, &start, &end,
5519 EXTENT_DIRTY, NULL);
5523 if (btrfs_test_opt(root, DISCARD))
5524 ret = btrfs_discard_extent(root, start,
5525 end + 1 - start, NULL);
5527 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5528 unpin_extent_range(root, start, end);
5535 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5536 u64 owner, u64 root_objectid)
5538 struct btrfs_space_info *space_info;
5541 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5542 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5543 flags = BTRFS_BLOCK_GROUP_SYSTEM;
5545 flags = BTRFS_BLOCK_GROUP_METADATA;
5547 flags = BTRFS_BLOCK_GROUP_DATA;
5550 space_info = __find_space_info(fs_info, flags);
5551 BUG_ON(!space_info); /* Logic bug */
5552 percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5556 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5557 struct btrfs_root *root,
5558 u64 bytenr, u64 num_bytes, u64 parent,
5559 u64 root_objectid, u64 owner_objectid,
5560 u64 owner_offset, int refs_to_drop,
5561 struct btrfs_delayed_extent_op *extent_op)
5563 struct btrfs_key key;
5564 struct btrfs_path *path;
5565 struct btrfs_fs_info *info = root->fs_info;
5566 struct btrfs_root *extent_root = info->extent_root;
5567 struct extent_buffer *leaf;
5568 struct btrfs_extent_item *ei;
5569 struct btrfs_extent_inline_ref *iref;
5572 int extent_slot = 0;
5573 int found_extent = 0;
5577 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5580 path = btrfs_alloc_path();
5585 path->leave_spinning = 1;
5587 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5588 BUG_ON(!is_data && refs_to_drop != 1);
5591 skinny_metadata = 0;
5593 ret = lookup_extent_backref(trans, extent_root, path, &iref,
5594 bytenr, num_bytes, parent,
5595 root_objectid, owner_objectid,
5598 extent_slot = path->slots[0];
5599 while (extent_slot >= 0) {
5600 btrfs_item_key_to_cpu(path->nodes[0], &key,
5602 if (key.objectid != bytenr)
5604 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5605 key.offset == num_bytes) {
5609 if (key.type == BTRFS_METADATA_ITEM_KEY &&
5610 key.offset == owner_objectid) {
5614 if (path->slots[0] - extent_slot > 5)
5618 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5619 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5620 if (found_extent && item_size < sizeof(*ei))
5623 if (!found_extent) {
5625 ret = remove_extent_backref(trans, extent_root, path,
5629 btrfs_abort_transaction(trans, extent_root, ret);
5632 btrfs_release_path(path);
5633 path->leave_spinning = 1;
5635 key.objectid = bytenr;
5636 key.type = BTRFS_EXTENT_ITEM_KEY;
5637 key.offset = num_bytes;
5639 if (!is_data && skinny_metadata) {
5640 key.type = BTRFS_METADATA_ITEM_KEY;
5641 key.offset = owner_objectid;
5644 ret = btrfs_search_slot(trans, extent_root,
5646 if (ret > 0 && skinny_metadata && path->slots[0]) {
5648 * Couldn't find our skinny metadata item,
5649 * see if we have ye olde extent item.
5652 btrfs_item_key_to_cpu(path->nodes[0], &key,
5654 if (key.objectid == bytenr &&
5655 key.type == BTRFS_EXTENT_ITEM_KEY &&
5656 key.offset == num_bytes)
5660 if (ret > 0 && skinny_metadata) {
5661 skinny_metadata = false;
5662 key.type = BTRFS_EXTENT_ITEM_KEY;
5663 key.offset = num_bytes;
5664 btrfs_release_path(path);
5665 ret = btrfs_search_slot(trans, extent_root,
5670 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5671 ret, (unsigned long long)bytenr);
5673 btrfs_print_leaf(extent_root,
5677 btrfs_abort_transaction(trans, extent_root, ret);
5680 extent_slot = path->slots[0];
5682 } else if (ret == -ENOENT) {
5683 btrfs_print_leaf(extent_root, path->nodes[0]);
5686 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
5687 (unsigned long long)bytenr,
5688 (unsigned long long)parent,
5689 (unsigned long long)root_objectid,
5690 (unsigned long long)owner_objectid,
5691 (unsigned long long)owner_offset);
5693 btrfs_abort_transaction(trans, extent_root, ret);
5697 leaf = path->nodes[0];
5698 item_size = btrfs_item_size_nr(leaf, extent_slot);
5699 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5700 if (item_size < sizeof(*ei)) {
5701 BUG_ON(found_extent || extent_slot != path->slots[0]);
5702 ret = convert_extent_item_v0(trans, extent_root, path,
5705 btrfs_abort_transaction(trans, extent_root, ret);
5709 btrfs_release_path(path);
5710 path->leave_spinning = 1;
5712 key.objectid = bytenr;
5713 key.type = BTRFS_EXTENT_ITEM_KEY;
5714 key.offset = num_bytes;
5716 ret = btrfs_search_slot(trans, extent_root, &key, path,
5719 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5720 ret, (unsigned long long)bytenr);
5721 btrfs_print_leaf(extent_root, path->nodes[0]);
5724 btrfs_abort_transaction(trans, extent_root, ret);
5728 extent_slot = path->slots[0];
5729 leaf = path->nodes[0];
5730 item_size = btrfs_item_size_nr(leaf, extent_slot);
5733 BUG_ON(item_size < sizeof(*ei));
5734 ei = btrfs_item_ptr(leaf, extent_slot,
5735 struct btrfs_extent_item);
5736 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5737 key.type == BTRFS_EXTENT_ITEM_KEY) {
5738 struct btrfs_tree_block_info *bi;
5739 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5740 bi = (struct btrfs_tree_block_info *)(ei + 1);
5741 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5744 refs = btrfs_extent_refs(leaf, ei);
5745 if (refs < refs_to_drop) {
5746 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5747 "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
5749 btrfs_abort_transaction(trans, extent_root, ret);
5752 refs -= refs_to_drop;
5756 __run_delayed_extent_op(extent_op, leaf, ei);
5758 * In the case of inline back ref, reference count will
5759 * be updated by remove_extent_backref
5762 BUG_ON(!found_extent);
5764 btrfs_set_extent_refs(leaf, ei, refs);
5765 btrfs_mark_buffer_dirty(leaf);
5768 ret = remove_extent_backref(trans, extent_root, path,
5772 btrfs_abort_transaction(trans, extent_root, ret);
5776 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5780 BUG_ON(is_data && refs_to_drop !=
5781 extent_data_ref_count(root, path, iref));
5783 BUG_ON(path->slots[0] != extent_slot);
5785 BUG_ON(path->slots[0] != extent_slot + 1);
5786 path->slots[0] = extent_slot;
5791 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5794 btrfs_abort_transaction(trans, extent_root, ret);
5797 btrfs_release_path(path);
5800 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5802 btrfs_abort_transaction(trans, extent_root, ret);
5807 ret = update_block_group(root, bytenr, num_bytes, 0);
5809 btrfs_abort_transaction(trans, extent_root, ret);
5814 btrfs_free_path(path);
5819 * when we free a block, it is possible (and likely) that we free the last
5820 * delayed ref for that extent as well. This searches the delayed ref tree for
5821 * a given extent, and if there are no other delayed refs to be processed, it
5822 * removes it from the tree.
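 *
 * For example (illustrative, the common case this targets): a tree block
 * allocated earlier in this same transaction and freed again before the
 * delayed refs run leaves a head whose pending ADD and DROP cancel out,
 * so it can be reaped here and its extent item never reaches the extent
 * tree.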
5824 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5825 struct btrfs_root *root, u64 bytenr)
5827 struct btrfs_delayed_ref_head *head;
5828 struct btrfs_delayed_ref_root *delayed_refs;
5829 struct btrfs_delayed_ref_node *ref;
5830 struct rb_node *node;
5833 delayed_refs = &trans->transaction->delayed_refs;
5834 spin_lock(&delayed_refs->lock);
5835 head = btrfs_find_delayed_ref_head(trans, bytenr);
5839 node = rb_prev(&head->node.rb_node);
5843 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5845 /* there are still entries for this ref, we can't drop it */
5846 if (ref->bytenr == bytenr)
5849 if (head->extent_op) {
5850 if (!head->must_insert_reserved)
5852 btrfs_free_delayed_extent_op(head->extent_op);
5853 head->extent_op = NULL;
5857 * waiting for the lock here would deadlock. If someone else has it
5858 * locked they are already in the process of dropping it anyway
5860 if (!mutex_trylock(&head->mutex))
5864 * at this point we have a head with no other entries. Go
5865 * ahead and process it.
5867 head->node.in_tree = 0;
5868 rb_erase(&head->node.rb_node, &delayed_refs->root);
5870 delayed_refs->num_entries--;
5873 * we don't take a ref on the node because we're removing it from the
5874 * tree, so we just steal the ref the tree was holding.
5876 delayed_refs->num_heads--;
5877 if (list_empty(&head->cluster))
5878 delayed_refs->num_heads_ready--;
5880 list_del_init(&head->cluster);
5881 spin_unlock(&delayed_refs->lock);
5883 BUG_ON(head->extent_op);
5884 if (head->must_insert_reserved)
5887 mutex_unlock(&head->mutex);
5888 btrfs_put_delayed_ref(&head->node);
5891 spin_unlock(&delayed_refs->lock);
5895 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5896 struct btrfs_root *root,
5897 struct extent_buffer *buf,
5898 u64 parent, int last_ref)
5900 struct btrfs_block_group_cache *cache = NULL;
5904 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5905 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5906 buf->start, buf->len,
5907 parent, root->root_key.objectid,
5908 btrfs_header_level(buf),
5909 BTRFS_DROP_DELAYED_REF, NULL, 0);
5910 BUG_ON(ret); /* -ENOMEM */
5916 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5918 if (btrfs_header_generation(buf) == trans->transid) {
5919 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5920 ret = check_ref_cleanup(trans, root, buf->start);
5925 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5926 pin_down_extent(root, cache, buf->start, buf->len, 1);
5930 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5932 btrfs_add_free_space(cache, buf->start, buf->len);
5933 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5938 add_pinned_bytes(root->fs_info, buf->len,
5939 btrfs_header_level(buf),
5940 root->root_key.objectid);
5943 * Deleting the buffer, clear the corrupt flag since it doesn't matter
5944 * anymore.
5946 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5947 btrfs_put_block_group(cache);
5950 /* Can return -ENOMEM */
5951 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5952 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5953 u64 owner, u64 offset, int for_cow)
5956 struct btrfs_fs_info *fs_info = root->fs_info;
5958 add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
5961 * tree log blocks never actually go into the extent allocation
5962 * tree, just update pinning info and exit early.
5964 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5965 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5966 /* unlocks the pinned mutex */
5967 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5969 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5970 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5972 parent, root_objectid, (int)owner,
5973 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5975 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5977 parent, root_objectid, owner,
5978 offset, BTRFS_DROP_DELAYED_REF,
5984 static u64 stripe_align(struct btrfs_root *root,
5985 struct btrfs_block_group_cache *cache,
5986 u64 val, u64 num_bytes)
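/*
 * ALIGN() rounds val up to the next multiple of root->stripesize; e.g.
 * (illustrative numbers) with a 64K stripe size, val = 70K aligns to
 * 128K, and an already aligned 128K stays 128K.
 */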
5988 u64 ret = ALIGN(val, root->stripesize);
5993 * when we wait for progress in the block group caching, it's because
5994 * our allocation attempt failed at least once. So, we must sleep
5995 * and let some progress happen before we try again.
5997 * This function will sleep at least once waiting for new free space to
5998 * show up, and then it will check the block group free space numbers
5999 * for our min num_bytes. Another option is to have it go ahead
6000 * and look in the rbtree for a free extent of a given size, but this
6001 * is a pretty big change.
6004 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6007 struct btrfs_caching_control *caching_ctl;
6009 caching_ctl = get_caching_control(cache);
6013 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6014 (cache->free_space_ctl->free_space >= num_bytes));
6016 put_caching_control(caching_ctl);
6021 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6023 struct btrfs_caching_control *caching_ctl;
6025 caching_ctl = get_caching_control(cache);
6029 wait_event(caching_ctl->wait, block_group_cache_done(cache));
6031 put_caching_control(caching_ctl);
6035 int __get_raid_index(u64 flags)
6037 if (flags & BTRFS_BLOCK_GROUP_RAID10)
6038 return BTRFS_RAID_RAID10;
6039 else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6040 return BTRFS_RAID_RAID1;
6041 else if (flags & BTRFS_BLOCK_GROUP_DUP)
6042 return BTRFS_RAID_DUP;
6043 else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6044 return BTRFS_RAID_RAID0;
6045 else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6046 return BTRFS_RAID_RAID5;
6047 else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6048 return BTRFS_RAID_RAID6;
6050 return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6053 static int get_block_group_index(struct btrfs_block_group_cache *cache)
6055 return __get_raid_index(cache->flags);
6058 enum btrfs_loop_type {
6059 LOOP_CACHING_NOWAIT = 0,
6060 LOOP_CACHING_WAIT = 1,
6061 LOOP_ALLOC_CHUNK = 2,
6062 LOOP_NO_EMPTY_SIZE = 3,
6066 * walks the btree of allocated extents and finds a hole of a given size.
6067 * The key ins is changed to record the hole:
6068 * ins->objectid == block start
6069 * ins->flags = BTRFS_EXTENT_ITEM_KEY
6070 * ins->offset == number of blocks
6071 * Any available blocks before search_start are skipped.
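 *
 * For example (illustrative values only): a successful 1 MiB request
 * might return ins->objectid = 137363456 (the start of the hole) and
 * ins->offset = 1048576, with the type set to BTRFS_EXTENT_ITEM_KEY.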
6073 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
6074 struct btrfs_root *orig_root,
6075 u64 num_bytes, u64 empty_size,
6076 u64 hint_byte, struct btrfs_key *ins,
6080 struct btrfs_root *root = orig_root->fs_info->extent_root;
6081 struct btrfs_free_cluster *last_ptr = NULL;
6082 struct btrfs_block_group_cache *block_group = NULL;
6083 struct btrfs_block_group_cache *used_block_group;
6084 u64 search_start = 0;
6085 int empty_cluster = 2 * 1024 * 1024;
6086 struct btrfs_space_info *space_info;
6088 int index = __get_raid_index(flags);
6089 int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6090 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6091 bool found_uncached_bg = false;
6092 bool failed_cluster_refill = false;
6093 bool failed_alloc = false;
6094 bool use_cluster = true;
6095 bool have_caching_bg = false;
6097 WARN_ON(num_bytes < root->sectorsize);
6098 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6102 trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6104 space_info = __find_space_info(root->fs_info, flags);
6106 btrfs_err(root->fs_info, "No space info for %llu", flags);
6111 * If the space info is for both data and metadata it means we have a
6112 * small filesystem and we can't use the clustering stuff.
6114 if (btrfs_mixed_space_info(space_info))
6115 use_cluster = false;
6117 if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6118 last_ptr = &root->fs_info->meta_alloc_cluster;
6119 if (!btrfs_test_opt(root, SSD))
6120 empty_cluster = 64 * 1024;
6123 if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6124 btrfs_test_opt(root, SSD)) {
6125 last_ptr = &root->fs_info->data_alloc_cluster;
6129 spin_lock(&last_ptr->lock);
6130 if (last_ptr->block_group)
6131 hint_byte = last_ptr->window_start;
6132 spin_unlock(&last_ptr->lock);
6135 search_start = max(search_start, first_logical_byte(root, 0));
6136 search_start = max(search_start, hint_byte);
6141 if (search_start == hint_byte) {
6142 block_group = btrfs_lookup_block_group(root->fs_info,
6144 used_block_group = block_group;
6146 * we don't want to use the block group if it doesn't match our
6147 * allocation bits, or if it's not cached.
6149 * However if we are re-searching with an ideal block group
6150 * picked out then we don't care that the block group is cached.
6152 if (block_group && block_group_bits(block_group, flags) &&
6153 block_group->cached != BTRFS_CACHE_NO) {
6154 down_read(&space_info->groups_sem);
6155 if (list_empty(&block_group->list) ||
6158 * someone is removing this block group,
6159 * we can't jump into the have_block_group
6160 * target because our list pointers are not
6161 * valid
6163 btrfs_put_block_group(block_group);
6164 up_read(&space_info->groups_sem);
6166 index = get_block_group_index(block_group);
6167 goto have_block_group;
6169 } else if (block_group) {
6170 btrfs_put_block_group(block_group);
6174 have_caching_bg = false;
6175 down_read(&space_info->groups_sem);
6176 list_for_each_entry(block_group, &space_info->block_groups[index],
6181 used_block_group = block_group;
6182 btrfs_get_block_group(block_group);
6183 search_start = block_group->key.objectid;
6186 * this can happen if we end up cycling through all the
6187 * raid types, but we want to make sure we only allocate
6188 * for the proper type.
6190 if (!block_group_bits(block_group, flags)) {
6191 u64 extra = BTRFS_BLOCK_GROUP_DUP |
6192 BTRFS_BLOCK_GROUP_RAID1 |
6193 BTRFS_BLOCK_GROUP_RAID5 |
6194 BTRFS_BLOCK_GROUP_RAID6 |
6195 BTRFS_BLOCK_GROUP_RAID10;
6198 * if they asked for extra copies and this block group
6199 * doesn't provide them, bail. This does allow us to
6200 * fill raid0 from raid1.
6202 if ((flags & extra) && !(block_group->flags & extra))
6207 cached = block_group_cache_done(block_group);
6208 if (unlikely(!cached)) {
6209 found_uncached_bg = true;
6210 ret = cache_block_group(block_group, 0);
6215 if (unlikely(block_group->ro))
6219 * Ok we want to try and use the cluster allocator, so
6220 * let's look there
6223 unsigned long aligned_cluster;
6225 * the refill lock keeps out other
6226 * people trying to start a new cluster
6228 spin_lock(&last_ptr->refill_lock);
6229 used_block_group = last_ptr->block_group;
6230 if (used_block_group != block_group &&
6231 (!used_block_group ||
6232 used_block_group->ro ||
6233 !block_group_bits(used_block_group, flags))) {
6234 used_block_group = block_group;
6235 goto refill_cluster;
6238 if (used_block_group != block_group)
6239 btrfs_get_block_group(used_block_group);
6241 offset = btrfs_alloc_from_cluster(used_block_group,
6242 last_ptr, num_bytes, used_block_group->key.objectid);
6244 /* we have a block, we're done */
6245 spin_unlock(&last_ptr->refill_lock);
6246 trace_btrfs_reserve_extent_cluster(root,
6247 block_group, search_start, num_bytes);
6251 WARN_ON(last_ptr->block_group != used_block_group);
6252 if (used_block_group != block_group) {
6253 btrfs_put_block_group(used_block_group);
6254 used_block_group = block_group;
6257 BUG_ON(used_block_group != block_group);
6258 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6259 * set up a new cluster, so let's just skip it
6260 * and let the allocator find whatever block
6261 * it can find. If we reach this point, we
6262 * will have tried the cluster allocator
6263 * plenty of times and not have found
6264 * anything, so we are likely way too
6265 * fragmented for the clustering stuff to find
6266 * anything.
6268 * However, if the cluster is taken from the
6269 * current block group, release the cluster
6270 * first, so that we stand a better chance of
6271 * succeeding in the unclustered
6272 * allocation.
6273 if (loop >= LOOP_NO_EMPTY_SIZE &&
6274 last_ptr->block_group != block_group) {
6275 spin_unlock(&last_ptr->refill_lock);
6276 goto unclustered_alloc;
6280 * this cluster didn't work out, free it and
6281 * start over
6283 btrfs_return_cluster_to_free_space(NULL, last_ptr);
6285 if (loop >= LOOP_NO_EMPTY_SIZE) {
6286 spin_unlock(&last_ptr->refill_lock);
6287 goto unclustered_alloc;
6290 aligned_cluster = max_t(unsigned long,
6291 empty_cluster + empty_size,
6292 block_group->full_stripe_len);
6294 /* allocate a cluster in this block group */
6295 ret = btrfs_find_space_cluster(trans, root,
6296 block_group, last_ptr,
6297 search_start, num_bytes,
6301 * now pull our allocation out of this
6302 * cluster
6304 offset = btrfs_alloc_from_cluster(block_group,
6305 last_ptr, num_bytes,
6308 /* we found one, proceed */
6309 spin_unlock(&last_ptr->refill_lock);
6310 trace_btrfs_reserve_extent_cluster(root,
6311 block_group, search_start,
6315 } else if (!cached && loop > LOOP_CACHING_NOWAIT
6316 && !failed_cluster_refill) {
6317 spin_unlock(&last_ptr->refill_lock);
6319 failed_cluster_refill = true;
6320 wait_block_group_cache_progress(block_group,
6321 num_bytes + empty_cluster + empty_size);
6322 goto have_block_group;
6326 * at this point we either didn't find a cluster
6327 * or we weren't able to allocate a block from our
6328 * cluster. Free the cluster we've been trying
6329 * to use, and go to the next block group
6331 btrfs_return_cluster_to_free_space(NULL, last_ptr);
6332 spin_unlock(&last_ptr->refill_lock);
6337 spin_lock(&block_group->free_space_ctl->tree_lock);
6339 block_group->free_space_ctl->free_space <
6340 num_bytes + empty_cluster + empty_size) {
6341 spin_unlock(&block_group->free_space_ctl->tree_lock);
6344 spin_unlock(&block_group->free_space_ctl->tree_lock);
6346 offset = btrfs_find_space_for_alloc(block_group, search_start,
6347 num_bytes, empty_size);
6349 * If we didn't find a chunk, and we haven't failed on this
6350 * block group before, and this block group is in the middle of
6351 * caching and we are ok with waiting, then go ahead and wait
6352 * for progress to be made, and set failed_alloc to true.
6354 * If failed_alloc is true then we've already waited on this
6355 * block group once and should move on to the next block group.
6357 if (!offset && !failed_alloc && !cached &&
6358 loop > LOOP_CACHING_NOWAIT) {
6359 wait_block_group_cache_progress(block_group,
6360 num_bytes + empty_size);
6361 failed_alloc = true;
6362 goto have_block_group;
6363 } else if (!offset) {
6365 have_caching_bg = true;
6369 search_start = stripe_align(root, used_block_group,
6372 /* move on to the next group */
6373 if (search_start + num_bytes >
6374 used_block_group->key.objectid + used_block_group->key.offset) {
6375 btrfs_add_free_space(used_block_group, offset, num_bytes);
6379 if (offset < search_start)
6380 btrfs_add_free_space(used_block_group, offset,
6381 search_start - offset);
6382 BUG_ON(offset > search_start);
6384 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6386 if (ret == -EAGAIN) {
6387 btrfs_add_free_space(used_block_group, offset, num_bytes);
6391 /* we are all good, let's return */
6392 ins->objectid = search_start;
6393 ins->offset = num_bytes;
6395 trace_btrfs_reserve_extent(orig_root, block_group,
6396 search_start, num_bytes);
6397 if (used_block_group != block_group)
6398 btrfs_put_block_group(used_block_group);
6399 btrfs_put_block_group(block_group);
6402 failed_cluster_refill = false;
6403 failed_alloc = false;
6404 BUG_ON(index != get_block_group_index(block_group));
6405 if (used_block_group != block_group)
6406 btrfs_put_block_group(used_block_group);
6407 btrfs_put_block_group(block_group);
6409 up_read(&space_info->groups_sem);
6411 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6414 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6418 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6419 * caching kthreads as we move along
6420 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6421 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6422 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6423 * again
6425 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6428 if (loop == LOOP_ALLOC_CHUNK) {
6429 ret = do_chunk_alloc(trans, root, flags,
6432 * Do not bail out on ENOSPC since we
6433 * can do more things.
6435 if (ret < 0 && ret != -ENOSPC) {
6436 btrfs_abort_transaction(trans,
6442 if (loop == LOOP_NO_EMPTY_SIZE) {
6448 } else if (!ins->objectid) {
6450 } else if (ins->objectid) {
6458 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6459 int dump_block_groups)
6461 struct btrfs_block_group_cache *cache;
6464 spin_lock(&info->lock);
6465 printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6466 (unsigned long long)info->flags,
6467 (unsigned long long)(info->total_bytes - info->bytes_used -
6468 info->bytes_pinned - info->bytes_reserved -
6469 info->bytes_readonly),
6470 (info->full) ? "" : "not ");
6471 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6472 "reserved=%llu, may_use=%llu, readonly=%llu\n",
6473 (unsigned long long)info->total_bytes,
6474 (unsigned long long)info->bytes_used,
6475 (unsigned long long)info->bytes_pinned,
6476 (unsigned long long)info->bytes_reserved,
6477 (unsigned long long)info->bytes_may_use,
6478 (unsigned long long)info->bytes_readonly);
6479 spin_unlock(&info->lock);
6481 if (!dump_block_groups)
6484 down_read(&info->groups_sem);
6486 list_for_each_entry(cache, &info->block_groups[index], list) {
6487 spin_lock(&cache->lock);
6488 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6489 (unsigned long long)cache->key.objectid,
6490 (unsigned long long)cache->key.offset,
6491 (unsigned long long)btrfs_block_group_used(&cache->item),
6492 (unsigned long long)cache->pinned,
6493 (unsigned long long)cache->reserved,
6494 cache->ro ? "[readonly]" : "");
6495 btrfs_dump_free_space(cache, bytes);
6496 spin_unlock(&cache->lock);
6498 if (++index < BTRFS_NR_RAID_TYPES)
6500 up_read(&info->groups_sem);
6503 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6504 struct btrfs_root *root,
6505 u64 num_bytes, u64 min_alloc_size,
6506 u64 empty_size, u64 hint_byte,
6507 struct btrfs_key *ins, int is_data)
6509 bool final_tried = false;
6513 flags = btrfs_get_alloc_profile(root, is_data);
6515 WARN_ON(num_bytes < root->sectorsize);
6516 ret = find_free_extent(trans, root, num_bytes, empty_size,
6517 hint_byte, ins, flags);
6519 if (ret == -ENOSPC) {
6521 num_bytes = num_bytes >> 1;
6522 num_bytes = round_down(num_bytes, root->sectorsize);
6523 num_bytes = max(num_bytes, min_alloc_size);
6524 if (num_bytes == min_alloc_size)
6527 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6528 struct btrfs_space_info *sinfo;
6530 sinfo = __find_space_info(root->fs_info, flags);
6531 btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6532 (unsigned long long)flags,
6533 (unsigned long long)num_bytes);
6535 dump_space_info(sinfo, num_bytes, 1);
6539 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6544 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6545 u64 start, u64 len, int pin)
6547 struct btrfs_block_group_cache *cache;
6550 cache = btrfs_lookup_block_group(root->fs_info, start);
6552 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6553 (unsigned long long)start);
6557 if (btrfs_test_opt(root, DISCARD))
6558 ret = btrfs_discard_extent(root, start, len, NULL);
6561 pin_down_extent(root, cache, start, len, 1);
6563 btrfs_add_free_space(cache, start, len);
6564 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6566 btrfs_put_block_group(cache);
6568 trace_btrfs_reserved_extent_free(root, start, len);
6573 int btrfs_free_reserved_extent(struct btrfs_root *root,
6576 return __btrfs_free_reserved_extent(root, start, len, 0);
6579 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6582 return __btrfs_free_reserved_extent(root, start, len, 1);
6585 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6586 struct btrfs_root *root,
6587 u64 parent, u64 root_objectid,
6588 u64 flags, u64 owner, u64 offset,
6589 struct btrfs_key *ins, int ref_mod)
6592 struct btrfs_fs_info *fs_info = root->fs_info;
6593 struct btrfs_extent_item *extent_item;
6594 struct btrfs_extent_inline_ref *iref;
6595 struct btrfs_path *path;
6596 struct extent_buffer *leaf;
6601 type = BTRFS_SHARED_DATA_REF_KEY;
6603 type = BTRFS_EXTENT_DATA_REF_KEY;
6605 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6607 path = btrfs_alloc_path();
6611 path->leave_spinning = 1;
6612 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6615 btrfs_free_path(path);
6619 leaf = path->nodes[0];
6620 extent_item = btrfs_item_ptr(leaf, path->slots[0],
6621 struct btrfs_extent_item);
6622 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6623 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6624 btrfs_set_extent_flags(leaf, extent_item,
6625 flags | BTRFS_EXTENT_FLAG_DATA);
6627 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6628 btrfs_set_extent_inline_ref_type(leaf, iref, type);
6630 struct btrfs_shared_data_ref *ref;
6631 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6632 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6633 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6635 struct btrfs_extent_data_ref *ref;
6636 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6637 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6638 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6639 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6640 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6643 btrfs_mark_buffer_dirty(path->nodes[0]);
6644 btrfs_free_path(path);
6646 ret = update_block_group(root, ins->objectid, ins->offset, 1);
6647 if (ret) { /* -ENOENT, logic error */
6648 btrfs_err(fs_info, "update block group failed for %llu %llu",
6649 (unsigned long long)ins->objectid,
6650 (unsigned long long)ins->offset);
6656 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6657 struct btrfs_root *root,
6658 u64 parent, u64 root_objectid,
6659 u64 flags, struct btrfs_disk_key *key,
6660 int level, struct btrfs_key *ins)
6663 struct btrfs_fs_info *fs_info = root->fs_info;
6664 struct btrfs_extent_item *extent_item;
6665 struct btrfs_tree_block_info *block_info;
6666 struct btrfs_extent_inline_ref *iref;
6667 struct btrfs_path *path;
6668 struct extent_buffer *leaf;
6669 u32 size = sizeof(*extent_item) + sizeof(*iref);
6670 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6673 if (!skinny_metadata)
6674 size += sizeof(*block_info);
6676 path = btrfs_alloc_path();
6680 path->leave_spinning = 1;
6681 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6684 btrfs_free_path(path);
6688 leaf = path->nodes[0];
6689 extent_item = btrfs_item_ptr(leaf, path->slots[0],
6690 struct btrfs_extent_item);
6691 btrfs_set_extent_refs(leaf, extent_item, 1);
6692 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6693 btrfs_set_extent_flags(leaf, extent_item,
6694 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6696 if (skinny_metadata) {
6697 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6699 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6700 btrfs_set_tree_block_key(leaf, block_info, key);
6701 btrfs_set_tree_block_level(leaf, block_info, level);
6702 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6706 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6707 btrfs_set_extent_inline_ref_type(leaf, iref,
6708 BTRFS_SHARED_BLOCK_REF_KEY);
6709 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6711 btrfs_set_extent_inline_ref_type(leaf, iref,
6712 BTRFS_TREE_BLOCK_REF_KEY);
6713 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6716 btrfs_mark_buffer_dirty(leaf);
6717 btrfs_free_path(path);
6719 ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6720 if (ret) { /* -ENOENT, logic error */
6721 btrfs_err(fs_info, "update block group failed for %llu %llu",
6722 (unsigned long long)ins->objectid,
6723 (unsigned long long)ins->offset);
6729 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6730 struct btrfs_root *root,
6731 u64 root_objectid, u64 owner,
6732 u64 offset, struct btrfs_key *ins)
6736 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6738 ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6740 root_objectid, owner, offset,
6741 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6746 * this is used by the tree logging recovery code. It records that
6747 * an extent has been allocated and makes sure to clear the free
6748 * space cache bits as well.
6750 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6751 struct btrfs_root *root,
6752 u64 root_objectid, u64 owner, u64 offset,
6753 struct btrfs_key *ins)
6756 struct btrfs_block_group_cache *block_group;
6759 * Mixed block groups will exclude before processing the log so we only
6760 * need to do the exclude dance if this fs isn't mixed.
6762 if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6763 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6768 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6772 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6773 RESERVE_ALLOC_NO_ACCOUNT);
6774 BUG_ON(ret); /* logic error */
6775 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6776 0, owner, offset, ins, 1);
6777 btrfs_put_block_group(block_group);
6781 static struct extent_buffer *
6782 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6783 u64 bytenr, u32 blocksize, int level)
6785 struct extent_buffer *buf;
6787 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6789 return ERR_PTR(-ENOMEM);
6790 btrfs_set_header_generation(buf, trans->transid);
6791 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6792 btrfs_tree_lock(buf);
6793 clean_tree_block(trans, root, buf);
6794 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6796 btrfs_set_lock_blocking(buf);
6797 btrfs_set_buffer_uptodate(buf);
6799 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6801 * we allow two log transactions at a time, use different
6802 * EXTENT bits to differentiate dirty pages.
6804 if (root->log_transid % 2 == 0)
6805 set_extent_dirty(&root->dirty_log_pages, buf->start,
6806 buf->start + buf->len - 1, GFP_NOFS);
6808 set_extent_new(&root->dirty_log_pages, buf->start,
6809 buf->start + buf->len - 1, GFP_NOFS);
6811 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6812 buf->start + buf->len - 1, GFP_NOFS);
6814 trans->blocks_used++;
6815 /* this returns a buffer locked for blocking */
6819 static struct btrfs_block_rsv *
6820 use_block_rsv(struct btrfs_trans_handle *trans,
6821 struct btrfs_root *root, u32 blocksize)
6823 struct btrfs_block_rsv *block_rsv;
6824 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6826 bool global_updated = false;
6828 block_rsv = get_block_rsv(trans, root);
6830 if (unlikely(block_rsv->size == 0))
6833 ret = block_rsv_use_bytes(block_rsv, blocksize);
6837 if (block_rsv->failfast)
6838 return ERR_PTR(ret);
6840 if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6841 global_updated = true;
6842 update_global_block_rsv(root->fs_info);
6846 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6847 static DEFINE_RATELIMIT_STATE(_rs,
6848 DEFAULT_RATELIMIT_INTERVAL * 10,
6849 /*DEFAULT_RATELIMIT_BURST*/ 1);
6850 if (__ratelimit(&_rs))
6852 "btrfs: block rsv returned %d\n", ret);
6855 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6856 BTRFS_RESERVE_NO_FLUSH);
6860 * If we couldn't reserve metadata bytes try and use some from
6861 * the global reserve if its space type is the same as the global
6862 * reserve.
6864 if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6865 block_rsv->space_info == global_rsv->space_info) {
6866 ret = block_rsv_use_bytes(global_rsv, blocksize);
6870 return ERR_PTR(ret);
6873 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6874 struct btrfs_block_rsv *block_rsv, u32 blocksize)
6876 block_rsv_add_bytes(block_rsv, blocksize, 0);
6877 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6881 * finds a free extent and does all the dirty work required for allocation.
6882 * returns the key for the extent through ins, and a tree buffer for
6883 * the first block of the extent through buf.
6885 * returns the tree buffer or an ERR_PTR on failure.
6887 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6888 struct btrfs_root *root, u32 blocksize,
6889 u64 parent, u64 root_objectid,
6890 struct btrfs_disk_key *key, int level,
6891 u64 hint, u64 empty_size)
6893 struct btrfs_key ins;
6894 struct btrfs_block_rsv *block_rsv;
6895 struct extent_buffer *buf;
6898 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6901 block_rsv = use_block_rsv(trans, root, blocksize);
6902 if (IS_ERR(block_rsv))
6903 return ERR_CAST(block_rsv);
6905 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6906 empty_size, hint, &ins, 0);
6908 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6909 return ERR_PTR(ret);
6912 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6914 BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6916 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6918 parent = ins.objectid;
6919 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6923 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6924 struct btrfs_delayed_extent_op *extent_op;
6925 extent_op = btrfs_alloc_delayed_extent_op();
6926 BUG_ON(!extent_op); /* -ENOMEM */
6928 memcpy(&extent_op->key, key, sizeof(extent_op->key));
6930 memset(&extent_op->key, 0, sizeof(extent_op->key));
6931 extent_op->flags_to_set = flags;
6932 if (skinny_metadata)
6933 extent_op->update_key = 0;
6935 extent_op->update_key = 1;
6936 extent_op->update_flags = 1;
6937 extent_op->is_data = 0;
6938 extent_op->level = level;
6940 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6942 ins.offset, parent, root_objectid,
6943 level, BTRFS_ADD_DELAYED_EXTENT,
6945 BUG_ON(ret); /* -ENOMEM */
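/*
 * A minimal sketch of a typical caller (the btree COW path), assuming
 * 'buf' is the block being copied and 'disk_key' holds its first key;
 * illustrative only:
 *
 *	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
 *				     root->root_key.objectid, &disk_key,
 *				     level, buf->start, 0);
 *	if (IS_ERR(cow))
 *		return PTR_ERR(cow);
 *
 * The returned buffer comes back locked, cleaned of stale state and
 * marked up to date, ready to receive the copied items.
 */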
6950 struct walk_control {
6951 u64 refs[BTRFS_MAX_LEVEL];
6952 u64 flags[BTRFS_MAX_LEVEL];
6953 struct btrfs_key update_progress;
6964 #define DROP_REFERENCE 1
6965 #define UPDATE_BACKREF 2
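/*
 * Example of how the two stages interact while dropping a snapshot,
 * based on the walkers below: the walk normally stays in
 * DROP_REFERENCE and drops one reference per block on the way down.
 * When do_walk_down() meets a shared block (refs > 1) that still
 * carries implicit backrefs and was created after the point recorded
 * in update_progress, it flips the stage to UPDATE_BACKREF; the
 * subtree is walked once to convert those blocks to full backrefs,
 * after which walk_up_proc() restores DROP_REFERENCE and the drop
 * resumes where it left off.
 */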
6967 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6968 struct btrfs_root *root,
6969 struct walk_control *wc,
6970 struct btrfs_path *path)
6978 struct btrfs_key key;
6979 struct extent_buffer *eb;
6984 if (path->slots[wc->level] < wc->reada_slot) {
6985 wc->reada_count = wc->reada_count * 2 / 3;
6986 wc->reada_count = max(wc->reada_count, 2);
6988 wc->reada_count = wc->reada_count * 3 / 2;
6989 wc->reada_count = min_t(int, wc->reada_count,
6990 BTRFS_NODEPTRS_PER_BLOCK(root));
6993 eb = path->nodes[wc->level];
6994 nritems = btrfs_header_nritems(eb);
6995 blocksize = btrfs_level_size(root, wc->level - 1);
6997 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6998 if (nread >= wc->reada_count)
7002 bytenr = btrfs_node_blockptr(eb, slot);
7003 generation = btrfs_node_ptr_generation(eb, slot);
7005 if (slot == path->slots[wc->level])
7008 if (wc->stage == UPDATE_BACKREF &&
7009 generation <= root->root_key.offset)
7012 /* We don't lock the tree block, it's OK to be racy here */
7013 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7014 wc->level - 1, 1, &refs,
7016 /* We don't care about errors in readahead. */
7021 if (wc->stage == DROP_REFERENCE) {
7025 if (wc->level == 1 &&
7026 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7028 if (!wc->update_ref ||
7029 generation <= root->root_key.offset)
7031 btrfs_node_key_to_cpu(eb, &key, slot);
7032 ret = btrfs_comp_cpu_keys(&key,
7033 &wc->update_progress);
7037 if (wc->level == 1 &&
7038 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7042 ret = readahead_tree_block(root, bytenr, blocksize,
7048 wc->reada_slot = slot;
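/*
 * Worked example of the window adjustment above, assuming a starting
 * reada_count of 120: if the walker comes back to a slot before the
 * last readahead point, the window shrinks 120 -> 80 -> 53 (never
 * below 2); if it has moved past that point, the window grows by 3/2,
 * capped at one node's worth of pointers
 * (BTRFS_NODEPTRS_PER_BLOCK(root)).
 */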
7052 * helper to process tree block while walking down the tree.
7054 * when wc->stage == UPDATE_BACKREF, this function updates
7055 * back refs for pointers in the block.
7057 * NOTE: return value 1 means we should stop walking down.
7059 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7060 struct btrfs_root *root,
7061 struct btrfs_path *path,
7062 struct walk_control *wc, int lookup_info)
7064 int level = wc->level;
7065 struct extent_buffer *eb = path->nodes[level];
7066 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7069 if (wc->stage == UPDATE_BACKREF &&
7070 btrfs_header_owner(eb) != root->root_key.objectid)
7074 * when the reference count of a tree block is 1, it won't increase
7075 * again. once the full backref flag is set, we never clear it.
7078 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7079 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7080 BUG_ON(!path->locks[level]);
7081 ret = btrfs_lookup_extent_info(trans, root,
7082 eb->start, level, 1,
7085 BUG_ON(ret == -ENOMEM);
7088 BUG_ON(wc->refs[level] == 0);
7091 if (wc->stage == DROP_REFERENCE) {
7092 if (wc->refs[level] > 1)
7095 if (path->locks[level] && !wc->keep_locks) {
7096 btrfs_tree_unlock_rw(eb, path->locks[level]);
7097 path->locks[level] = 0;
7102 /* wc->stage == UPDATE_BACKREF */
7103 if (!(wc->flags[level] & flag)) {
7104 BUG_ON(!path->locks[level]);
7105 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7106 BUG_ON(ret); /* -ENOMEM */
7107 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7108 BUG_ON(ret); /* -ENOMEM */
7109 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7111 btrfs_header_level(eb), 0);
7112 BUG_ON(ret); /* -ENOMEM */
7113 wc->flags[level] |= flag;
7117 * the block is shared by multiple trees, so it's not good to
7118 * keep the tree lock
7120 if (path->locks[level] && level > 0) {
7121 btrfs_tree_unlock_rw(eb, path->locks[level]);
7122 path->locks[level] = 0;
7128 * helper to process tree block pointer.
7130 * when wc->stage == DROP_REFERENCE, this function checks
7131 * reference count of the block pointed to. if the block
7132 * is shared and we need to update back refs for the subtree
7133 * rooted at the block, this function changes wc->stage to
7134 * UPDATE_BACKREF. if the block is shared and there is no
7135 * need to update back refs, this function drops the reference
7138 * NOTE: return value 1 means we should stop walking down.
7140 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7141 struct btrfs_root *root,
7142 struct btrfs_path *path,
7143 struct walk_control *wc, int *lookup_info)
7149 struct btrfs_key key;
7150 struct extent_buffer *next;
7151 int level = wc->level;
7155 generation = btrfs_node_ptr_generation(path->nodes[level],
7156 path->slots[level]);
7158 * if the lower level block was created before the snapshot
7159 * was created, we know there is no need to update back refs
7162 if (wc->stage == UPDATE_BACKREF &&
7163 generation <= root->root_key.offset) {
7168 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7169 blocksize = btrfs_level_size(root, level - 1);
7171 next = btrfs_find_tree_block(root, bytenr, blocksize);
7173 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7178 btrfs_tree_lock(next);
7179 btrfs_set_lock_blocking(next);
7181 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7182 &wc->refs[level - 1],
7183 &wc->flags[level - 1]);
7185 btrfs_tree_unlock(next);
7189 if (unlikely(wc->refs[level - 1] == 0)) {
7190 btrfs_err(root->fs_info, "Missing references.");
7195 if (wc->stage == DROP_REFERENCE) {
7196 if (wc->refs[level - 1] > 1) {
7198 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7201 if (!wc->update_ref ||
7202 generation <= root->root_key.offset)
7205 btrfs_node_key_to_cpu(path->nodes[level], &key,
7206 path->slots[level]);
7207 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7211 wc->stage = UPDATE_BACKREF;
7212 wc->shared_level = level - 1;
7216 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7220 if (!btrfs_buffer_uptodate(next, generation, 0)) {
7221 btrfs_tree_unlock(next);
7222 free_extent_buffer(next);
7228 if (reada && level == 1)
7229 reada_walk_down(trans, root, wc, path);
7230 next = read_tree_block(root, bytenr, blocksize, generation);
7231 if (!next || !extent_buffer_uptodate(next)) {
7232 free_extent_buffer(next);
7235 btrfs_tree_lock(next);
7236 btrfs_set_lock_blocking(next);
7240 BUG_ON(level != btrfs_header_level(next));
7241 path->nodes[level] = next;
7242 path->slots[level] = 0;
7243 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7249 wc->refs[level - 1] = 0;
7250 wc->flags[level - 1] = 0;
7251 if (wc->stage == DROP_REFERENCE) {
7252 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7253 parent = path->nodes[level]->start;
7255 BUG_ON(root->root_key.objectid !=
7256 btrfs_header_owner(path->nodes[level]));
7260 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7261 root->root_key.objectid, level - 1, 0, 0);
7262 BUG_ON(ret); /* -ENOMEM */
7264 btrfs_tree_unlock(next);
7265 free_extent_buffer(next);
7271 * helper to process tree block while walking up the tree.
7273 * when wc->stage == DROP_REFERENCE, this function drops
7274 * reference count on the block.
7276 * when wc->stage == UPDATE_BACKREF, this function changes
7277 * wc->stage back to DROP_REFERENCE if we changed wc->stage
7278 * to UPDATE_BACKREF previously while processing the block.
7280 * NOTE: return value 1 means we should stop walking up.
7282 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7283 struct btrfs_root *root,
7284 struct btrfs_path *path,
7285 struct walk_control *wc)
7288 int level = wc->level;
7289 struct extent_buffer *eb = path->nodes[level];
7292 if (wc->stage == UPDATE_BACKREF) {
7293 BUG_ON(wc->shared_level < level);
7294 if (level < wc->shared_level)
7297 ret = find_next_key(path, level + 1, &wc->update_progress);
7301 wc->stage = DROP_REFERENCE;
7302 wc->shared_level = -1;
7303 path->slots[level] = 0;
7306 * check reference count again if the block isn't locked.
7307 * we should start walking down the tree again if reference
7308 * count is one.
7310 if (!path->locks[level]) {
7312 btrfs_tree_lock(eb);
7313 btrfs_set_lock_blocking(eb);
7314 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7316 ret = btrfs_lookup_extent_info(trans, root,
7317 eb->start, level, 1,
7321 btrfs_tree_unlock_rw(eb, path->locks[level]);
7322 path->locks[level] = 0;
7325 BUG_ON(wc->refs[level] == 0);
7326 if (wc->refs[level] == 1) {
7327 btrfs_tree_unlock_rw(eb, path->locks[level]);
7328 path->locks[level] = 0;
7334 /* wc->stage == DROP_REFERENCE */
7335 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7337 if (wc->refs[level] == 1) {
7339 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7340 ret = btrfs_dec_ref(trans, root, eb, 1,
7343 ret = btrfs_dec_ref(trans, root, eb, 0,
7345 BUG_ON(ret); /* -ENOMEM */
7347 /* make block locked assertion in clean_tree_block happy */
7348 if (!path->locks[level] &&
7349 btrfs_header_generation(eb) == trans->transid) {
7350 btrfs_tree_lock(eb);
7351 btrfs_set_lock_blocking(eb);
7352 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7354 clean_tree_block(trans, root, eb);
7357 if (eb == root->node) {
7358 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7361 BUG_ON(root->root_key.objectid !=
7362 btrfs_header_owner(eb));
7364 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7365 parent = path->nodes[level + 1]->start;
7367 BUG_ON(root->root_key.objectid !=
7368 btrfs_header_owner(path->nodes[level + 1]));
7371 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7373 wc->refs[level] = 0;
7374 wc->flags[level] = 0;
7378 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7379 struct btrfs_root *root,
7380 struct btrfs_path *path,
7381 struct walk_control *wc)
7383 int level = wc->level;
7384 int lookup_info = 1;
7387 while (level >= 0) {
7388 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7395 if (path->slots[level] >=
7396 btrfs_header_nritems(path->nodes[level]))
7399 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7401 path->slots[level]++;
7410 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7411 struct btrfs_root *root,
7412 struct btrfs_path *path,
7413 struct walk_control *wc, int max_level)
7415 int level = wc->level;
7418 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7419 while (level < max_level && path->nodes[level]) {
7421 if (path->slots[level] + 1 <
7422 btrfs_header_nritems(path->nodes[level])) {
7423 path->slots[level]++;
7426 ret = walk_up_proc(trans, root, path, wc);
7430 if (path->locks[level]) {
7431 btrfs_tree_unlock_rw(path->nodes[level],
7432 path->locks[level]);
7433 path->locks[level] = 0;
7435 free_extent_buffer(path->nodes[level]);
7436 path->nodes[level] = NULL;
7444 * drop a subvolume tree.
7446 * this function traverses the tree, freeing any blocks that are only
7447 * referenced by the tree.
7449 * when a shared tree block is found, this function decreases its
7450 * reference count by one. if update_ref is true, this function
7451 * also makes sure backrefs for the shared block and all lower level
7452 * blocks are properly updated.
7454 * If called with for_reloc == 0, may exit early with -EAGAIN
7456 int btrfs_drop_snapshot(struct btrfs_root *root,
7457 struct btrfs_block_rsv *block_rsv, int update_ref,
7460 struct btrfs_path *path;
7461 struct btrfs_trans_handle *trans;
7462 struct btrfs_root *tree_root = root->fs_info->tree_root;
7463 struct btrfs_root_item *root_item = &root->root_item;
7464 struct walk_control *wc;
7465 struct btrfs_key key;
7469 bool root_dropped = false;
7471 path = btrfs_alloc_path();
7477 wc = kzalloc(sizeof(*wc), GFP_NOFS);
7479 btrfs_free_path(path);
7484 trans = btrfs_start_transaction(tree_root, 0);
7485 if (IS_ERR(trans)) {
7486 err = PTR_ERR(trans);
7491 trans->block_rsv = block_rsv;
7493 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7494 level = btrfs_header_level(root->node);
7495 path->nodes[level] = btrfs_lock_root_node(root);
7496 btrfs_set_lock_blocking(path->nodes[level]);
7497 path->slots[level] = 0;
7498 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7499 memset(&wc->update_progress, 0,
7500 sizeof(wc->update_progress));
7502 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7503 memcpy(&wc->update_progress, &key,
7504 sizeof(wc->update_progress));
7506 level = root_item->drop_level;
7508 path->lowest_level = level;
7509 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7510 path->lowest_level = 0;
7518 * unlock our path, this is safe because only this
7519 * function is allowed to delete this snapshot
7521 btrfs_unlock_up_safe(path, 0);
7523 level = btrfs_header_level(root->node);
7525 btrfs_tree_lock(path->nodes[level]);
7526 btrfs_set_lock_blocking(path->nodes[level]);
7527 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7529 ret = btrfs_lookup_extent_info(trans, root,
7530 path->nodes[level]->start,
7531 level, 1, &wc->refs[level],
7537 BUG_ON(wc->refs[level] == 0);
7539 if (level == root_item->drop_level)
7542 btrfs_tree_unlock(path->nodes[level]);
7543 path->locks[level] = 0;
7544 WARN_ON(wc->refs[level] != 1);
7550 wc->shared_level = -1;
7551 wc->stage = DROP_REFERENCE;
7552 wc->update_ref = update_ref;
7554 wc->for_reloc = for_reloc;
7555 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7559 ret = walk_down_tree(trans, root, path, wc);
7565 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7572 BUG_ON(wc->stage != DROP_REFERENCE);
7576 if (wc->stage == DROP_REFERENCE) {
7578 btrfs_node_key(path->nodes[level],
7579 &root_item->drop_progress,
7580 path->slots[level]);
7581 root_item->drop_level = level;
7584 BUG_ON(wc->level == 0);
7585 if (btrfs_should_end_transaction(trans, tree_root) ||
7586 (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7587 ret = btrfs_update_root(trans, tree_root,
7591 btrfs_abort_transaction(trans, tree_root, ret);
7596 btrfs_end_transaction_throttle(trans, tree_root);
7597 if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7598 pr_debug("btrfs: drop snapshot early exit\n");
7603 trans = btrfs_start_transaction(tree_root, 0);
7604 if (IS_ERR(trans)) {
7605 err = PTR_ERR(trans);
7609 trans->block_rsv = block_rsv;
7612 btrfs_release_path(path);
7616 ret = btrfs_del_root(trans, tree_root, &root->root_key);
7618 btrfs_abort_transaction(trans, tree_root, ret);
7622 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7623 ret = btrfs_find_root(tree_root, &root->root_key, path,
7626 btrfs_abort_transaction(trans, tree_root, ret);
7629 } else if (ret > 0) {
7630 /* if we fail to delete the orphan item this time
7631 * around, it'll get picked up the next time.
7633 * The most common failure here is just -ENOENT.
7635 btrfs_del_orphan_item(trans, tree_root,
7636 root->root_key.objectid);
7640 if (root->in_radix) {
7641 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7643 free_extent_buffer(root->node);
7644 free_extent_buffer(root->commit_root);
7645 btrfs_put_fs_root(root);
7647 root_dropped = true;
7649 btrfs_end_transaction_throttle(trans, tree_root);
7652 btrfs_free_path(path);
7655 * So if we need to stop dropping the snapshot for whatever reason we
7656 * need to make sure to add it back to the dead root list so that we
7657 * keep trying to do the work later. This also cleans up roots we
7658 * don't have in the radix tree (such as when we recover after a power
7659 * failure or an unclean unmount), so we don't leak memory.
7661 if (root_dropped == false)
7662 btrfs_add_dead_root(root);
7664 btrfs_std_error(root->fs_info, err);
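/*
 * Sketches of the usual call sites, illustrative only: the cleaner
 * thread drops dead snapshots without a reservation and allows the
 * early exit,
 *
 *	btrfs_drop_snapshot(root, NULL, 0, 0);
 *
 * while relocation (where 'rc' is its control structure) drops reloc
 * trees with for_reloc set so the walk cannot bail out with -EAGAIN:
 *
 *	btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
 */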
7669 * drop subtree rooted at tree block 'node'.
7671 * NOTE: this function will unlock and release tree block 'node'.
7672 * Only used by the relocation code.
7674 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7675 struct btrfs_root *root,
7676 struct extent_buffer *node,
7677 struct extent_buffer *parent)
7679 struct btrfs_path *path;
7680 struct walk_control *wc;
7686 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7688 path = btrfs_alloc_path();
7692 wc = kzalloc(sizeof(*wc), GFP_NOFS);
7694 btrfs_free_path(path);
7698 btrfs_assert_tree_locked(parent);
7699 parent_level = btrfs_header_level(parent);
7700 extent_buffer_get(parent);
7701 path->nodes[parent_level] = parent;
7702 path->slots[parent_level] = btrfs_header_nritems(parent);
7704 btrfs_assert_tree_locked(node);
7705 level = btrfs_header_level(node);
7706 path->nodes[level] = node;
7707 path->slots[level] = 0;
7708 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7710 wc->refs[parent_level] = 1;
7711 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7713 wc->shared_level = -1;
7714 wc->stage = DROP_REFERENCE;
7718 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7721 wret = walk_down_tree(trans, root, path, wc);
7727 wret = walk_up_tree(trans, root, path, wc, parent_level);
7735 btrfs_free_path(path);
7739 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7745 * if restripe for this chunk_type is on, pick the target profile and
7746 * return; otherwise do the usual balance
7748 stripped = get_restripe_target(root->fs_info, flags);
7750 return extended_to_chunk(stripped);
7753 * we add in the count of missing devices because we want
7754 * to make sure that any RAID levels on a degraded FS
7755 * continue to be honored.
7757 num_devices = root->fs_info->fs_devices->rw_devices +
7758 root->fs_info->fs_devices->missing_devices;
7760 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7761 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7762 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7764 if (num_devices == 1) {
7765 stripped |= BTRFS_BLOCK_GROUP_DUP;
7766 stripped = flags & ~stripped;
7768 /* turn raid0 into single device chunks */
7769 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7772 /* turn mirroring into duplication */
7773 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7774 BTRFS_BLOCK_GROUP_RAID10))
7775 return stripped | BTRFS_BLOCK_GROUP_DUP;
7777 /* they already had raid on here, just return */
7778 if (flags & stripped)
7781 stripped |= BTRFS_BLOCK_GROUP_DUP;
7782 stripped = flags & ~stripped;
7784 /* switch duplicated blocks with raid1 */
7785 if (flags & BTRFS_BLOCK_GROUP_DUP)
7786 return stripped | BTRFS_BLOCK_GROUP_RAID1;
7788 /* this is drive concat, leave it alone */
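/*
 * Worked example of the conversion above: on a filesystem degraded to
 * a single writable device, RAID1 metadata sees num_devices == 1, so
 * every RAID bit plus DUP is stripped from the flags and, because
 * RAID1 was set, we return the remainder | BTRFS_BLOCK_GROUP_DUP;
 * mirroring degrades to two copies on the one drive.  RAID0 in the
 * same situation falls back to plain single-device chunks.
 */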
7794 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7796 struct btrfs_space_info *sinfo = cache->space_info;
7798 u64 min_allocable_bytes;
7803 * We need some metadata space and system metadata space for
7804 * allocating chunks in some corner cases until we force the block
7805 * group to be read-only.
7808 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7810 min_allocable_bytes = 1 * 1024 * 1024;
7812 min_allocable_bytes = 0;
7814 spin_lock(&sinfo->lock);
7815 spin_lock(&cache->lock);
7822 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7823 cache->bytes_super - btrfs_block_group_used(&cache->item);
7825 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7826 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7827 min_allocable_bytes <= sinfo->total_bytes) {
7828 sinfo->bytes_readonly += num_bytes;
7833 spin_unlock(&cache->lock);
7834 spin_unlock(&sinfo->lock);
7838 int btrfs_set_block_group_ro(struct btrfs_root *root,
7839 struct btrfs_block_group_cache *cache)
7842 struct btrfs_trans_handle *trans;
7848 trans = btrfs_join_transaction(root);
7850 return PTR_ERR(trans);
7852 alloc_flags = update_block_group_flags(root, cache->flags);
7853 if (alloc_flags != cache->flags) {
7854 ret = do_chunk_alloc(trans, root, alloc_flags,
7860 ret = set_block_group_ro(cache, 0);
7863 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7864 ret = do_chunk_alloc(trans, root, alloc_flags,
7868 ret = set_block_group_ro(cache, 0);
7870 btrfs_end_transaction(trans, root);
7874 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7875 struct btrfs_root *root, u64 type)
7877 u64 alloc_flags = get_alloc_profile(root, type);
7878 return do_chunk_alloc(trans, root, alloc_flags,
7883 * helper to account the unused space of all the readonly block groups
7884 * in the list. takes mirrors into account.
7886 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7888 struct btrfs_block_group_cache *block_group;
7892 list_for_each_entry(block_group, groups_list, list) {
7893 spin_lock(&block_group->lock);
7895 if (!block_group->ro) {
7896 spin_unlock(&block_group->lock);
7900 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7901 BTRFS_BLOCK_GROUP_RAID10 |
7902 BTRFS_BLOCK_GROUP_DUP))
7907 free_bytes += (block_group->key.offset -
7908 btrfs_block_group_used(&block_group->item)) *
7911 spin_unlock(&block_group->lock);
7918 * helper to account the unused space of all the readonly block groups
7919 * in the space_info. takes mirrors into account.
7921 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7926 spin_lock(&sinfo->lock);
7928 for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7929 if (!list_empty(&sinfo->block_groups[i]))
7930 free_bytes += __btrfs_get_ro_block_group_free_space(
7931 &sinfo->block_groups[i]);
7933 spin_unlock(&sinfo->lock);
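/*
 * Worked example for the two helpers above: a read-only 1GiB RAID1
 * block group with 256MiB used has factor == 2, so it contributes
 * (1GiB - 256MiB) * 2 = 1.5GiB of raw device space to the total,
 * while a single or RAID0 group of the same size and usage would
 * contribute only 768MiB.
 */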
7938 void btrfs_set_block_group_rw(struct btrfs_root *root,
7939 struct btrfs_block_group_cache *cache)
7941 struct btrfs_space_info *sinfo = cache->space_info;
7946 spin_lock(&sinfo->lock);
7947 spin_lock(&cache->lock);
7948 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7949 cache->bytes_super - btrfs_block_group_used(&cache->item);
7950 sinfo->bytes_readonly -= num_bytes;
7952 spin_unlock(&cache->lock);
7953 spin_unlock(&sinfo->lock);
7957 * checks to see if it's even possible to relocate this block group.
7959 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7960 * ok to go ahead and try.
7962 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7964 struct btrfs_block_group_cache *block_group;
7965 struct btrfs_space_info *space_info;
7966 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7967 struct btrfs_device *device;
7968 struct btrfs_trans_handle *trans;
7977 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7979 /* odd, couldn't find the block group, leave it alone */
7983 min_free = btrfs_block_group_used(&block_group->item);
7985 /* no bytes used, we're good */
7989 space_info = block_group->space_info;
7990 spin_lock(&space_info->lock);
7992 full = space_info->full;
7995 * if this is the last block group we have in this space, we can't
7996 * relocate it unless we're able to allocate a new chunk below.
7998 * Otherwise, we need to make sure we have room in the space to handle
7999 * all of the extents from this block group. If we can, we're good
8001 if ((space_info->total_bytes != block_group->key.offset) &&
8002 (space_info->bytes_used + space_info->bytes_reserved +
8003 space_info->bytes_pinned + space_info->bytes_readonly +
8004 min_free < space_info->total_bytes)) {
8005 spin_unlock(&space_info->lock);
8008 spin_unlock(&space_info->lock);
8011 * ok we don't have enough space, but maybe we have free space on our
8012 * devices to allocate new chunks for relocation, so loop through our
8013 * alloc devices and guess if we have enough space. if this block
8014 * group is going to be restriped, run checks against the target
8015 * profile instead of the current one.
8027 target = get_restripe_target(root->fs_info, block_group->flags);
8029 index = __get_raid_index(extended_to_chunk(target));
8032 * this is just a balance, so if we were marked as full
8033 * we know there is no space for a new chunk
8038 index = get_block_group_index(block_group);
8041 if (index == BTRFS_RAID_RAID10) {
8045 } else if (index == BTRFS_RAID_RAID1) {
8047 } else if (index == BTRFS_RAID_DUP) {
8050 } else if (index == BTRFS_RAID_RAID0) {
8051 dev_min = fs_devices->rw_devices;
8052 do_div(min_free, dev_min);
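/*
 * Worked example of the RAID0 case above: relocating a 1GiB RAID0
 * block group on a filesystem with four writable devices divides
 * min_free by 4, so the device scan below succeeds once four devices
 * each offer at least 256MiB of unallocated space for the replacement
 * chunk.
 */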
8055 /* We need to do this so that we can look at pending chunks */
8056 trans = btrfs_join_transaction(root);
8057 if (IS_ERR(trans)) {
8058 ret = PTR_ERR(trans);
8062 mutex_lock(&root->fs_info->chunk_mutex);
8063 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8067 * check to make sure we can actually find a chunk with enough
8068 * space to fit our block group in.
8070 if (device->total_bytes > device->bytes_used + min_free &&
8071 !device->is_tgtdev_for_dev_replace) {
8072 ret = find_free_dev_extent(trans, device, min_free,
8077 if (dev_nr >= dev_min)
8083 mutex_unlock(&root->fs_info->chunk_mutex);
8084 btrfs_end_transaction(trans, root);
8086 btrfs_put_block_group(block_group);
8090 static int find_first_block_group(struct btrfs_root *root,
8091 struct btrfs_path *path, struct btrfs_key *key)
8094 struct btrfs_key found_key;
8095 struct extent_buffer *leaf;
8098 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8103 slot = path->slots[0];
8104 leaf = path->nodes[0];
8105 if (slot >= btrfs_header_nritems(leaf)) {
8106 ret = btrfs_next_leaf(root, path);
8113 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8115 if (found_key.objectid >= key->objectid &&
8116 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8126 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8128 struct btrfs_block_group_cache *block_group;
8132 struct inode *inode;
8134 block_group = btrfs_lookup_first_block_group(info, last);
8135 while (block_group) {
8136 spin_lock(&block_group->lock);
8137 if (block_group->iref)
8139 spin_unlock(&block_group->lock);
8140 block_group = next_block_group(info->tree_root,
8150 inode = block_group->inode;
8151 block_group->iref = 0;
8152 block_group->inode = NULL;
8153 spin_unlock(&block_group->lock);
8155 last = block_group->key.objectid + block_group->key.offset;
8156 btrfs_put_block_group(block_group);
8160 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8162 struct btrfs_block_group_cache *block_group;
8163 struct btrfs_space_info *space_info;
8164 struct btrfs_caching_control *caching_ctl;
8167 down_write(&info->extent_commit_sem);
8168 while (!list_empty(&info->caching_block_groups)) {
8169 caching_ctl = list_entry(info->caching_block_groups.next,
8170 struct btrfs_caching_control, list);
8171 list_del(&caching_ctl->list);
8172 put_caching_control(caching_ctl);
8174 up_write(&info->extent_commit_sem);
8176 spin_lock(&info->block_group_cache_lock);
8177 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8178 block_group = rb_entry(n, struct btrfs_block_group_cache,
8180 rb_erase(&block_group->cache_node,
8181 &info->block_group_cache_tree);
8182 spin_unlock(&info->block_group_cache_lock);
8184 down_write(&block_group->space_info->groups_sem);
8185 list_del(&block_group->list);
8186 up_write(&block_group->space_info->groups_sem);
8188 if (block_group->cached == BTRFS_CACHE_STARTED)
8189 wait_block_group_cache_done(block_group);
8192 * We haven't cached this block group, which means we could
8193 * possibly have excluded extents on this block group.
8195 if (block_group->cached == BTRFS_CACHE_NO)
8196 free_excluded_extents(info->extent_root, block_group);
8198 btrfs_remove_free_space_cache(block_group);
8199 btrfs_put_block_group(block_group);
8201 spin_lock(&info->block_group_cache_lock);
8203 spin_unlock(&info->block_group_cache_lock);
8205 /* now that all the block groups are freed, go through and
8206 * free all the space_info structs. This is only called during
8207 * the final stages of unmount, and so we know nobody is
8208 * using them. We call synchronize_rcu() once before we start,
8209 * just to be on the safe side.
8213 release_global_block_rsv(info);
8215 while(!list_empty(&info->space_info)) {
8216 space_info = list_entry(info->space_info.next,
8217 struct btrfs_space_info,
8219 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8220 if (space_info->bytes_pinned > 0 ||
8221 space_info->bytes_reserved > 0 ||
8222 space_info->bytes_may_use > 0) {
8224 dump_space_info(space_info, 0, 0);
8227 percpu_counter_destroy(&space_info->total_bytes_pinned);
8228 list_del(&space_info->list);
8234 static void __link_block_group(struct btrfs_space_info *space_info,
8235 struct btrfs_block_group_cache *cache)
8237 int index = get_block_group_index(cache);
8239 down_write(&space_info->groups_sem);
8240 list_add_tail(&cache->list, &space_info->block_groups[index]);
8241 up_write(&space_info->groups_sem);
8244 int btrfs_read_block_groups(struct btrfs_root *root)
8246 struct btrfs_path *path;
8248 struct btrfs_block_group_cache *cache;
8249 struct btrfs_fs_info *info = root->fs_info;
8250 struct btrfs_space_info *space_info;
8251 struct btrfs_key key;
8252 struct btrfs_key found_key;
8253 struct extent_buffer *leaf;
8257 root = info->extent_root;
8260 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8261 path = btrfs_alloc_path();
8266 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8267 if (btrfs_test_opt(root, SPACE_CACHE) &&
8268 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8270 if (btrfs_test_opt(root, CLEAR_CACHE))
8274 ret = find_first_block_group(root, path, &key);
8279 leaf = path->nodes[0];
8280 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8281 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8286 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8288 if (!cache->free_space_ctl) {
8294 atomic_set(&cache->count, 1);
8295 spin_lock_init(&cache->lock);
8296 cache->fs_info = info;
8297 INIT_LIST_HEAD(&cache->list);
8298 INIT_LIST_HEAD(&cache->cluster_list);
8302 * When we mount with old space cache, we need to
8303 * set BTRFS_DC_CLEAR and set the dirty flag.
8305 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8306 * truncate the old free space cache inode and
8307 * set up a new one.
8308 * b) Setting 'dirty flag' makes sure that we flush
8309 * the new space cache info onto disk.
8311 cache->disk_cache_state = BTRFS_DC_CLEAR;
8312 if (btrfs_test_opt(root, SPACE_CACHE))
8316 read_extent_buffer(leaf, &cache->item,
8317 btrfs_item_ptr_offset(leaf, path->slots[0]),
8318 sizeof(cache->item));
8319 memcpy(&cache->key, &found_key, sizeof(found_key));
8321 key.objectid = found_key.objectid + found_key.offset;
8322 btrfs_release_path(path);
8323 cache->flags = btrfs_block_group_flags(&cache->item);
8324 cache->sectorsize = root->sectorsize;
8325 cache->full_stripe_len = btrfs_full_stripe_len(root,
8326 &root->fs_info->mapping_tree,
8327 found_key.objectid);
8328 btrfs_init_free_space_ctl(cache);
8331 * We need to exclude the super stripes now so that the space
8332 * info has super bytes accounted for, otherwise we'll think
8333 * we have more space than we actually do.
8335 ret = exclude_super_stripes(root, cache);
8338 * We may have excluded something, so call this just in
8339 * case.
8341 free_excluded_extents(root, cache);
8342 kfree(cache->free_space_ctl);
8348 * check for two cases: either we are full, and therefore
8349 * don't need to bother with the caching work since we won't
8350 * find any space, or we are empty, and we can just add all
8351 * the space in and be done with it. This saves us a lot of
8352 * time, particularly in the full case.
8354 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8355 cache->last_byte_to_unpin = (u64)-1;
8356 cache->cached = BTRFS_CACHE_FINISHED;
8357 free_excluded_extents(root, cache);
8358 } else if (btrfs_block_group_used(&cache->item) == 0) {
8359 cache->last_byte_to_unpin = (u64)-1;
8360 cache->cached = BTRFS_CACHE_FINISHED;
8361 add_new_free_space(cache, root->fs_info,
8363 found_key.objectid +
8365 free_excluded_extents(root, cache);
8368 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8370 btrfs_remove_free_space_cache(cache);
8371 btrfs_put_block_group(cache);
8375 ret = update_space_info(info, cache->flags, found_key.offset,
8376 btrfs_block_group_used(&cache->item),
8379 btrfs_remove_free_space_cache(cache);
8380 spin_lock(&info->block_group_cache_lock);
8381 rb_erase(&cache->cache_node,
8382 &info->block_group_cache_tree);
8383 spin_unlock(&info->block_group_cache_lock);
8384 btrfs_put_block_group(cache);
8388 cache->space_info = space_info;
8389 spin_lock(&cache->space_info->lock);
8390 cache->space_info->bytes_readonly += cache->bytes_super;
8391 spin_unlock(&cache->space_info->lock);
8393 __link_block_group(space_info, cache);
8395 set_avail_alloc_bits(root->fs_info, cache->flags);
8396 if (btrfs_chunk_readonly(root, cache->key.objectid))
8397 set_block_group_ro(cache, 1);
8400 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8401 if (!(get_alloc_profile(root, space_info->flags) &
8402 (BTRFS_BLOCK_GROUP_RAID10 |
8403 BTRFS_BLOCK_GROUP_RAID1 |
8404 BTRFS_BLOCK_GROUP_RAID5 |
8405 BTRFS_BLOCK_GROUP_RAID6 |
8406 BTRFS_BLOCK_GROUP_DUP)))
8409 * avoid allocating from un-mirrored block groups if there are
8410 * mirrored block groups.
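/*
 * block_groups[3] and block_groups[4] are the BTRFS_RAID_RAID0 and
 * BTRFS_RAID_SINGLE lists in the btrfs_raid_types ordering, i.e. the
 * profiles without redundancy.
 */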
8412 list_for_each_entry(cache, &space_info->block_groups[3], list)
8413 set_block_group_ro(cache, 1);
8414 list_for_each_entry(cache, &space_info->block_groups[4], list)
8415 set_block_group_ro(cache, 1);
8418 init_global_block_rsv(info);
8421 btrfs_free_path(path);
8425 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8426 struct btrfs_root *root)
8428 struct btrfs_block_group_cache *block_group, *tmp;
8429 struct btrfs_root *extent_root = root->fs_info->extent_root;
8430 struct btrfs_block_group_item item;
8431 struct btrfs_key key;
8434 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8436 list_del_init(&block_group->new_bg_list);
8441 spin_lock(&block_group->lock);
8442 memcpy(&item, &block_group->item, sizeof(item));
8443 memcpy(&key, &block_group->key, sizeof(key));
8444 spin_unlock(&block_group->lock);
8446 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8449 btrfs_abort_transaction(trans, extent_root, ret);
8450 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8451 key.objectid, key.offset);
8453 btrfs_abort_transaction(trans, extent_root, ret);
8457 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8458 struct btrfs_root *root, u64 bytes_used,
8459 u64 type, u64 chunk_objectid, u64 chunk_offset,
8463 struct btrfs_root *extent_root;
8464 struct btrfs_block_group_cache *cache;
8466 extent_root = root->fs_info->extent_root;
8468 root->fs_info->last_trans_log_full_commit = trans->transid;
8470 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8473 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8475 if (!cache->free_space_ctl) {
8480 cache->key.objectid = chunk_offset;
8481 cache->key.offset = size;
8482 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8483 cache->sectorsize = root->sectorsize;
8484 cache->fs_info = root->fs_info;
8485 cache->full_stripe_len = btrfs_full_stripe_len(root,
8486 &root->fs_info->mapping_tree,
8489 atomic_set(&cache->count, 1);
8490 spin_lock_init(&cache->lock);
8491 INIT_LIST_HEAD(&cache->list);
8492 INIT_LIST_HEAD(&cache->cluster_list);
8493 INIT_LIST_HEAD(&cache->new_bg_list);
8495 btrfs_init_free_space_ctl(cache);
8497 btrfs_set_block_group_used(&cache->item, bytes_used);
8498 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8499 cache->flags = type;
8500 btrfs_set_block_group_flags(&cache->item, type);
8502 cache->last_byte_to_unpin = (u64)-1;
8503 cache->cached = BTRFS_CACHE_FINISHED;
8504 ret = exclude_super_stripes(root, cache);
8507 * We may have excluded something, so call this just in
8508 * case.
8510 free_excluded_extents(root, cache);
8511 kfree(cache->free_space_ctl);
8516 add_new_free_space(cache, root->fs_info, chunk_offset,
8517 chunk_offset + size);
8519 free_excluded_extents(root, cache);
8521 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8523 btrfs_remove_free_space_cache(cache);
8524 btrfs_put_block_group(cache);
8528 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8529 &cache->space_info);
8531 btrfs_remove_free_space_cache(cache);
8532 spin_lock(&root->fs_info->block_group_cache_lock);
8533 rb_erase(&cache->cache_node,
8534 &root->fs_info->block_group_cache_tree);
8535 spin_unlock(&root->fs_info->block_group_cache_lock);
8536 btrfs_put_block_group(cache);
8539 update_global_block_rsv(root->fs_info);
8541 spin_lock(&cache->space_info->lock);
8542 cache->space_info->bytes_readonly += cache->bytes_super;
8543 spin_unlock(&cache->space_info->lock);
8545 __link_block_group(cache->space_info, cache);
8547 list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8549 set_avail_alloc_bits(extent_root->fs_info, type);
8554 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8556 u64 extra_flags = chunk_to_extended(flags) &
8557 BTRFS_EXTENDED_PROFILE_MASK;
8559 write_seqlock(&fs_info->profiles_lock);
8560 if (flags & BTRFS_BLOCK_GROUP_DATA)
8561 fs_info->avail_data_alloc_bits &= ~extra_flags;
8562 if (flags & BTRFS_BLOCK_GROUP_METADATA)
8563 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8564 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8565 fs_info->avail_system_alloc_bits &= ~extra_flags;
8566 write_sequnlock(&fs_info->profiles_lock);
8569 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8570 struct btrfs_root *root, u64 group_start)
8572 struct btrfs_path *path;
8573 struct btrfs_block_group_cache *block_group;
8574 struct btrfs_free_cluster *cluster;
8575 struct btrfs_root *tree_root = root->fs_info->tree_root;
8576 struct btrfs_key key;
8577 struct inode *inode;
8582 root = root->fs_info->extent_root;
8584 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8585 BUG_ON(!block_group);
8586 BUG_ON(!block_group->ro);
8589 * Free the reserved super bytes from this block group before
8590 * releasing it.
8592 free_excluded_extents(root, block_group);
8594 memcpy(&key, &block_group->key, sizeof(key));
8595 index = get_block_group_index(block_group);
8596 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8597 BTRFS_BLOCK_GROUP_RAID1 |
8598 BTRFS_BLOCK_GROUP_RAID10))
8603 /* make sure this block group isn't part of an allocation cluster */
8604 cluster = &root->fs_info->data_alloc_cluster;
8605 spin_lock(&cluster->refill_lock);
8606 btrfs_return_cluster_to_free_space(block_group, cluster);
8607 spin_unlock(&cluster->refill_lock);
8610 * make sure this block group isn't part of a metadata
8611 * allocation cluster
8613 cluster = &root->fs_info->meta_alloc_cluster;
8614 spin_lock(&cluster->refill_lock);
8615 btrfs_return_cluster_to_free_space(block_group, cluster);
8616 spin_unlock(&cluster->refill_lock);
8618 path = btrfs_alloc_path();
8624 inode = lookup_free_space_inode(tree_root, block_group, path);
8625 if (!IS_ERR(inode)) {
8626 ret = btrfs_orphan_add(trans, inode);
8628 btrfs_add_delayed_iput(inode);
8632 /* One for the block group's ref */
8633 spin_lock(&block_group->lock);
8634 if (block_group->iref) {
8635 block_group->iref = 0;
8636 block_group->inode = NULL;
8637 spin_unlock(&block_group->lock);
8640 spin_unlock(&block_group->lock);
8642 /* One for our lookup ref */
8643 btrfs_add_delayed_iput(inode);
8646 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8647 key.offset = block_group->key.objectid;
8650 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8654 btrfs_release_path(path);
8656 ret = btrfs_del_item(trans, tree_root, path);
8659 btrfs_release_path(path);
8662 spin_lock(&root->fs_info->block_group_cache_lock);
8663 rb_erase(&block_group->cache_node,
8664 &root->fs_info->block_group_cache_tree);
8666 if (root->fs_info->first_logical_byte == block_group->key.objectid)
8667 root->fs_info->first_logical_byte = (u64)-1;
8668 spin_unlock(&root->fs_info->block_group_cache_lock);
8670 down_write(&block_group->space_info->groups_sem);
8672 * we must use list_del_init so people can check to see if they
8673 * are still on the list after taking the semaphore
8675 list_del_init(&block_group->list);
8676 if (list_empty(&block_group->space_info->block_groups[index]))
8677 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8678 up_write(&block_group->space_info->groups_sem);
8680 if (block_group->cached == BTRFS_CACHE_STARTED)
8681 wait_block_group_cache_done(block_group);
8683 btrfs_remove_free_space_cache(block_group);
8685 spin_lock(&block_group->space_info->lock);
8686 block_group->space_info->total_bytes -= block_group->key.offset;
8687 block_group->space_info->bytes_readonly -= block_group->key.offset;
8688 block_group->space_info->disk_total -= block_group->key.offset * factor;
8689 spin_unlock(&block_group->space_info->lock);
8691 memcpy(&key, &block_group->key, sizeof(key));
8693 btrfs_clear_space_info_full(root->fs_info);
8695 btrfs_put_block_group(block_group);
8696 btrfs_put_block_group(block_group);
8698 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8704 ret = btrfs_del_item(trans, root, path);
8706 btrfs_free_path(path);
8710 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8712 struct btrfs_space_info *space_info;
8713 struct btrfs_super_block *disk_super;
8719 disk_super = fs_info->super_copy;
8720 if (!btrfs_super_root(disk_super))
8723 features = btrfs_super_incompat_flags(disk_super);
8724 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8727 flags = BTRFS_BLOCK_GROUP_SYSTEM;
8728 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8733 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8734 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8736 flags = BTRFS_BLOCK_GROUP_METADATA;
8737 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8741 flags = BTRFS_BLOCK_GROUP_DATA;
8742 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8748 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8750 return unpin_extent_range(root, start, end);
8753 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8754 u64 num_bytes, u64 *actual_bytes)
8756 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8759 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8761 struct btrfs_fs_info *fs_info = root->fs_info;
8762 struct btrfs_block_group_cache *cache = NULL;
8767 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8771 * try to trim all FS space; note our first block group may start from a non-zero offset.
8773 if (range->len == total_bytes)
8774 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8776 cache = btrfs_lookup_block_group(fs_info, range->start);
8779 if (cache->key.objectid >= (range->start + range->len)) {
8780 btrfs_put_block_group(cache);
8784 start = max(range->start, cache->key.objectid);
8785 end = min(range->start + range->len,
8786 cache->key.objectid + cache->key.offset);
8788 if (end - start >= range->minlen) {
8789 if (!block_group_cache_done(cache)) {
8790 ret = cache_block_group(cache, 0);
8792 btrfs_put_block_group(cache);
8795 ret = wait_block_group_cache_done(cache);
8797 btrfs_put_block_group(cache);
8801 ret = btrfs_trim_block_group(cache,
8807 trimmed += group_trimmed;
8809 btrfs_put_block_group(cache);
8814 cache = next_block_group(fs_info->tree_root, cache);
8817 range->len = trimmed;
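/*
 * A minimal userspace sketch of how this path is reached, assuming a
 * btrfs filesystem mounted at /mnt (illustrative only; FITRIM is the
 * generic VFS ioctl that ends up calling btrfs_trim_fs):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX };
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n",
 *		       (unsigned long long)range.len);
 *
 * On return, range.len is overwritten with the trimmed byte count, as
 * done at the end of the function above.
 */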