Btrfs: fix very slow inode eviction and fs unmount
fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
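
/*
 * A minimal sketch of how a caller might pick between these force levels;
 * try_alloc_data_chunk() and its "desperate" flag are illustrative names,
 * not helpers defined elsewhere in this file.
 */
#if 0
static int try_alloc_data_chunk(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, int desperate)
{
        /* opportunistic callers let the allocator decide */
        int force = desperate ? CHUNK_ALLOC_FORCE : CHUNK_ALLOC_NO_FORCE;

        return do_chunk_alloc(trans, root->fs_info->extent_root,
                              BTRFS_BLOCK_GROUP_DATA, force);
}
#endif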

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
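
/*
 * A hedged sketch of the expected pairing: the RESERVE_* values are
 * consumed by btrfs_update_reserved_bytes() (declared below).  Error
 * handling and the surrounding context are elided.
 */
#if 0
        /* take a reservation that participates in ENOSPC accounting ... */
        btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
        /* ... and release it again if the allocation is abandoned */
        btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
#endif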

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
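
/*
 * Block groups are reference counted; every btrfs_get_block_group() (or a
 * lookup helper that grabs a reference on the caller's behalf) must be
 * paired with a btrfs_put_block_group().  A minimal usage sketch:
 */
#if 0
        struct btrfs_block_group_cache *cache;

        cache = btrfs_lookup_block_group(fs_info, bytenr);
        if (cache) {
                /* ... use cache ... */
                btrfs_put_block_group(cache);
        }
#endif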

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
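
/*
 * For reference: the superblock mirrors excluded above live at fixed
 * device offsets (64KiB, 64MiB and 256GiB for BTRFS_SUPER_MIRROR_MAX == 3,
 * as returned by btrfs_sb_offset()); btrfs_rmap_block() maps each of them
 * back to the logical addresses that may fall inside this block group.
 */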

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, because their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
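
/*
 * A worked example of the carving above, with made-up byte offsets:
 * caching the range [0, 100) while pinned_extents holds the inclusive
 * ranges [20, 30] and [60, 70] adds [0, 20) and [31, 60) in the loop and
 * [71, 100) in the tail check; the pinned ranges themselves stay excluded
 * until the transaction commits.
 */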

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched()) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen in the case
         * where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this
         * block group, but if we've moved to the state where we will wait on
         * caching block groups we need to first check if we're doing a fast
         * load here, so we can wait for it to finish, otherwise we could end
         * up allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wake up any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}
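
/*
 * A hedged sketch of a caller driving the above: kick off (or fast-load)
 * the cache, then wait for the worker to finish.  The surrounding context
 * (where block_group comes from) is assumed.
 */
#if 0
        ret = cache_block_group(block_group, 0);
        if (!ret) {
                struct btrfs_caching_control *ctl;

                ctl = get_caching_control(block_group);
                if (ctl) {
                        wait_event(ctl->wait,
                                   block_group_cache_done(block_group));
                        put_caching_control(ctl);
                }
        }
#endif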

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}
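
/*
 * Usage sketch for the helper above: 0 means an extent item (or a skinny
 * metadata item) exists at @start, a positive return means nothing was
 * found, negative is a search error.
 */
#if 0
        ret = btrfs_lookup_extent(root, bytenr, num_bytes);
        if (ret < 0)
                return ret;             /* search error */
        if (ret == 0)
                extent_exists = 1;      /* illustrative flag */
#endif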

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be once all of the
 * delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->leafsize)
                                ret = 0;
                }
                if (ret) {
                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = root->leafsize;
                        btrfs_release_path(path);
                        goto again;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}
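
/*
 * A hedged usage sketch for btrfs_lookup_extent_info(): with skinny
 * metadata the offset argument is the block's level.  The extent buffer
 * "eb" and the transaction handle come from the caller's context.
 */
#if 0
        u64 refs = 0;
        u64 flags = 0;

        ret = btrfs_lookup_extent_info(trans, root, eb->start,
                                       btrfs_header_level(eb), 1,
                                       &refs, &flags);
        if (!ret && (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
                /* block is no longer referenced by its owner tree */
        }
#endif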

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs are generic, and
 * can be used in all cases where implicit back refs are used. The major
 * shortcoming of the full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */
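
/*
 * A concrete illustration of the key composition above, with invented
 * numbers: a data extent referenced by inode 257 at file offset 0 in the
 * subvolume tree whose objectid is 5 gets an implicit back ref keyed
 *
 *     (bytenr, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while a full (shared) back ref for the same extent would be keyed
 * (bytenr, BTRFS_SHARED_DATA_REF_KEY, <first byte of the parent leaf>).
 */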

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
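
/*
 * Sketch of how the hash above is used: it becomes the key offset of an
 * implicit data back ref, exactly as lookup_extent_data_ref() and
 * insert_extent_data_ref() below compose their search keys.
 */
#if 0
        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_DATA_REF_KEY;
        key.offset = hash_extent_data_ref(root_objectid, owner, offset);
#endif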

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}
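
/*
 * Note on the while (ret == -EEXIST) loop above: since the key offset of
 * an implicit data back ref is a hash, different (root, objectid, offset)
 * triples can collide.  The loop resolves collisions by linearly probing
 * key.offset++ until it finds either a matching ref to bump or a free key
 * to insert under.
 */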

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}
1324
1325 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1326                                           struct btrfs_path *path,
1327                                           struct btrfs_extent_inline_ref *iref)
1328 {
1329         struct btrfs_key key;
1330         struct extent_buffer *leaf;
1331         struct btrfs_extent_data_ref *ref1;
1332         struct btrfs_shared_data_ref *ref2;
1333         u32 num_refs = 0;
1334
1335         leaf = path->nodes[0];
1336         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1337         if (iref) {
1338                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1339                     BTRFS_EXTENT_DATA_REF_KEY) {
1340                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1341                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1342                 } else {
1343                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1344                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1345                 }
1346         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1347                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1348                                       struct btrfs_extent_data_ref);
1349                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1350         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1351                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1352                                       struct btrfs_shared_data_ref);
1353                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1354 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1355         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1356                 struct btrfs_extent_ref_v0 *ref0;
1357                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1358                                       struct btrfs_extent_ref_v0);
1359                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1360 #endif
1361         } else {
1362                 WARN_ON(1);
1363         }
1364         return num_refs;
1365 }
1366
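/*
 * Look up the keyed backref item for a tree block.  Shared refs are keyed
 * as (bytenr, SHARED_BLOCK_REF, parent), non-shared ones as
 * (bytenr, TREE_BLOCK_REF, root_objectid).  Returns 0 with the path
 * positioned for deletion, or -ENOENT if no such item exists.
 */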
1367 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1368                                           struct btrfs_root *root,
1369                                           struct btrfs_path *path,
1370                                           u64 bytenr, u64 parent,
1371                                           u64 root_objectid)
1372 {
1373         struct btrfs_key key;
1374         int ret;
1375
1376         key.objectid = bytenr;
1377         if (parent) {
1378                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1379                 key.offset = parent;
1380         } else {
1381                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1382                 key.offset = root_objectid;
1383         }
1384
1385         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1386         if (ret > 0)
1387                 ret = -ENOENT;
1388 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1389         if (ret == -ENOENT && parent) {
1390                 btrfs_release_path(path);
1391                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1392                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1393                 if (ret > 0)
1394                         ret = -ENOENT;
1395         }
1396 #endif
1397         return ret;
1398 }
1399
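/*
 * Insert the keyed backref item for a tree block.  The item size is zero:
 * the key alone carries all the information, so its existence is the
 * reference.
 */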
1400 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1401                                           struct btrfs_root *root,
1402                                           struct btrfs_path *path,
1403                                           u64 bytenr, u64 parent,
1404                                           u64 root_objectid)
1405 {
1406         struct btrfs_key key;
1407         int ret;
1408
1409         key.objectid = bytenr;
1410         if (parent) {
1411                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1412                 key.offset = parent;
1413         } else {
1414                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1415                 key.offset = root_objectid;
1416         }
1417
1418         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1419         btrfs_release_path(path);
1420         return ret;
1421 }
1422
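/*
 * Map (parent, owner) to a backref key type:
 *
 *                   parent == 0              parent != 0 (shared)
 *   tree block      TREE_BLOCK_REF_KEY       SHARED_BLOCK_REF_KEY
 *   data extent     EXTENT_DATA_REF_KEY      SHARED_DATA_REF_KEY
 *
 * An owner below BTRFS_FIRST_FREE_OBJECTID marks a metadata extent;
 * data extents use the inode number as owner, which is never that low.
 */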
1423 static inline int extent_ref_type(u64 parent, u64 owner)
1424 {
1425         int type;
1426         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1427                 if (parent > 0)
1428                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1429                 else
1430                         type = BTRFS_TREE_BLOCK_REF_KEY;
1431         } else {
1432                 if (parent > 0)
1433                         type = BTRFS_SHARED_DATA_REF_KEY;
1434                 else
1435                         type = BTRFS_EXTENT_DATA_REF_KEY;
1436         }
1437         return type;
1438 }
1439
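/*
 * Find the key that follows the current path position, walking up from
 * @level toward the root until a node with a following slot is found.
 * Returns 0 with *key filled in, or 1 if the path already points past
 * the last key in the tree.
 */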
1440 static int find_next_key(struct btrfs_path *path, int level,
1441                          struct btrfs_key *key)
1443 {
1444         for (; level < BTRFS_MAX_LEVEL; level++) {
1445                 if (!path->nodes[level])
1446                         break;
1447                 if (path->slots[level] + 1 >=
1448                     btrfs_header_nritems(path->nodes[level]))
1449                         continue;
1450                 if (level == 0)
1451                         btrfs_item_key_to_cpu(path->nodes[level], key,
1452                                               path->slots[level] + 1);
1453                 else
1454                         btrfs_node_key_to_cpu(path->nodes[level], key,
1455                                               path->slots[level] + 1);
1456                 return 0;
1457         }
1458         return 1;
1459 }
1460
1461 /*
1462  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1463  * to the address of the inline back ref, and 0 is returned.
1464  *
1465  * If the back ref isn't found, *ref_ret is set to the address where it
1466  * should be inserted, and -ENOENT is returned.
1467  *
1468  * If insert is true and there are too many inline back refs, the path
1469  * points to the extent item, and -EAGAIN is returned.
1470  *
1471  * NOTE: inline back refs are ordered in the same way that back ref
1472  *       items in the tree are ordered.
1473  */
1474 static noinline_for_stack
1475 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1476                                  struct btrfs_root *root,
1477                                  struct btrfs_path *path,
1478                                  struct btrfs_extent_inline_ref **ref_ret,
1479                                  u64 bytenr, u64 num_bytes,
1480                                  u64 parent, u64 root_objectid,
1481                                  u64 owner, u64 offset, int insert)
1482 {
1483         struct btrfs_key key;
1484         struct extent_buffer *leaf;
1485         struct btrfs_extent_item *ei;
1486         struct btrfs_extent_inline_ref *iref;
1487         u64 flags;
1488         u64 item_size;
1489         unsigned long ptr;
1490         unsigned long end;
1491         int extra_size;
1492         int type;
1493         int want;
1494         int ret;
1495         int err = 0;
1496         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1497                                                  SKINNY_METADATA);
1498
1499         key.objectid = bytenr;
1500         key.type = BTRFS_EXTENT_ITEM_KEY;
1501         key.offset = num_bytes;
1502
1503         want = extent_ref_type(parent, owner);
1504         if (insert) {
1505                 extra_size = btrfs_extent_inline_ref_size(want);
1506                 path->keep_locks = 1;
1507         } else {
1508                 extra_size = -1;
        }
1509
1510         /*
1511          * For metadata extents, owner is the level of the block itself,
1512          * so it can be used directly as the skinny metadata key offset.
1513          */
1514         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1515                 key.type = BTRFS_METADATA_ITEM_KEY;
1516                 key.offset = owner;
1517         }
1518
1519 again:
1520         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1521         if (ret < 0) {
1522                 err = ret;
1523                 goto out;
1524         }
1525
1526         /*
1527          * We may be a newly converted file system which still has the old fat
1528          * extent entries for metadata, so try and see if we have one of those.
1529          */
1530         if (ret > 0 && skinny_metadata) {
1531                 skinny_metadata = false;
1532                 if (path->slots[0]) {
1533                         path->slots[0]--;
1534                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1535                                               path->slots[0]);
1536                         if (key.objectid == bytenr &&
1537                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1538                             key.offset == num_bytes)
1539                                 ret = 0;
1540                 }
1541                 if (ret) {
1542                         key.type = BTRFS_EXTENT_ITEM_KEY;
1543                         key.offset = num_bytes;
1544                         btrfs_release_path(path);
1545                         goto again;
1546                 }
1547         }
1548
1549         if (ret && !insert) {
1550                 err = -ENOENT;
1551                 goto out;
1552         } else if (WARN_ON(ret)) {
1553                 err = -EIO;
1554                 goto out;
1555         }
1556
1557         leaf = path->nodes[0];
1558         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1559 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1560         if (item_size < sizeof(*ei)) {
1561                 if (!insert) {
1562                         err = -ENOENT;
1563                         goto out;
1564                 }
1565                 ret = convert_extent_item_v0(trans, root, path, owner,
1566                                              extra_size);
1567                 if (ret < 0) {
1568                         err = ret;
1569                         goto out;
1570                 }
1571                 leaf = path->nodes[0];
1572                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1573         }
1574 #endif
1575         BUG_ON(item_size < sizeof(*ei));
1576
1577         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1578         flags = btrfs_extent_flags(leaf, ei);
1579
1580         ptr = (unsigned long)(ei + 1);
1581         end = (unsigned long)ei + item_size;
1582
1583         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1584                 ptr += sizeof(struct btrfs_tree_block_info);
1585                 BUG_ON(ptr > end);
1586         }
1587
1588         err = -ENOENT;
1589         while (1) {
1590                 if (ptr >= end) {
1591                         WARN_ON(ptr > end);
1592                         break;
1593                 }
1594                 iref = (struct btrfs_extent_inline_ref *)ptr;
1595                 type = btrfs_extent_inline_ref_type(leaf, iref);
1596                 if (want < type)
1597                         break;
1598                 if (want > type) {
1599                         ptr += btrfs_extent_inline_ref_size(type);
1600                         continue;
1601                 }
1602
1603                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1604                         struct btrfs_extent_data_ref *dref;
1605                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1606                         if (match_extent_data_ref(leaf, dref, root_objectid,
1607                                                   owner, offset)) {
1608                                 err = 0;
1609                                 break;
1610                         }
1611                         if (hash_extent_data_ref_item(leaf, dref) <
1612                             hash_extent_data_ref(root_objectid, owner, offset))
1613                                 break;
1614                 } else {
1615                         u64 ref_offset;
1616                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1617                         if (parent > 0) {
1618                                 if (parent == ref_offset) {
1619                                         err = 0;
1620                                         break;
1621                                 }
1622                                 if (ref_offset < parent)
1623                                         break;
1624                         } else {
1625                                 if (root_objectid == ref_offset) {
1626                                         err = 0;
1627                                         break;
1628                                 }
1629                                 if (ref_offset < root_objectid)
1630                                         break;
1631                         }
1632                 }
1633                 ptr += btrfs_extent_inline_ref_size(type);
1634         }
1635         if (err == -ENOENT && insert) {
1636                 if (item_size + extra_size >=
1637                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1638                         err = -EAGAIN;
1639                         goto out;
1640                 }
1641                 /*
1642                  * To add new inline back ref, we have to make sure
1643                  * there is no corresponding back ref item.
1644                  * For simplicity, we just do not add new inline back
1645                  * ref if there is any kind of item for this block
1646                  */
1647                 if (find_next_key(path, 0, &key) == 0 &&
1648                     key.objectid == bytenr &&
1649                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1650                         err = -EAGAIN;
1651                         goto out;
1652                 }
1653         }
1654         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1655 out:
1656         if (insert) {
1657                 path->keep_locks = 0;
1658                 btrfs_unlock_up_safe(path, 1);
1659         }
1660         return err;
1661 }
1662
1663 /*
1664  * helper to add a new inline back ref: the extent item is extended,
 * any inline refs after the insertion point are shifted to make room,
 * and the new ref is written in place
1665  */
1666 static noinline_for_stack
1667 void setup_inline_extent_backref(struct btrfs_root *root,
1668                                  struct btrfs_path *path,
1669                                  struct btrfs_extent_inline_ref *iref,
1670                                  u64 parent, u64 root_objectid,
1671                                  u64 owner, u64 offset, int refs_to_add,
1672                                  struct btrfs_delayed_extent_op *extent_op)
1673 {
1674         struct extent_buffer *leaf;
1675         struct btrfs_extent_item *ei;
1676         unsigned long ptr;
1677         unsigned long end;
1678         unsigned long item_offset;
1679         u64 refs;
1680         int size;
1681         int type;
1682
1683         leaf = path->nodes[0];
1684         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1685         item_offset = (unsigned long)iref - (unsigned long)ei;
1686
1687         type = extent_ref_type(parent, owner);
1688         size = btrfs_extent_inline_ref_size(type);
1689
1690         btrfs_extend_item(root, path, size);
1691
1692         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1693         refs = btrfs_extent_refs(leaf, ei);
1694         refs += refs_to_add;
1695         btrfs_set_extent_refs(leaf, ei, refs);
1696         if (extent_op)
1697                 __run_delayed_extent_op(extent_op, leaf, ei);
1698
1699         ptr = (unsigned long)ei + item_offset;
1700         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1701         if (ptr < end - size)
1702                 memmove_extent_buffer(leaf, ptr + size, ptr,
1703                                       end - size - ptr);
1704
1705         iref = (struct btrfs_extent_inline_ref *)ptr;
1706         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1707         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1708                 struct btrfs_extent_data_ref *dref;
1709                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1710                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1711                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1712                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1713                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1714         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1715                 struct btrfs_shared_data_ref *sref;
1716                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1717                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1718                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1719         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1720                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1721         } else {
1722                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1723         }
1724         btrfs_mark_buffer_dirty(leaf);
1725 }
1726
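/*
 * Look up a backref for the given extent, trying the inline form first
 * and falling back to the keyed backref items.  When a keyed backref is
 * found, *ref_ret is left NULL and only the path identifies the item.
 */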
1727 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1728                                  struct btrfs_root *root,
1729                                  struct btrfs_path *path,
1730                                  struct btrfs_extent_inline_ref **ref_ret,
1731                                  u64 bytenr, u64 num_bytes, u64 parent,
1732                                  u64 root_objectid, u64 owner, u64 offset)
1733 {
1734         int ret;
1735
1736         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1737                                            bytenr, num_bytes, parent,
1738                                            root_objectid, owner, offset, 0);
1739         if (ret != -ENOENT)
1740                 return ret;
1741
1742         btrfs_release_path(path);
1743         *ref_ret = NULL;
1744
1745         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1746                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1747                                             root_objectid);
1748         } else {
1749                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1750                                              root_objectid, owner, offset);
1751         }
1752         return ret;
1753 }
1754
1755 /*
1756  * helper to update/remove an inline back ref: the extent item's total
 * ref count is adjusted, and when an individual ref's count drops to
 * zero the inline ref is cut out and the item is truncated
1757  */
1758 static noinline_for_stack
1759 void update_inline_extent_backref(struct btrfs_root *root,
1760                                   struct btrfs_path *path,
1761                                   struct btrfs_extent_inline_ref *iref,
1762                                   int refs_to_mod,
1763                                   struct btrfs_delayed_extent_op *extent_op)
1764 {
1765         struct extent_buffer *leaf;
1766         struct btrfs_extent_item *ei;
1767         struct btrfs_extent_data_ref *dref = NULL;
1768         struct btrfs_shared_data_ref *sref = NULL;
1769         unsigned long ptr;
1770         unsigned long end;
1771         u32 item_size;
1772         int size;
1773         int type;
1774         u64 refs;
1775
1776         leaf = path->nodes[0];
1777         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1778         refs = btrfs_extent_refs(leaf, ei);
1779         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1780         refs += refs_to_mod;
1781         btrfs_set_extent_refs(leaf, ei, refs);
1782         if (extent_op)
1783                 __run_delayed_extent_op(extent_op, leaf, ei);
1784
1785         type = btrfs_extent_inline_ref_type(leaf, iref);
1786
1787         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1788                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1789                 refs = btrfs_extent_data_ref_count(leaf, dref);
1790         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1791                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1792                 refs = btrfs_shared_data_ref_count(leaf, sref);
1793         } else {
1794                 refs = 1;
1795                 BUG_ON(refs_to_mod != -1);
1796         }
1797
1798         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1799         refs += refs_to_mod;
1800
1801         if (refs > 0) {
1802                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1803                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1804                 else
1805                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1806         } else {
1807                 size = btrfs_extent_inline_ref_size(type);
1808                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1809                 ptr = (unsigned long)iref;
1810                 end = (unsigned long)ei + item_size;
1811                 if (ptr + size < end)
1812                         memmove_extent_buffer(leaf, ptr, ptr + size,
1813                                               end - ptr - size);
1814                 item_size -= size;
1815                 btrfs_truncate_item(root, path, item_size, 1);
1816         }
1817         btrfs_mark_buffer_dirty(leaf);
1818 }
1819
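/*
 * Insert or update an inline backref.  A lookup result of 0 means the
 * ref already exists and only its count is bumped (data refs only, hence
 * the BUG_ON for metadata); -ENOENT means the path is positioned at the
 * insertion point and the new ref is set up in place; -EAGAIN is passed
 * through so the caller can fall back to a keyed backref item.
 */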
1820 static noinline_for_stack
1821 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1822                                  struct btrfs_root *root,
1823                                  struct btrfs_path *path,
1824                                  u64 bytenr, u64 num_bytes, u64 parent,
1825                                  u64 root_objectid, u64 owner,
1826                                  u64 offset, int refs_to_add,
1827                                  struct btrfs_delayed_extent_op *extent_op)
1828 {
1829         struct btrfs_extent_inline_ref *iref;
1830         int ret;
1831
1832         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1833                                            bytenr, num_bytes, parent,
1834                                            root_objectid, owner, offset, 1);
1835         if (ret == 0) {
1836                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1837                 update_inline_extent_backref(root, path, iref,
1838                                              refs_to_add, extent_op);
1839         } else if (ret == -ENOENT) {
1840                 setup_inline_extent_backref(root, path, iref, parent,
1841                                             root_objectid, owner, offset,
1842                                             refs_to_add, extent_op);
1843                 ret = 0;
1844         }
1845         return ret;
1846 }
1847
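/*
 * Insert a keyed backref item: a tree block ref for metadata, which only
 * ever represents a single reference, or a data ref carrying the full
 * (root, objectid, offset, count) tuple.
 */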
1848 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1849                                  struct btrfs_root *root,
1850                                  struct btrfs_path *path,
1851                                  u64 bytenr, u64 parent, u64 root_objectid,
1852                                  u64 owner, u64 offset, int refs_to_add)
1853 {
1854         int ret;
1855         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1856                 BUG_ON(refs_to_add != 1);
1857                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1858                                             parent, root_objectid);
1859         } else {
1860                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1861                                              parent, root_objectid,
1862                                              owner, offset, refs_to_add);
1863         }
1864         return ret;
1865 }
1866
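/*
 * Drop refs_to_drop references from the backref the path points at,
 * whatever its form: inline ref, keyed data ref item, or keyed tree
 * block ref item (which is simply deleted).
 */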
1867 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1868                                  struct btrfs_root *root,
1869                                  struct btrfs_path *path,
1870                                  struct btrfs_extent_inline_ref *iref,
1871                                  int refs_to_drop, int is_data)
1872 {
1873         int ret = 0;
1874
1875         BUG_ON(!is_data && refs_to_drop != 1);
1876         if (iref) {
1877                 update_inline_extent_backref(root, path, iref,
1878                                              -refs_to_drop, NULL);
1879         } else if (is_data) {
1880                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1881         } else {
1882                 ret = btrfs_del_item(trans, root, path);
1883         }
1884         return ret;
1885 }
1886
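/*
 * Thin wrapper around blkdev_issue_discard().  The block layer works in
 * 512-byte sectors, so byte offsets and lengths are converted with a
 * right shift by 9; e.g. a start offset of 1048576 bytes (1MiB) becomes
 * sector 2048.
 */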
1887 static int btrfs_issue_discard(struct block_device *bdev,
1888                                 u64 start, u64 len)
1889 {
1890         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1891 }
1892
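/*
 * Discard a logical extent.  btrfs_map_block() translates the logical
 * range into per-device physical stripes; each stripe is discarded
 * separately, devices that cannot discard are skipped, and -EOPNOTSUPP
 * is deliberately swallowed.  *actual_bytes reports how much was really
 * discarded.
 */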
1893 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1894                                 u64 num_bytes, u64 *actual_bytes)
1895 {
1896         int ret;
1897         u64 discarded_bytes = 0;
1898         struct btrfs_bio *bbio = NULL;
1899
1901         /* Tell the block device(s) that the sectors can be discarded */
1902         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1903                               bytenr, &num_bytes, &bbio, 0);
1904         /* Error condition is -ENOMEM */
1905         if (!ret) {
1906                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1907                 int i;
1908
1910                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1911                         if (!stripe->dev->can_discard)
1912                                 continue;
1913
1914                         ret = btrfs_issue_discard(stripe->dev->bdev,
1915                                                   stripe->physical,
1916                                                   stripe->length);
1917                         if (!ret)
1918                                 discarded_bytes += stripe->length;
1919                         else if (ret != -EOPNOTSUPP)
1920                                 break; /* Logic errors or -ENOMEM, or -EIO,
                                         * though it's unclear how -EIO
                                         * could happen here. */
1921
1922                         /*
1923                          * Just in case we get back EOPNOTSUPP for some reason,
1924                          * ignore the return value so we don't screw up
1925                          * people calling discard_extent.
1926                          */
1927                         ret = 0;
1928                 }
1929                 kfree(bbio);
1930         }
1931
1932         if (actual_bytes)
1933                 *actual_bytes = discarded_bytes;
1934
1936         if (ret == -EOPNOTSUPP)
1937                 ret = 0;
1938         return ret;
1939 }
1940
1941 /*
 * Add a reference to an extent by queueing a delayed ref; the extent
 * tree is only updated when the delayed refs are run.  Can return
 * -ENOMEM.
 */
1942 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1943                          struct btrfs_root *root,
1944                          u64 bytenr, u64 num_bytes, u64 parent,
1945                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1946 {
1947         int ret;
1948         struct btrfs_fs_info *fs_info = root->fs_info;
1949
1950         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1951                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1952
1953         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1954                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1955                                         num_bytes,
1956                                         parent, root_objectid, (int)owner,
1957                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1958         } else {
1959                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1960                                         num_bytes,
1961                                         parent, root_objectid, owner, offset,
1962                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1963         }
1964         return ret;
1965 }
1966
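/*
 * Add refs_to_add references to an existing extent.  The first attempt
 * stores the ref inline in the extent item; if there is no room left
 * (-EAGAIN), the extent item's ref count is bumped here and the backref
 * is inserted as a separate keyed item instead.
 */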
1967 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1968                                   struct btrfs_root *root,
1969                                   u64 bytenr, u64 num_bytes,
1970                                   u64 parent, u64 root_objectid,
1971                                   u64 owner, u64 offset, int refs_to_add,
1972                                   struct btrfs_delayed_extent_op *extent_op)
1973 {
1974         struct btrfs_path *path;
1975         struct extent_buffer *leaf;
1976         struct btrfs_extent_item *item;
1977         u64 refs;
1978         int ret;
1979
1980         path = btrfs_alloc_path();
1981         if (!path)
1982                 return -ENOMEM;
1983
1984         path->reada = 1;
1985         path->leave_spinning = 1;
1986         /* this will setup the path even if it fails to insert the back ref */
1987         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1988                                            path, bytenr, num_bytes, parent,
1989                                            root_objectid, owner, offset,
1990                                            refs_to_add, extent_op);
1991         if (ret != -EAGAIN)
1992                 goto out;
1993
1994         leaf = path->nodes[0];
1995         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1996         refs = btrfs_extent_refs(leaf, item);
1997         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1998         if (extent_op)
1999                 __run_delayed_extent_op(extent_op, leaf, item);
2000
2001         btrfs_mark_buffer_dirty(leaf);
2002         btrfs_release_path(path);
2003
2004         path->reada = 1;
2005         path->leave_spinning = 1;
2006
2007         /* now insert the actual backref */
2008         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2009                                     path, bytenr, parent, root_objectid,
2010                                     owner, offset, refs_to_add);
2011         if (ret)
2012                 btrfs_abort_transaction(trans, root, ret);
2013 out:
2014         btrfs_free_path(path);
2015         return ret;
2016 }
2017
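/*
 * Apply one delayed ref to a data extent.  An ADD with insert_reserved
 * set means the extent item does not exist on disk yet and is inserted
 * together with its first backref; a plain ADD increments the ref count
 * of an existing extent, and a DROP decrements it, freeing the extent
 * once the last reference is gone.
 */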
2018 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2019                                 struct btrfs_root *root,
2020                                 struct btrfs_delayed_ref_node *node,
2021                                 struct btrfs_delayed_extent_op *extent_op,
2022                                 int insert_reserved)
2023 {
2024         int ret = 0;
2025         struct btrfs_delayed_data_ref *ref;
2026         struct btrfs_key ins;
2027         u64 parent = 0;
2028         u64 ref_root = 0;
2029         u64 flags = 0;
2030
2031         ins.objectid = node->bytenr;
2032         ins.offset = node->num_bytes;
2033         ins.type = BTRFS_EXTENT_ITEM_KEY;
2034
2035         ref = btrfs_delayed_node_to_data_ref(node);
2036         trace_run_delayed_data_ref(node, ref, node->action);
2037
2038         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2039                 parent = ref->parent;
2040         else
2041                 ref_root = ref->root;
2042
2043         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2044                 if (extent_op)
2045                         flags |= extent_op->flags_to_set;
2046                 ret = alloc_reserved_file_extent(trans, root,
2047                                                  parent, ref_root, flags,
2048                                                  ref->objectid, ref->offset,
2049                                                  &ins, node->ref_mod);
2050         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2051                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2052                                              node->num_bytes, parent,
2053                                              ref_root, ref->objectid,
2054                                              ref->offset, node->ref_mod,
2055                                              extent_op);
2056         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2057                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2058                                           node->num_bytes, parent,
2059                                           ref_root, ref->objectid,
2060                                           ref->offset, node->ref_mod,
2061                                           extent_op);
2062         } else {
2063                 BUG();
2064         }
2065         return ret;
2066 }
2067
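/*
 * Apply a pending extent op to the extent item in the leaf: set any
 * requested extent flags and, for tree blocks, rewrite the key stored
 * in the btrfs_tree_block_info that follows the item.
 */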
2068 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2069                                     struct extent_buffer *leaf,
2070                                     struct btrfs_extent_item *ei)
2071 {
2072         u64 flags = btrfs_extent_flags(leaf, ei);
2073         if (extent_op->update_flags) {
2074                 flags |= extent_op->flags_to_set;
2075                 btrfs_set_extent_flags(leaf, ei, flags);
2076         }
2077
2078         if (extent_op->update_key) {
2079                 struct btrfs_tree_block_info *bi;
2080                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2081                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2082                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2083         }
2084 }
2085
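/*
 * Run a delayed extent op against the on-disk extent item: locate the
 * item (trying the skinny metadata key first, then the classic
 * EXTENT_ITEM key) and apply the update via __run_delayed_extent_op().
 */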
2086 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2087                                  struct btrfs_root *root,
2088                                  struct btrfs_delayed_ref_node *node,
2089                                  struct btrfs_delayed_extent_op *extent_op)
2090 {
2091         struct btrfs_key key;
2092         struct btrfs_path *path;
2093         struct btrfs_extent_item *ei;
2094         struct extent_buffer *leaf;
2095         u32 item_size;
2096         int ret;
2097         int err = 0;
2098         int metadata = !extent_op->is_data;
2099
2100         if (trans->aborted)
2101                 return 0;
2102
2103         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2104                 metadata = 0;
2105
2106         path = btrfs_alloc_path();
2107         if (!path)
2108                 return -ENOMEM;
2109
2110         key.objectid = node->bytenr;
2111
2112         if (metadata) {
2113                 key.type = BTRFS_METADATA_ITEM_KEY;
2114                 key.offset = extent_op->level;
2115         } else {
2116                 key.type = BTRFS_EXTENT_ITEM_KEY;
2117                 key.offset = node->num_bytes;
2118         }
2119
2120 again:
2121         path->reada = 1;
2122         path->leave_spinning = 1;
2123         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2124                                 path, 0, 1);
2125         if (ret < 0) {
2126                 err = ret;
2127                 goto out;
2128         }
2129         if (ret > 0) {
2130                 if (metadata) {
2131                         if (path->slots[0] > 0) {
2132                                 path->slots[0]--;
2133                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2134                                                       path->slots[0]);
2135                                 if (key.objectid == node->bytenr &&
2136                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2137                                     key.offset == node->num_bytes)
2138                                         ret = 0;
2139                         }
2140                         if (ret > 0) {
2141                                 btrfs_release_path(path);
2142                                 metadata = 0;
2143
2144                                 key.objectid = node->bytenr;
2145                                 key.offset = node->num_bytes;
2146                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2147                                 goto again;
2148                         }
2149                 } else {
2150                         err = -EIO;
2151                         goto out;
2152                 }
2153         }
2154
2155         leaf = path->nodes[0];
2156         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2157 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2158         if (item_size < sizeof(*ei)) {
2159                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2160                                              path, (u64)-1, 0);
2161                 if (ret < 0) {
2162                         err = ret;
2163                         goto out;
2164                 }
2165                 leaf = path->nodes[0];
2166                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2167         }
2168 #endif
2169         BUG_ON(item_size < sizeof(*ei));
2170         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2171         __run_delayed_extent_op(extent_op, leaf, ei);
2172
2173         btrfs_mark_buffer_dirty(leaf);
2174 out:
2175         btrfs_free_path(path);
2176         return err;
2177 }
2178
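/*
 * Apply one delayed ref to a tree block, the metadata counterpart of
 * run_delayed_data_ref().  With the skinny-metadata feature the extent
 * is keyed as (bytenr, METADATA_ITEM, level), otherwise as the classic
 * (bytenr, EXTENT_ITEM, num_bytes).
 */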
2179 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2180                                 struct btrfs_root *root,
2181                                 struct btrfs_delayed_ref_node *node,
2182                                 struct btrfs_delayed_extent_op *extent_op,
2183                                 int insert_reserved)
2184 {
2185         int ret = 0;
2186         struct btrfs_delayed_tree_ref *ref;
2187         struct btrfs_key ins;
2188         u64 parent = 0;
2189         u64 ref_root = 0;
2190         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2191                                                  SKINNY_METADATA);
2192
2193         ref = btrfs_delayed_node_to_tree_ref(node);
2194         trace_run_delayed_tree_ref(node, ref, node->action);
2195
2196         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2197                 parent = ref->parent;
2198         else
2199                 ref_root = ref->root;
2200
2201         ins.objectid = node->bytenr;
2202         if (skinny_metadata) {
2203                 ins.offset = ref->level;
2204                 ins.type = BTRFS_METADATA_ITEM_KEY;
2205         } else {
2206                 ins.offset = node->num_bytes;
2207                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2208         }
2209
2210         BUG_ON(node->ref_mod != 1);
2211         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2212                 BUG_ON(!extent_op || !extent_op->update_flags);
2213                 ret = alloc_reserved_tree_block(trans, root,
2214                                                 parent, ref_root,
2215                                                 extent_op->flags_to_set,
2216                                                 &extent_op->key,
2217                                                 ref->level, &ins);
2218         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2219                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2220                                              node->num_bytes, parent, ref_root,
2221                                              ref->level, 0, 1, extent_op);
2222         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2223                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2224                                           node->num_bytes, parent, ref_root,
2225                                           ref->level, 0, 1, extent_op);
2226         } else {
2227                 BUG();
2228         }
2229         return ret;
2230 }
2231
2232 /* helper function to actually process a single delayed ref entry */
2233 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2234                                struct btrfs_root *root,
2235                                struct btrfs_delayed_ref_node *node,
2236                                struct btrfs_delayed_extent_op *extent_op,
2237                                int insert_reserved)
2238 {
2239         int ret = 0;
2240
2241         if (trans->aborted) {
2242                 if (insert_reserved)
2243                         btrfs_pin_extent(root, node->bytenr,
2244                                          node->num_bytes, 1);
2245                 return 0;
2246         }
2247
2248         if (btrfs_delayed_ref_is_head(node)) {
2249                 struct btrfs_delayed_ref_head *head;
2250                 /*
2251                  * we've hit the end of the chain and we were supposed
2252                  * to insert this extent into the tree.  But it got
2253                  * deleted before we ever needed to insert it, so all
2254                  * we have to do is clean up the accounting
2255                  */
2256                 BUG_ON(extent_op);
2257                 head = btrfs_delayed_node_to_head(node);
2258                 trace_run_delayed_ref_head(node, head, node->action);
2259
2260                 if (insert_reserved) {
2261                         btrfs_pin_extent(root, node->bytenr,
2262                                          node->num_bytes, 1);
2263                         if (head->is_data) {
2264                                 ret = btrfs_del_csums(trans, root,
2265                                                       node->bytenr,
2266                                                       node->num_bytes);
2267                         }
2268                 }
2269                 return ret;
2270         }
2271
2272         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2273             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2274                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2275                                            insert_reserved);
2276         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2277                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2278                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2279                                            insert_reserved);
2280         else
2281                 BUG();
2282         return ret;
2283 }
2284
2285 static noinline struct btrfs_delayed_ref_node *
2286 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2287 {
2288         struct rb_node *node;
2289         struct btrfs_delayed_ref_node *ref;
2290         int action = BTRFS_ADD_DELAYED_REF;
2291 again:
2292         /*
2293          * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2294          * This prevents the ref count from going down to zero while
2295          * there are still pending delayed refs.
2296          */
2297         node = rb_prev(&head->node.rb_node);
2298         while (1) {
2299                 if (!node)
2300                         break;
2301                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2302                                 rb_node);
2303                 if (ref->bytenr != head->node.bytenr)
2304                         break;
2305                 if (ref->action == action)
2306                         return ref;
2307                 node = rb_prev(node);
2308         }
2309         if (action == BTRFS_ADD_DELAYED_REF) {
2310                 action = BTRFS_DROP_DELAYED_REF;
2311                 goto again;
2312         }
2313         return NULL;
2314 }
2315
2316 /*
2317  * Returns the number of refs processed, or <0 on error, in which
2318  * case the transaction is aborted (-ENOMEM or -EIO).
2319  */
2320 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2321                                        struct btrfs_root *root,
2322                                        struct list_head *cluster)
2323 {
2324         struct btrfs_delayed_ref_root *delayed_refs;
2325         struct btrfs_delayed_ref_node *ref;
2326         struct btrfs_delayed_ref_head *locked_ref = NULL;
2327         struct btrfs_delayed_extent_op *extent_op;
2328         struct btrfs_fs_info *fs_info = root->fs_info;
2329         int ret;
2330         int count = 0;
2331         int must_insert_reserved = 0;
2332
2333         delayed_refs = &trans->transaction->delayed_refs;
2334         while (1) {
2335                 if (!locked_ref) {
2336                         /* pick a new head ref from the cluster list */
2337                         if (list_empty(cluster))
2338                                 break;
2339
2340                         locked_ref = list_entry(cluster->next,
2341                                      struct btrfs_delayed_ref_head, cluster);
2342
2343                         /* grab the lock that says we are going to process
2344                          * all the refs for this head */
2345                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2346
2347                         /*
2348                          * we may have dropped the spin lock to get the head
2349                          * mutex lock, and that might have given someone else
2350                          * time to free the head.  If that's true, it has been
2351                          * removed from our list and we can move on.
2352                          */
2353                         if (ret == -EAGAIN) {
2354                                 locked_ref = NULL;
2355                                 count++;
2356                                 continue;
2357                         }
2358                 }
2359
2360                 /*
2361                  * We need to try and merge add/drops of the same ref since we
2362                  * can run into issues with relocate dropping the implicit ref
2363                  * and then it being added back again before the drop can
2364                  * finish.  If we merged anything we need to re-loop so we can
2365                  * get a good ref.
2366                  */
2367                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2368                                          locked_ref);
2369
2370                 /*
2371                  * locked_ref is the head node, so we have to go one
2372                  * node back for any delayed ref updates
2373                  */
2374                 ref = select_delayed_ref(locked_ref);
2375
2376                 if (ref && ref->seq &&
2377                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2378                         /*
2379                          * there are still refs with lower seq numbers in the
2380                          * process of being added. Don't run this ref yet.
2381                          */
2382                         list_del_init(&locked_ref->cluster);
2383                         btrfs_delayed_ref_unlock(locked_ref);
2384                         locked_ref = NULL;
2385                         delayed_refs->num_heads_ready++;
2386                         spin_unlock(&delayed_refs->lock);
2387                         cond_resched();
2388                         spin_lock(&delayed_refs->lock);
2389                         continue;
2390                 }
2391
2392                 /*
2393                  * record the must insert reserved flag before we
2394                  * drop the spin lock.
2395                  */
2396                 must_insert_reserved = locked_ref->must_insert_reserved;
2397                 locked_ref->must_insert_reserved = 0;
2398
2399                 extent_op = locked_ref->extent_op;
2400                 locked_ref->extent_op = NULL;
2401
2402                 if (!ref) {
2403                         /* All delayed refs have been processed; go ahead
2404                          * and send the head node to run_one_delayed_ref,
2405                          * so that any accounting fixes can happen.
2406                          */
2407                         ref = &locked_ref->node;
2408
2409                         if (extent_op && must_insert_reserved) {
2410                                 btrfs_free_delayed_extent_op(extent_op);
2411                                 extent_op = NULL;
2412                         }
2413
2414                         if (extent_op) {
2415                                 spin_unlock(&delayed_refs->lock);
2416
2417                                 ret = run_delayed_extent_op(trans, root,
2418                                                             ref, extent_op);
2419                                 btrfs_free_delayed_extent_op(extent_op);
2420
2421                                 if (ret) {
2422                                         /*
2423                                          * Need to reset must_insert_reserved if
2424                                          * there was an error so the abort stuff
2425                                          * can cleanup the reserved space
2426                                          * properly.
2427                                          */
2428                                         if (must_insert_reserved)
2429                                                 locked_ref->must_insert_reserved = 1;
2430                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2431                                         spin_lock(&delayed_refs->lock);
2432                                         btrfs_delayed_ref_unlock(locked_ref);
2433                                         return ret;
2434                                 }
2435
2436                                 goto next;
2437                         }
2438                 }
2439
2440                 ref->in_tree = 0;
2441                 rb_erase(&ref->rb_node, &delayed_refs->root);
2442                 if (btrfs_delayed_ref_is_head(ref)) {
2443                         rb_erase(&locked_ref->href_node,
2444                                  &delayed_refs->href_root);
2445                 }
2446                 delayed_refs->num_entries--;
2447                 if (!btrfs_delayed_ref_is_head(ref)) {
2448                         /*
2449                          * when we play the delayed ref, also correct the
2450                          * ref_mod on head
2451                          */
2452                         switch (ref->action) {
2453                         case BTRFS_ADD_DELAYED_REF:
2454                         case BTRFS_ADD_DELAYED_EXTENT:
2455                                 locked_ref->node.ref_mod -= ref->ref_mod;
2456                                 break;
2457                         case BTRFS_DROP_DELAYED_REF:
2458                                 locked_ref->node.ref_mod += ref->ref_mod;
2459                                 break;
2460                         default:
2461                                 WARN_ON(1);
2462                         }
2463                 } else {
2464                         list_del_init(&locked_ref->cluster);
2465                 }
2466                 spin_unlock(&delayed_refs->lock);
2467
2468                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2469                                           must_insert_reserved);
2470
2471                 btrfs_free_delayed_extent_op(extent_op);
2472                 if (ret) {
2473                         btrfs_delayed_ref_unlock(locked_ref);
2474                         btrfs_put_delayed_ref(ref);
2475                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2476                         spin_lock(&delayed_refs->lock);
2477                         return ret;
2478                 }
2479
2480                 /*
2481                  * If this node is a head, that means all the refs in this head
2482                  * have been dealt with, and we will pick the next head to deal
2483                  * with, so we must unlock the head and drop it from the cluster
2484                  * list before we release it.
2485                  */
2486                 if (btrfs_delayed_ref_is_head(ref)) {
2487                         btrfs_delayed_ref_unlock(locked_ref);
2488                         locked_ref = NULL;
2489                 }
2490                 btrfs_put_delayed_ref(ref);
2491                 count++;
2492 next:
2493                 cond_resched();
2494                 spin_lock(&delayed_refs->lock);
2495         }
2496         return count;
2497 }
2498
2499 #ifdef SCRAMBLE_DELAYED_REFS
2500 /*
2501  * Normally delayed refs get processed in ascending bytenr order. This
2502  * correlates in most cases to the order added. To expose dependencies on this
2503  * order, we start to process the tree in the middle instead of the beginning
2504  */
2505 static u64 find_middle(struct rb_root *root)
2506 {
2507         struct rb_node *n = root->rb_node;
2508         struct btrfs_delayed_ref_node *entry;
2509         int alt = 1;
2510         u64 middle;
2511         u64 first = 0, last = 0;
2512
2513         n = rb_first(root);
2514         if (n) {
2515                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2516                 first = entry->bytenr;
2517         }
2518         n = rb_last(root);
2519         if (n) {
2520                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2521                 last = entry->bytenr;
2522         }
2523         n = root->rb_node;
2524
2525         while (n) {
2526                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2527                 WARN_ON(!entry->in_tree);
2528
2529                 middle = entry->bytenr;
2530
2531                 if (alt)
2532                         n = n->rb_left;
2533                 else
2534                         n = n->rb_right;
2535
2536                 alt = 1 - alt;
2537         }
2538         return middle;
2539 }
2540 #endif
2541
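/*
 * Drain the per-transaction qgroup_ref_list, feeding each recorded ref
 * to the qgroup accounting code, then drop the tree mod seq element
 * that was held while the list was being built.
 */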
2542 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2543                                          struct btrfs_fs_info *fs_info)
2544 {
2545         struct qgroup_update *qgroup_update;
2546         int ret = 0;
2547
2548         if (list_empty(&trans->qgroup_ref_list) !=
2549             !trans->delayed_ref_elem.seq) {
2550                 /* list without seq or seq without list */
2551                 btrfs_err(fs_info,
2552                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2553                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2554                         (u32)(trans->delayed_ref_elem.seq >> 32),
2555                         (u32)trans->delayed_ref_elem.seq);
2556                 BUG();
2557         }
2558
2559         if (!trans->delayed_ref_elem.seq)
2560                 return 0;
2561
2562         while (!list_empty(&trans->qgroup_ref_list)) {
2563                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2564                                                  struct qgroup_update, list);
2565                 list_del(&qgroup_update->list);
2566                 if (!ret)
2567                         ret = btrfs_qgroup_account_ref(
2568                                         trans, fs_info, qgroup_update->node,
2569                                         qgroup_update->extent_op);
2570                 kfree(qgroup_update);
2571         }
2572
2573         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2574
2575         return ret;
2576 }
2577
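/*
 * Return 1 once the global ref_seq counter has left the window
 * [seq, seq + count), i.e. roughly @count more refs have been processed
 * since @seq was sampled (or the counter wrapped).
 */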
2578 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2579                       int count)
2580 {
2581         int val = atomic_read(&delayed_refs->ref_seq);
2582
2583         if (val < seq || val >= seq + count)
2584                 return 1;
2585         return 0;
2586 }
2587
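/*
 * Estimate how many extent tree leaves the given number of delayed ref
 * heads could touch.  Each head costs roughly one extent item plus one
 * inline ref, plus a btrfs_tree_block_info when the skinny-metadata
 * feature is not enabled.  For illustration only: at 24 bytes per
 * extent item and 9 per inline ref, a hypothetical 16KiB leaf would
 * hold on the order of 16384 / 33 ~= 490 heads.
 */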
2588 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2589 {
2590         u64 num_bytes;
2591
2592         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2593                              sizeof(struct btrfs_extent_inline_ref));
2594         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2595                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2596
2597         /*
2598          * We don't ever fill up leaves all the way, so the caller doubles
2599          * this to be closer to what we're really going to want to use.
2600          */
2601         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2602 }
2603
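/*
 * Heuristic for deciding whether callers should help flush delayed
 * refs: estimate the metadata space needed to run every ready head,
 * double it for slack (and double it again when no more chunks can be
 * allocated), and return 1 once the global block reserve can no longer
 * cover that estimate.
 */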
2604 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2605                                        struct btrfs_root *root)
2606 {
2607         struct btrfs_block_rsv *global_rsv;
2608         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2609         u64 num_bytes;
2610         int ret = 0;
2611
2612         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2613         num_heads = heads_to_leaves(root, num_heads);
2614         if (num_heads > 1)
2615                 num_bytes += (num_heads - 1) * root->leafsize;
2616         num_bytes <<= 1;
2617         global_rsv = &root->fs_info->global_block_rsv;
2618
2619         /*
2620          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2621          * wiggle room since running delayed refs can create more delayed refs.
2622          */
2623         if (global_rsv->space_info->full)
2624                 num_bytes <<= 1;
2625
2626         spin_lock(&global_rsv->lock);
2627         if (global_rsv->reserved <= num_bytes)
2628                 ret = 1;
2629         spin_unlock(&global_rsv->lock);
2630         return ret;
2631 }
2632
2633 /*
2634  * this starts processing the delayed reference count updates and
2635  * extent insertions we have queued up so far.  count can be
2636  * 0, which means to process everything in the tree at the start
2637  * of the run (but not newly added entries), or it can be some target
2638  * number you'd like to process.
2639  *
2640  * Returns 0 on success or if called with an aborted transaction
2641  * Returns <0 on error and aborts the transaction
2642  */
2643 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2644                            struct btrfs_root *root, unsigned long count)
2645 {
2646         struct rb_node *node;
2647         struct btrfs_delayed_ref_root *delayed_refs;
2648         struct btrfs_delayed_ref_head *head;
2649         struct list_head cluster;
2650         int ret;
2651         u64 delayed_start;
2652         int run_all = count == (unsigned long)-1;
2653         int run_most = 0;
2654         int loops;
2655
2656         /* We'll clean this up in btrfs_cleanup_transaction */
2657         if (trans->aborted)
2658                 return 0;
2659
2660         if (root == root->fs_info->extent_root)
2661                 root = root->fs_info->tree_root;
2662
2663         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2664
2665         delayed_refs = &trans->transaction->delayed_refs;
2666         INIT_LIST_HEAD(&cluster);
2667         if (count == 0) {
2668                 count = delayed_refs->num_entries * 2;
2669                 run_most = 1;
2670         }
2671
2672         if (!run_all && !run_most) {
2673                 int old;
2674                 int seq = atomic_read(&delayed_refs->ref_seq);
2675
2676 progress:
2677                 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2678                 if (old) {
2679                         DEFINE_WAIT(__wait);
2680                         if (delayed_refs->flushing ||
2681                             !btrfs_should_throttle_delayed_refs(trans, root))
2682                                 return 0;
2683
2684                         prepare_to_wait(&delayed_refs->wait, &__wait,
2685                                         TASK_UNINTERRUPTIBLE);
2686
2687                         old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2688                         if (old) {
2689                                 schedule();
2690                                 finish_wait(&delayed_refs->wait, &__wait);
2691
2692                                 if (!refs_newer(delayed_refs, seq, 256))
2693                                         goto progress;
2694                                 else
2695                                         return 0;
2696                         } else {
2697                                 finish_wait(&delayed_refs->wait, &__wait);
2698                                 goto again;
2699                         }
2700                 }
2701
2702         } else {
2703                 atomic_inc(&delayed_refs->procs_running_refs);
2704         }
2705
2706 again:
2707         loops = 0;
2708         spin_lock(&delayed_refs->lock);
2709
2710 #ifdef SCRAMBLE_DELAYED_REFS
2711         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2712 #endif
2713
2714         while (1) {
2715                 if (!(run_all || run_most) &&
2716                     !btrfs_should_throttle_delayed_refs(trans, root))
2717                         break;
2718
2719                 /*
2720                  * go find something we can process in the rbtree.  We start at
2721                  * the beginning of the tree, and then build a cluster
2722                  * of refs to process starting at the first one we are able to
2723                  * lock.
2724                  */
2725                 delayed_start = delayed_refs->run_delayed_start;
2726                 ret = btrfs_find_ref_cluster(trans, &cluster,
2727                                              delayed_refs->run_delayed_start);
2728                 if (ret)
2729                         break;
2730
2731                 ret = run_clustered_refs(trans, root, &cluster);
2732                 if (ret < 0) {
2733                         btrfs_release_ref_cluster(&cluster);
2734                         spin_unlock(&delayed_refs->lock);
2735                         btrfs_abort_transaction(trans, root, ret);
2736                         atomic_dec(&delayed_refs->procs_running_refs);
2737                         wake_up(&delayed_refs->wait);
2738                         return ret;
2739                 }
2740
2741                 atomic_add(ret, &delayed_refs->ref_seq);
2742
2743                 count -= min_t(unsigned long, ret, count);
2744
2745                 if (count == 0)
2746                         break;
2747
2748                 if (delayed_start >= delayed_refs->run_delayed_start) {
2749                         if (loops == 0) {
2750                                 /*
2751                                  * btrfs_find_ref_cluster looped.  Let's do one
2752                                  * more cycle.  If we don't run any delayed refs
2753                                  * during that cycle (because all of them are
2754                                  * blocked), bail out.
2755                                  */
2756                                 loops = 1;
2757                         } else {
2758                                 /*
2759                                  * no runnable refs left, stop trying
2760                                  */
2761                                 BUG_ON(run_all);
2762                                 break;
2763                         }
2764                 }
2765                 if (ret) {
2766                         /* refs were run, let's reset staleness detection */
2767                         loops = 0;
2768                 }
2769         }
2770
2771         if (run_all) {
2772                 if (!list_empty(&trans->new_bgs)) {
2773                         spin_unlock(&delayed_refs->lock);
2774                         btrfs_create_pending_block_groups(trans, root);
2775                         spin_lock(&delayed_refs->lock);
2776                 }
2777
2778                 node = rb_first(&delayed_refs->href_root);
2779                 if (!node)
2780                         goto out;
2781                 count = (unsigned long)-1;
2782
2783                 while (node) {
2784                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2785                                         href_node);
2786                         if (btrfs_delayed_ref_is_head(&head->node)) {
2787                                 struct btrfs_delayed_ref_node *ref;
2788
2789                                 ref = &head->node;
2790                                 atomic_inc(&ref->refs);
2791
2792                                 spin_unlock(&delayed_refs->lock);
2793                                 /*
2794                                  * Mutex was contended, block until it's
2795                                  * released and try again
2796                                  */
2797                                 mutex_lock(&head->mutex);
2798                                 mutex_unlock(&head->mutex);
2799
2800                                 btrfs_put_delayed_ref(ref);
2801                                 cond_resched();
2802                                 goto again;
2803                         } else {
2804                                 WARN_ON(1);
2805                         }
2806                         node = rb_next(node);
2807                 }
2808                 spin_unlock(&delayed_refs->lock);
2809                 schedule_timeout(1);
2810                 goto again;
2811         }
2812 out:
2813         atomic_dec(&delayed_refs->procs_running_refs);
2814         smp_mb();
2815         if (waitqueue_active(&delayed_refs->wait))
2816                 wake_up(&delayed_refs->wait);
2817
2818         spin_unlock(&delayed_refs->lock);
2819         assert_qgroups_uptodate(trans);
2820         return 0;
2821 }
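
/*
 * Editorial example (not part of the original source): a count of
 * (unsigned long)-1 runs every pending entry, including ones added while
 * we run; this is how btrfs_write_dirty_block_groups() below drains the
 * queue:
 *
 *	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *	if (ret)
 *		return ret;	// transaction was aborted
 */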
2822
2823 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2824                                 struct btrfs_root *root,
2825                                 u64 bytenr, u64 num_bytes, u64 flags,
2826                                 int level, int is_data)
2827 {
2828         struct btrfs_delayed_extent_op *extent_op;
2829         int ret;
2830
2831         extent_op = btrfs_alloc_delayed_extent_op();
2832         if (!extent_op)
2833                 return -ENOMEM;
2834
2835         extent_op->flags_to_set = flags;
2836         extent_op->update_flags = 1;
2837         extent_op->update_key = 0;
2838         extent_op->is_data = is_data ? 1 : 0;
2839         extent_op->level = level;
2840
2841         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2842                                           num_bytes, extent_op);
2843         if (ret)
2844                 btrfs_free_delayed_extent_op(extent_op);
2845         return ret;
2846 }
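
/*
 * Editorial sketch (not part of the original source): this queues a
 * delayed extent op instead of touching the extent tree directly; a
 * hypothetical caller flipping a tree block to full backref mode could
 * look like:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start,
 *					  buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF,
 *					  btrfs_header_level(buf), 0);
 *
 * The flag update only hits disk when the delayed refs are run.
 */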
2847
2848 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2849                                       struct btrfs_root *root,
2850                                       struct btrfs_path *path,
2851                                       u64 objectid, u64 offset, u64 bytenr)
2852 {
2853         struct btrfs_delayed_ref_head *head;
2854         struct btrfs_delayed_ref_node *ref;
2855         struct btrfs_delayed_data_ref *data_ref;
2856         struct btrfs_delayed_ref_root *delayed_refs;
2857         struct rb_node *node;
2858         int ret = 0;
2859
2860         ret = -ENOENT;
2861         delayed_refs = &trans->transaction->delayed_refs;
2862         spin_lock(&delayed_refs->lock);
2863         head = btrfs_find_delayed_ref_head(trans, bytenr);
2864         if (!head)
2865                 goto out;
2866
2867         if (!mutex_trylock(&head->mutex)) {
2868                 atomic_inc(&head->node.refs);
2869                 spin_unlock(&delayed_refs->lock);
2870
2871                 btrfs_release_path(path);
2872
2873                 /*
2874                  * Mutex was contended, block until it's released and let
2875                  * caller try again
2876                  */
2877                 mutex_lock(&head->mutex);
2878                 mutex_unlock(&head->mutex);
2879                 btrfs_put_delayed_ref(&head->node);
2880                 return -EAGAIN;
2881         }
2882
2883         node = rb_prev(&head->node.rb_node);
2884         if (!node)
2885                 goto out_unlock;
2886
2887         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2888
2889         if (ref->bytenr != bytenr)
2890                 goto out_unlock;
2891
2892         ret = 1;
2893         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2894                 goto out_unlock;
2895
2896         data_ref = btrfs_delayed_node_to_data_ref(ref);
2897
2898         node = rb_prev(node);
2899         if (node) {
2900                 int seq = ref->seq;
2901
2902                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2903                 if (ref->bytenr == bytenr && ref->seq == seq)
2904                         goto out_unlock;
2905         }
2906
2907         if (data_ref->root != root->root_key.objectid ||
2908             data_ref->objectid != objectid || data_ref->offset != offset)
2909                 goto out_unlock;
2910
2911         ret = 0;
2912 out_unlock:
2913         mutex_unlock(&head->mutex);
2914 out:
2915         spin_unlock(&delayed_refs->lock);
2916         return ret;
2917 }
2918
2919 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2920                                         struct btrfs_root *root,
2921                                         struct btrfs_path *path,
2922                                         u64 objectid, u64 offset, u64 bytenr)
2923 {
2924         struct btrfs_root *extent_root = root->fs_info->extent_root;
2925         struct extent_buffer *leaf;
2926         struct btrfs_extent_data_ref *ref;
2927         struct btrfs_extent_inline_ref *iref;
2928         struct btrfs_extent_item *ei;
2929         struct btrfs_key key;
2930         u32 item_size;
2931         int ret;
2932
2933         key.objectid = bytenr;
2934         key.offset = (u64)-1;
2935         key.type = BTRFS_EXTENT_ITEM_KEY;
2936
2937         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2938         if (ret < 0)
2939                 goto out;
2940         BUG_ON(ret == 0); /* Corruption */
2941
2942         ret = -ENOENT;
2943         if (path->slots[0] == 0)
2944                 goto out;
2945
2946         path->slots[0]--;
2947         leaf = path->nodes[0];
2948         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2949
2950         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2951                 goto out;
2952
2953         ret = 1;
2954         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2955 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2956         if (item_size < sizeof(*ei)) {
2957                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2958                 goto out;
2959         }
2960 #endif
2961         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2962
2963         if (item_size != sizeof(*ei) +
2964             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2965                 goto out;
2966
2967         if (btrfs_extent_generation(leaf, ei) <=
2968             btrfs_root_last_snapshot(&root->root_item))
2969                 goto out;
2970
2971         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2972         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2973             BTRFS_EXTENT_DATA_REF_KEY)
2974                 goto out;
2975
2976         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2977         if (btrfs_extent_refs(leaf, ei) !=
2978             btrfs_extent_data_ref_count(leaf, ref) ||
2979             btrfs_extent_data_ref_root(leaf, ref) !=
2980             root->root_key.objectid ||
2981             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2982             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2983                 goto out;
2984
2985         ret = 0;
2986 out:
2987         return ret;
2988 }
2989
2990 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2991                           struct btrfs_root *root,
2992                           u64 objectid, u64 offset, u64 bytenr)
2993 {
2994         struct btrfs_path *path;
2995         int ret;
2996         int ret2;
2997
2998         path = btrfs_alloc_path();
2999         if (!path)
3000                 return -ENOMEM;
3001
3002         do {
3003                 ret = check_committed_ref(trans, root, path, objectid,
3004                                           offset, bytenr);
3005                 if (ret && ret != -ENOENT)
3006                         goto out;
3007
3008                 ret2 = check_delayed_ref(trans, root, path, objectid,
3009                                          offset, bytenr);
3010         } while (ret2 == -EAGAIN);
3011
3012         if (ret2 && ret2 != -ENOENT) {
3013                 ret = ret2;
3014                 goto out;
3015         }
3016
3017         if (ret != -ENOENT || ret2 != -ENOENT)
3018                 ret = 0;
3019 out:
3020         btrfs_free_path(path);
3021         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3022                 WARN_ON(ret > 0);
3023         return ret;
3024 }
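
/*
 * Editorial sketch (not part of the original source): a return of 0 means
 * neither the committed extent tree nor the delayed refs show another
 * holder of the extent, which is what a nodatacow-style overwrite check
 * wants (helper names below are hypothetical):
 *
 *	if (btrfs_cross_ref_exist(trans, root, ino, offset, bytenr) == 0)
 *		overwrite_in_place();	// no one else references it
 *	else
 *		fall_back_to_cow();
 */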
3025
3026 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3027                            struct btrfs_root *root,
3028                            struct extent_buffer *buf,
3029                            int full_backref, int inc, int for_cow)
3030 {
3031         u64 bytenr;
3032         u64 num_bytes;
3033         u64 parent;
3034         u64 ref_root;
3035         u32 nritems;
3036         struct btrfs_key key;
3037         struct btrfs_file_extent_item *fi;
3038         int i;
3039         int level;
3040         int ret = 0;
3041         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3042                             u64, u64, u64, u64, u64, u64, int);
3043
3044         ref_root = btrfs_header_owner(buf);
3045         nritems = btrfs_header_nritems(buf);
3046         level = btrfs_header_level(buf);
3047
3048         if (!root->ref_cows && level == 0)
3049                 return 0;
3050
3051         if (inc)
3052                 process_func = btrfs_inc_extent_ref;
3053         else
3054                 process_func = btrfs_free_extent;
3055
3056         if (full_backref)
3057                 parent = buf->start;
3058         else
3059                 parent = 0;
3060
3061         for (i = 0; i < nritems; i++) {
3062                 if (level == 0) {
3063                         btrfs_item_key_to_cpu(buf, &key, i);
3064                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3065                                 continue;
3066                         fi = btrfs_item_ptr(buf, i,
3067                                             struct btrfs_file_extent_item);
3068                         if (btrfs_file_extent_type(buf, fi) ==
3069                             BTRFS_FILE_EXTENT_INLINE)
3070                                 continue;
3071                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3072                         if (bytenr == 0)
3073                                 continue;
3074
3075                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3076                         key.offset -= btrfs_file_extent_offset(buf, fi);
3077                         ret = process_func(trans, root, bytenr, num_bytes,
3078                                            parent, ref_root, key.objectid,
3079                                            key.offset, for_cow);
3080                         if (ret)
3081                                 goto fail;
3082                 } else {
3083                         bytenr = btrfs_node_blockptr(buf, i);
3084                         num_bytes = btrfs_level_size(root, level - 1);
3085                         ret = process_func(trans, root, bytenr, num_bytes,
3086                                            parent, ref_root, level - 1, 0,
3087                                            for_cow);
3088                         if (ret)
3089                                 goto fail;
3090                 }
3091         }
3092         return 0;
3093 fail:
3094         return ret;
3095 }
3096
3097 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3098                   struct extent_buffer *buf, int full_backref, int for_cow)
3099 {
3100         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3101 }
3102
3103 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3104                   struct extent_buffer *buf, int full_backref, int for_cow)
3105 {
3106         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3107 }
3108
3109 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3110                                  struct btrfs_root *root,
3111                                  struct btrfs_path *path,
3112                                  struct btrfs_block_group_cache *cache)
3113 {
3114         int ret;
3115         struct btrfs_root *extent_root = root->fs_info->extent_root;
3116         unsigned long bi;
3117         struct extent_buffer *leaf;
3118
3119         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3120         if (ret < 0)
3121                 goto fail;
3122         BUG_ON(ret); /* Corruption */
3123
3124         leaf = path->nodes[0];
3125         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3126         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3127         btrfs_mark_buffer_dirty(leaf);
3128         btrfs_release_path(path);
3129 fail:
3130         if (ret) {
3131                 btrfs_abort_transaction(trans, root, ret);
3132                 return ret;
3133         }
3134         return 0;
3135
3136 }
3137
3138 static struct btrfs_block_group_cache *
3139 next_block_group(struct btrfs_root *root,
3140                  struct btrfs_block_group_cache *cache)
3141 {
3142         struct rb_node *node;
3143         spin_lock(&root->fs_info->block_group_cache_lock);
3144         node = rb_next(&cache->cache_node);
3145         btrfs_put_block_group(cache);
3146         if (node) {
3147                 cache = rb_entry(node, struct btrfs_block_group_cache,
3148                                  cache_node);
3149                 btrfs_get_block_group(cache);
3150         } else
3151                 cache = NULL;
3152         spin_unlock(&root->fs_info->block_group_cache_lock);
3153         return cache;
3154 }
3155
3156 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3157                             struct btrfs_trans_handle *trans,
3158                             struct btrfs_path *path)
3159 {
3160         struct btrfs_root *root = block_group->fs_info->tree_root;
3161         struct inode *inode = NULL;
3162         u64 alloc_hint = 0;
3163         int dcs = BTRFS_DC_ERROR;
3164         int num_pages = 0;
3165         int retries = 0;
3166         int ret = 0;
3167
3168         /*
3169          * If this block group is smaller than 100 megs, don't bother caching the
3170          * block group.
3171          */
3172         if (block_group->key.offset < (100 * 1024 * 1024)) {
3173                 spin_lock(&block_group->lock);
3174                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3175                 spin_unlock(&block_group->lock);
3176                 return 0;
3177         }
3178
3179 again:
3180         inode = lookup_free_space_inode(root, block_group, path);
3181         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3182                 ret = PTR_ERR(inode);
3183                 btrfs_release_path(path);
3184                 goto out;
3185         }
3186
3187         if (IS_ERR(inode)) {
3188                 BUG_ON(retries);
3189                 retries++;
3190
3191                 if (block_group->ro)
3192                         goto out_free;
3193
3194                 ret = create_free_space_inode(root, trans, block_group, path);
3195                 if (ret)
3196                         goto out_free;
3197                 goto again;
3198         }
3199
3200         /* We've already set up this transaction, go ahead and exit */
3201         if (block_group->cache_generation == trans->transid &&
3202             i_size_read(inode)) {
3203                 dcs = BTRFS_DC_SETUP;
3204                 goto out_put;
3205         }
3206
3207         /*
3208          * We want to set the generation to 0 so that if anything goes wrong
3209          * from here on out we know not to trust this cache the next time we
3210          * load it.
3211          */
3212         BTRFS_I(inode)->generation = 0;
3213         ret = btrfs_update_inode(trans, root, inode);
3214         WARN_ON(ret);
3215
3216         if (i_size_read(inode) > 0) {
3217                 ret = btrfs_check_trunc_cache_free_space(root,
3218                                         &root->fs_info->global_block_rsv);
3219                 if (ret)
3220                         goto out_put;
3221
3222                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3223                 if (ret)
3224                         goto out_put;
3225         }
3226
3227         spin_lock(&block_group->lock);
3228         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3229             !btrfs_test_opt(root, SPACE_CACHE)) {
3230                 /*
3231                  * don't bother trying to write stuff out _if_
3232                  * a) we're not cached,
3233                  * b) we were mounted with the nospace_cache option.
3234                  */
3235                 dcs = BTRFS_DC_WRITTEN;
3236                 spin_unlock(&block_group->lock);
3237                 goto out_put;
3238         }
3239         spin_unlock(&block_group->lock);
3240
3241         /*
3242          * Try to preallocate enough space based on how big the block group is.
3243          * Keep in mind this has to include any pinned space which could end up
3244          * taking up quite a bit since it's not folded into the other space
3245          * cache.
3246          */
3247         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3248         if (!num_pages)
3249                 num_pages = 1;
3250
3251         num_pages *= 16;
3252         num_pages *= PAGE_CACHE_SIZE;
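
	/*
	 * Editorial worked example (not part of the original source),
	 * assuming 4K pages: a 1G block group gives
	 * div64_u64(1G, 256M) = 4, so 4 * 16 = 64 pages, i.e. 256K
	 * preallocated for the free space cache file.
	 */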
3253
3254         ret = btrfs_check_data_free_space(inode, num_pages);
3255         if (ret)
3256                 goto out_put;
3257
3258         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3259                                               num_pages, num_pages,
3260                                               &alloc_hint);
3261         if (!ret)
3262                 dcs = BTRFS_DC_SETUP;
3263         btrfs_free_reserved_data_space(inode, num_pages);
3264
3265 out_put:
3266         iput(inode);
3267 out_free:
3268         btrfs_release_path(path);
3269 out:
3270         spin_lock(&block_group->lock);
3271         if (!ret && dcs == BTRFS_DC_SETUP)
3272                 block_group->cache_generation = trans->transid;
3273         block_group->disk_cache_state = dcs;
3274         spin_unlock(&block_group->lock);
3275
3276         return ret;
3277 }
3278
3279 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3280                                    struct btrfs_root *root)
3281 {
3282         struct btrfs_block_group_cache *cache;
3283         int err = 0;
3284         struct btrfs_path *path;
3285         u64 last = 0;
3286
3287         path = btrfs_alloc_path();
3288         if (!path)
3289                 return -ENOMEM;
3290
3291 again:
3292         while (1) {
3293                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3294                 while (cache) {
3295                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3296                                 break;
3297                         cache = next_block_group(root, cache);
3298                 }
3299                 if (!cache) {
3300                         if (last == 0)
3301                                 break;
3302                         last = 0;
3303                         continue;
3304                 }
3305                 err = cache_save_setup(cache, trans, path);
3306                 last = cache->key.objectid + cache->key.offset;
3307                 btrfs_put_block_group(cache);
3308         }
3309
3310         while (1) {
3311                 if (last == 0) {
3312                         err = btrfs_run_delayed_refs(trans, root,
3313                                                      (unsigned long)-1);
3314                         if (err) /* File system offline */
3315                                 goto out;
3316                 }
3317
3318                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3319                 while (cache) {
3320                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3321                                 btrfs_put_block_group(cache);
3322                                 goto again;
3323                         }
3324
3325                         if (cache->dirty)
3326                                 break;
3327                         cache = next_block_group(root, cache);
3328                 }
3329                 if (!cache) {
3330                         if (last == 0)
3331                                 break;
3332                         last = 0;
3333                         continue;
3334                 }
3335
3336                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3337                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3338                 cache->dirty = 0;
3339                 last = cache->key.objectid + cache->key.offset;
3340
3341                 err = write_one_cache_group(trans, root, path, cache);
3342                 btrfs_put_block_group(cache);
3343                 if (err) /* File system offline */
3344                         goto out;
3345         }
3346
3347         while (1) {
3348                 /*
3349                  * I don't think this is needed since we're just marking our
3350                  * preallocated extent as written, but it can't hurt just in
3351                  * case.
3352                  */
3353                 if (last == 0) {
3354                         err = btrfs_run_delayed_refs(trans, root,
3355                                                      (unsigned long)-1);
3356                         if (err) /* File system offline */
3357                                 goto out;
3358                 }
3359
3360                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3361                 while (cache) {
3362                         /*
3363                          * Really this shouldn't happen, but it could if we
3364                          * couldn't write the entire preallocated extent and
3365                          * splitting the extent resulted in a new block.
3366                          */
3367                         if (cache->dirty) {
3368                                 btrfs_put_block_group(cache);
3369                                 goto again;
3370                         }
3371                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3372                                 break;
3373                         cache = next_block_group(root, cache);
3374                 }
3375                 if (!cache) {
3376                         if (last == 0)
3377                                 break;
3378                         last = 0;
3379                         continue;
3380                 }
3381
3382                 err = btrfs_write_out_cache(root, trans, cache, path);
3383
3384                 /*
3385                  * If we didn't have an error then the cache state is still
3386                  * NEED_WRITE, so we can set it to WRITTEN.
3387                  */
3388                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3389                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3390                 last = cache->key.objectid + cache->key.offset;
3391                 btrfs_put_block_group(cache);
3392         }
3393 out:
3394
3395         btrfs_free_path(path);
3396         return err;
3397 }
3398
3399 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3400 {
3401         struct btrfs_block_group_cache *block_group;
3402         int readonly = 0;
3403
3404         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3405         if (!block_group || block_group->ro)
3406                 readonly = 1;
3407         if (block_group)
3408                 btrfs_put_block_group(block_group);
3409         return readonly;
3410 }
3411
3412 static const char *alloc_name(u64 flags)
3413 {
3414         switch (flags) {
3415         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3416                 return "mixed";
3417         case BTRFS_BLOCK_GROUP_METADATA:
3418                 return "metadata";
3419         case BTRFS_BLOCK_GROUP_DATA:
3420                 return "data";
3421         case BTRFS_BLOCK_GROUP_SYSTEM:
3422                 return "system";
3423         default:
3424                 WARN_ON(1);
3425                 return "invalid-combination";
3426         }
3427 }
3428
3429 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3430                              u64 total_bytes, u64 bytes_used,
3431                              struct btrfs_space_info **space_info)
3432 {
3433         struct btrfs_space_info *found;
3434         int i;
3435         int factor;
3436         int ret;
3437
3438         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3439                      BTRFS_BLOCK_GROUP_RAID10))
3440                 factor = 2;
3441         else
3442                 factor = 1;
3443
3444         found = __find_space_info(info, flags);
3445         if (found) {
3446                 spin_lock(&found->lock);
3447                 found->total_bytes += total_bytes;
3448                 found->disk_total += total_bytes * factor;
3449                 found->bytes_used += bytes_used;
3450                 found->disk_used += bytes_used * factor;
3451                 found->full = 0;
3452                 spin_unlock(&found->lock);
3453                 *space_info = found;
3454                 return 0;
3455         }
3456         found = kzalloc(sizeof(*found), GFP_NOFS);
3457         if (!found)
3458                 return -ENOMEM;
3459
3460         ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3461         if (ret) {
3462                 kfree(found);
3463                 return ret;
3464         }
3465
3466         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3467                 INIT_LIST_HEAD(&found->block_groups[i]);
3468         init_rwsem(&found->groups_sem);
3469         spin_lock_init(&found->lock);
3470         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3471         found->total_bytes = total_bytes;
3472         found->disk_total = total_bytes * factor;
3473         found->bytes_used = bytes_used;
3474         found->disk_used = bytes_used * factor;
3475         found->bytes_pinned = 0;
3476         found->bytes_reserved = 0;
3477         found->bytes_readonly = 0;
3478         found->bytes_may_use = 0;
3479         found->full = 0;
3480         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3481         found->chunk_alloc = 0;
3482         found->flush = 0;
3483         init_waitqueue_head(&found->wait);
3484
3485         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3486                                     info->space_info_kobj, "%s",
3487                                     alloc_name(found->flags));
3488         if (ret) {
                percpu_counter_destroy(&found->total_bytes_pinned);
3489                 kfree(found);
3490                 return ret;
3491         }
3492
3493         *space_info = found;
3494         list_add_rcu(&found->list, &info->space_info);
3495         if (flags & BTRFS_BLOCK_GROUP_DATA)
3496                 info->data_sinfo = found;
3497
3498         return ret;
3499 }
3500
3501 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3502 {
3503         u64 extra_flags = chunk_to_extended(flags) &
3504                                 BTRFS_EXTENDED_PROFILE_MASK;
3505
3506         write_seqlock(&fs_info->profiles_lock);
3507         if (flags & BTRFS_BLOCK_GROUP_DATA)
3508                 fs_info->avail_data_alloc_bits |= extra_flags;
3509         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3510                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3511         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3512                 fs_info->avail_system_alloc_bits |= extra_flags;
3513         write_sequnlock(&fs_info->profiles_lock);
3514 }
3515
3516 /*
3517  * returns target flags in extended format or 0 if restripe for this
3518  * chunk_type is not in progress
3519  *
3520  * should be called with either volume_mutex or balance_lock held
3521  */
3522 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3523 {
3524         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3525         u64 target = 0;
3526
3527         if (!bctl)
3528                 return 0;
3529
3530         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3531             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3532                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3533         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3534                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3535                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3536         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3537                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3538                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3539         }
3540
3541         return target;
3542 }
3543
3544 /*
3545  * @flags: available profiles in extended format (see ctree.h)
3546  *
3547  * Returns reduced profile in chunk format.  If profile changing is in
3548  * progress (either running or paused) picks the target profile (if it's
3549  * already available), otherwise falls back to plain reducing.
3550  */
3551 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3552 {
3553         /*
3554          * we add in the count of missing devices because we want
3555          * to make sure that any RAID levels on a degraded FS
3556          * continue to be honored.
3557          */
3558         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3559                 root->fs_info->fs_devices->missing_devices;
3560         u64 target;
3561         u64 tmp;
3562
3563         /*
3564          * see if restripe for this chunk_type is in progress, if so
3565          * try to reduce to the target profile
3566          */
3567         spin_lock(&root->fs_info->balance_lock);
3568         target = get_restripe_target(root->fs_info, flags);
3569         if (target) {
3570                 /* pick target profile only if it's already available */
3571                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3572                         spin_unlock(&root->fs_info->balance_lock);
3573                         return extended_to_chunk(target);
3574                 }
3575         }
3576         spin_unlock(&root->fs_info->balance_lock);
3577
3578         /* First, mask out the RAID levels which aren't possible */
3579         if (num_devices == 1)
3580                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3581                            BTRFS_BLOCK_GROUP_RAID5);
3582         if (num_devices < 3)
3583                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3584         if (num_devices < 4)
3585                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3586
3587         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3588                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3589                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3590         flags &= ~tmp;
3591
3592         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3593                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3594         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3595                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3596         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3597                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3598         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3599                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3600         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3601                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3602
3603         return extended_to_chunk(flags | tmp);
3604 }
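
/*
 * Editorial worked example (not part of the original source): with two
 * rw devices and flags = DATA | RAID0 | RAID1, the masking above leaves
 * both RAID0 and RAID1 (RAID10 and RAID6 need more devices), and the
 * priority chain then picks RAID1 over RAID0, so we return DATA | RAID1
 * in chunk format.
 */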
3605
3606 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3607 {
3608         unsigned seq;
3609
3610         do {
3611                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3612
3613                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3614                         flags |= root->fs_info->avail_data_alloc_bits;
3615                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3616                         flags |= root->fs_info->avail_system_alloc_bits;
3617                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3618                         flags |= root->fs_info->avail_metadata_alloc_bits;
3619         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3620
3621         return btrfs_reduce_alloc_profile(root, flags);
3622 }
3623
3624 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3625 {
3626         u64 flags;
3627         u64 ret;
3628
3629         if (data)
3630                 flags = BTRFS_BLOCK_GROUP_DATA;
3631         else if (root == root->fs_info->chunk_root)
3632                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3633         else
3634                 flags = BTRFS_BLOCK_GROUP_METADATA;
3635
3636         ret = get_alloc_profile(root, flags);
3637         return ret;
3638 }
3639
3640 /*
3641  * This will check the space that the inode allocates from to make sure we have
3642  * enough space for the requested number of bytes.
3643  */
3644 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3645 {
3646         struct btrfs_space_info *data_sinfo;
3647         struct btrfs_root *root = BTRFS_I(inode)->root;
3648         struct btrfs_fs_info *fs_info = root->fs_info;
3649         u64 used;
3650         int ret = 0, committed = 0, alloc_chunk = 1;
3651
3652         /* make sure bytes are sectorsize aligned */
3653         bytes = ALIGN(bytes, root->sectorsize);
3654
3655         if (btrfs_is_free_space_inode(inode)) {
3656                 committed = 1;
3657                 ASSERT(current->journal_info);
3658         }
3659
3660         data_sinfo = fs_info->data_sinfo;
3661         if (!data_sinfo)
3662                 goto alloc;
3663
3664 again:
3665         /* make sure we have enough space to handle the data first */
3666         spin_lock(&data_sinfo->lock);
3667         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3668                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3669                 data_sinfo->bytes_may_use;
3670
3671         if (used + bytes > data_sinfo->total_bytes) {
3672                 struct btrfs_trans_handle *trans;
3673
3674                 /*
3675                  * if we don't have enough free bytes in this space then we need
3676                  * to alloc a new chunk.
3677                  */
3678                 if (!data_sinfo->full && alloc_chunk) {
3679                         u64 alloc_target;
3680
3681                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3682                         spin_unlock(&data_sinfo->lock);
3683 alloc:
3684                         alloc_target = btrfs_get_alloc_profile(root, 1);
3685                         /*
3686                          * It is ugly that we don't call nolock join
3687                          * transaction for the free space inode case here.
3688                          * But it is safe because we only do the data space
3689                          * reservation for the free space cache in the
3690                          * transaction context: the common join transaction
3691                          * just increases the counter of the current
3692                          * transaction handle and doesn't try to acquire the
3693                          * trans_lock of the fs.
3694                          */
3695                         trans = btrfs_join_transaction(root);
3696                         if (IS_ERR(trans))
3697                                 return PTR_ERR(trans);
3698
3699                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3700                                              alloc_target,
3701                                              CHUNK_ALLOC_NO_FORCE);
3702                         btrfs_end_transaction(trans, root);
3703                         if (ret < 0) {
3704                                 if (ret != -ENOSPC)
3705                                         return ret;
3706                                 else
3707                                         goto commit_trans;
3708                         }
3709
3710                         if (!data_sinfo)
3711                                 data_sinfo = fs_info->data_sinfo;
3712
3713                         goto again;
3714                 }
3715
3716                 /*
3717                  * If we don't have enough pinned space to deal with this
3718                  * allocation, don't bother committing the transaction.
3719                  */
3720                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3721                                            bytes) < 0)
3722                         committed = 1;
3723                 spin_unlock(&data_sinfo->lock);
3724
3725                 /* commit the current transaction and try again */
3726 commit_trans:
3727                 if (!committed &&
3728                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3729                         committed = 1;
3730
3731                         trans = btrfs_join_transaction(root);
3732                         if (IS_ERR(trans))
3733                                 return PTR_ERR(trans);
3734                         ret = btrfs_commit_transaction(trans, root);
3735                         if (ret)
3736                                 return ret;
3737                         goto again;
3738                 }
3739
3740                 trace_btrfs_space_reservation(root->fs_info,
3741                                               "space_info:enospc",
3742                                               data_sinfo->flags, bytes, 1);
3743                 return -ENOSPC;
3744         }
3745         data_sinfo->bytes_may_use += bytes;
3746         trace_btrfs_space_reservation(root->fs_info, "space_info",
3747                                       data_sinfo->flags, bytes, 1);
3748         spin_unlock(&data_sinfo->lock);
3749
3750         return 0;
3751 }
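
/*
 * Editorial sketch (not part of the original source): reservations taken
 * here are paired with btrfs_free_reserved_data_space() once the space is
 * accounted elsewhere or the operation fails, exactly as cache_save_setup()
 * above does around btrfs_prealloc_file_range_trans():
 *
 *	ret = btrfs_check_data_free_space(inode, bytes);
 *	if (ret)
 *		return ret;
 *	// ... allocate or prealloc the range ...
 *	btrfs_free_reserved_data_space(inode, bytes);
 */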
3752
3753 /*
3754  * Called if we need to clear a data reservation for this inode.
3755  */
3756 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3757 {
3758         struct btrfs_root *root = BTRFS_I(inode)->root;
3759         struct btrfs_space_info *data_sinfo;
3760
3761         /* make sure bytes are sectorsize aligned */
3762         bytes = ALIGN(bytes, root->sectorsize);
3763
3764         data_sinfo = root->fs_info->data_sinfo;
3765         spin_lock(&data_sinfo->lock);
3766         WARN_ON(data_sinfo->bytes_may_use < bytes);
3767         data_sinfo->bytes_may_use -= bytes;
3768         trace_btrfs_space_reservation(root->fs_info, "space_info",
3769                                       data_sinfo->flags, bytes, 0);
3770         spin_unlock(&data_sinfo->lock);
3771 }
3772
3773 static void force_metadata_allocation(struct btrfs_fs_info *info)
3774 {
3775         struct list_head *head = &info->space_info;
3776         struct btrfs_space_info *found;
3777
3778         rcu_read_lock();
3779         list_for_each_entry_rcu(found, head, list) {
3780                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3781                         found->force_alloc = CHUNK_ALLOC_FORCE;
3782         }
3783         rcu_read_unlock();
3784 }
3785
3786 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3787 {
3788         return (global->size << 1);
3789 }
3790
3791 static int should_alloc_chunk(struct btrfs_root *root,
3792                               struct btrfs_space_info *sinfo, int force)
3793 {
3794         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3795         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3796         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3797         u64 thresh;
3798
3799         if (force == CHUNK_ALLOC_FORCE)
3800                 return 1;
3801
3802         /*
3803          * We need to take into account the global rsv because for all intents
3804          * and purposes it's used space.  Don't worry about locking the
3805          * global_rsv, it doesn't change except when the transaction commits.
3806          */
3807         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3808                 num_allocated += calc_global_rsv_need_space(global_rsv);
3809
3810         /*
3811          * in limited mode, we want to have some free space up to
3812          * about 1% of the FS size.
3813          */
3814         if (force == CHUNK_ALLOC_LIMITED) {
3815                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3816                 thresh = max_t(u64, 64 * 1024 * 1024,
3817                                div_factor_fine(thresh, 1));
3818
3819                 if (num_bytes - num_allocated < thresh)
3820                         return 1;
3821         }
3822
3823         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3824                 return 0;
3825         return 1;
3826 }
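
/*
 * Editorial worked example (not part of the original source):
 * div_factor_fine(thresh, 1) is thresh * 1 / 100, so in
 * CHUNK_ALLOC_LIMITED mode the threshold is max(64M, 1% of the fs).
 * On a 1T filesystem a limited allocation is allowed whenever less than
 * ~10G of this space info remains unallocated.
 */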
3827
3828 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3829 {
3830         u64 num_dev;
3831
3832         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3833                     BTRFS_BLOCK_GROUP_RAID0 |
3834                     BTRFS_BLOCK_GROUP_RAID5 |
3835                     BTRFS_BLOCK_GROUP_RAID6))
3836                 num_dev = root->fs_info->fs_devices->rw_devices;
3837         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3838                 num_dev = 2;
3839         else
3840                 num_dev = 1;    /* DUP or single */
3841
3842         /* metadata for updating the device and chunk trees */
3843         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3844 }
3845
3846 static void check_system_chunk(struct btrfs_trans_handle *trans,
3847                                struct btrfs_root *root, u64 type)
3848 {
3849         struct btrfs_space_info *info;
3850         u64 left;
3851         u64 thresh;
3852
3853         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3854         spin_lock(&info->lock);
3855         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3856                 info->bytes_reserved - info->bytes_readonly;
3857         spin_unlock(&info->lock);
3858
3859         thresh = get_system_chunk_thresh(root, type);
3860         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3861                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3862                         left, thresh, type);
3863                 dump_space_info(info, 0, 0);
3864         }
3865
3866         if (left < thresh) {
3867                 u64 flags;
3868
3869                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3870                 btrfs_alloc_chunk(trans, root, flags);
3871         }
3872 }
3873
3874 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3875                           struct btrfs_root *extent_root, u64 flags, int force)
3876 {
3877         struct btrfs_space_info *space_info;
3878         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3879         int wait_for_alloc = 0;
3880         int ret = 0;
3881
3882         /* Don't re-enter if we're already allocating a chunk */
3883         if (trans->allocating_chunk)
3884                 return -ENOSPC;
3885
3886         space_info = __find_space_info(extent_root->fs_info, flags);
3887         if (!space_info) {
3888                 ret = update_space_info(extent_root->fs_info, flags,
3889                                         0, 0, &space_info);
3890                 BUG_ON(ret); /* -ENOMEM */
3891         }
3892         BUG_ON(!space_info); /* Logic error */
3893
3894 again:
3895         spin_lock(&space_info->lock);
3896         if (force < space_info->force_alloc)
3897                 force = space_info->force_alloc;
3898         if (space_info->full) {
3899                 if (should_alloc_chunk(extent_root, space_info, force))
3900                         ret = -ENOSPC;
3901                 else
3902                         ret = 0;
3903                 spin_unlock(&space_info->lock);
3904                 return ret;
3905         }
3906
3907         if (!should_alloc_chunk(extent_root, space_info, force)) {
3908                 spin_unlock(&space_info->lock);
3909                 return 0;
3910         } else if (space_info->chunk_alloc) {
3911                 wait_for_alloc = 1;
3912         } else {
3913                 space_info->chunk_alloc = 1;
3914         }
3915
3916         spin_unlock(&space_info->lock);
3917
3918         mutex_lock(&fs_info->chunk_mutex);
3919
3920         /*
3921          * The chunk_mutex is held throughout the entirety of a chunk
3922          * allocation, so once we've acquired the chunk_mutex we know that the
3923          * other guy is done and we need to recheck and see if we should
3924          * allocate.
3925          */
3926         if (wait_for_alloc) {
3927                 mutex_unlock(&fs_info->chunk_mutex);
3928                 wait_for_alloc = 0;
3929                 goto again;
3930         }
3931
3932         trans->allocating_chunk = true;
3933
3934         /*
3935          * If we have mixed data/metadata chunks we want to make sure we keep
3936          * allocating mixed chunks instead of individual chunks.
3937          */
3938         if (btrfs_mixed_space_info(space_info))
3939                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3940
3941         /*
3942          * if we're doing a data chunk, go ahead and make sure that
3943          * we keep a reasonable number of metadata chunks allocated in the
3944          * FS as well.
3945          */
3946         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3947                 fs_info->data_chunk_allocations++;
3948                 if (!(fs_info->data_chunk_allocations %
3949                       fs_info->metadata_ratio))
3950                         force_metadata_allocation(fs_info);
3951         }
3952
3953         /*
3954          * Check if we have enough space in SYSTEM chunk because we may need
3955          * to update devices.
3956          */
3957         check_system_chunk(trans, extent_root, flags);
3958
3959         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3960         trans->allocating_chunk = false;
3961
3962         spin_lock(&space_info->lock);
3963         if (ret < 0 && ret != -ENOSPC)
3964                 goto out;
3965         if (ret)
3966                 space_info->full = 1;
3967         else
3968                 ret = 1;
3969
3970         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3971 out:
3972         space_info->chunk_alloc = 0;
3973         spin_unlock(&space_info->lock);
3974         mutex_unlock(&fs_info->chunk_mutex);
3975         return ret;
3976 }
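
/*
 * Editorial note (not part of the original source) on do_chunk_alloc()
 * return values: 1 means a chunk was allocated, 0 means no allocation was
 * needed, -ENOSPC means the space info is full, and any other negative
 * value is a hard error; btrfs_check_data_free_space() above only treats
 * errors other than -ENOSPC as fatal.
 */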
3977
3978 static int can_overcommit(struct btrfs_root *root,
3979                           struct btrfs_space_info *space_info, u64 bytes,
3980                           enum btrfs_reserve_flush_enum flush)
3981 {
3982         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3983         u64 profile = btrfs_get_alloc_profile(root, 0);
3984         u64 space_size;
3985         u64 avail;
3986         u64 used;
3987
3988         used = space_info->bytes_used + space_info->bytes_reserved +
3989                 space_info->bytes_pinned + space_info->bytes_readonly;
3990
3991         /*
3992          * We only want to allow over committing if we have lots of actual space
3993          * free, but if we don't have enough space to handle the global reserve
3994          * space then we could end up having a real enospc problem when trying
3995          * to allocate a chunk or some other such important allocation.
3996          */
3997         spin_lock(&global_rsv->lock);
3998         space_size = calc_global_rsv_need_space(global_rsv);
3999         spin_unlock(&global_rsv->lock);
4000         if (used + space_size >= space_info->total_bytes)
4001                 return 0;
4002
4003         used += space_info->bytes_may_use;
4004
4005         spin_lock(&root->fs_info->free_chunk_lock);
4006         avail = root->fs_info->free_chunk_space;
4007         spin_unlock(&root->fs_info->free_chunk_lock);
4008
4009         /*
4010          * If we have dup, raid1 or raid10 then only half of the free
4011          * space is actually usable.  For raid56, the space info used
4012          * doesn't include the parity drive, so we don't have to
4013          * change the math.
4014          */
4015         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4016                        BTRFS_BLOCK_GROUP_RAID1 |
4017                        BTRFS_BLOCK_GROUP_RAID10))
4018                 avail >>= 1;
4019
4020         /*
4021          * If we aren't flushing all things, let us overcommit up to
4022          * 1/2 of the space.  If we can flush, don't let us overcommit
4023          * too much; only let it overcommit up to 1/8 of the space.
4024          */
4025         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4026                 avail >>= 3;
4027         else
4028                 avail >>= 1;
4029
4030         if (used + bytes < space_info->total_bytes + avail)
4031                 return 1;
4032         return 0;
4033 }
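
/*
 * Editorial worked example (not part of the original source): with a
 * raid1 metadata profile and 8G of unallocated chunk space, avail is
 * first halved to 4G (two copies), then cut to 1/8th (512M) for
 * BTRFS_RESERVE_FLUSH_ALL, so a reservation only overcommits while
 * used + bytes stays below total_bytes + 512M.
 */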
4034
4035 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4036                                          unsigned long nr_pages)
4037 {
4038         struct super_block *sb = root->fs_info->sb;
4039
4040         if (down_read_trylock(&sb->s_umount)) {
4041                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4042                 up_read(&sb->s_umount);
4043         } else {
4044                 /*
4045                  * We needn't worry about the filesystem going from r/w to r/o
4046                  * even though we don't acquire the ->s_umount mutex, because
4047                  * the filesystem should guarantee that the delalloc inode list
4048                  * is empty once the filesystem is readonly (all dirty pages
4049                  * have been written to disk).
4050                  */
4051                 btrfs_start_delalloc_roots(root->fs_info, 0);
4052                 if (!current->journal_info)
4053                         btrfs_wait_ordered_roots(root->fs_info, -1);
4054         }
4055 }
4056
4057 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4058 {
4059         u64 bytes;
4060         int nr;
4061
4062         bytes = btrfs_calc_trans_metadata_size(root, 1);
4063         nr = (int)div64_u64(to_reclaim, bytes);
4064         if (!nr)
4065                 nr = 1;
4066         return nr;
4067 }
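
/*
 * Editor's illustration (hypothetical numbers): if one item of transaction
 * metadata costs 256KiB on a given filesystem, asking to reclaim 1MiB yields
 * nr = 1MiB / 256KiB = 4 items; shrink_delalloc() below then rounds its
 * target up to 4 * EXTENT_SIZE_PER_ITEM = 1MiB of delalloc to flush.
 */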
4068
4069 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4070
4071 /*
4072  * shrink metadata reservation for delalloc
4073  */
4074 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4075                             bool wait_ordered)
4076 {
4077         struct btrfs_block_rsv *block_rsv;
4078         struct btrfs_space_info *space_info;
4079         struct btrfs_trans_handle *trans;
4080         u64 delalloc_bytes;
4081         u64 max_reclaim;
4082         long time_left;
4083         unsigned long nr_pages;
4084         int loops;
4085         int items;
4086         enum btrfs_reserve_flush_enum flush;
4087
        /* Calculate the number of metadata items we need to flush for this reservation */
4089         items = calc_reclaim_items_nr(root, to_reclaim);
4090         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4091
4092         trans = (struct btrfs_trans_handle *)current->journal_info;
4093         block_rsv = &root->fs_info->delalloc_block_rsv;
4094         space_info = block_rsv->space_info;
4095
4096         delalloc_bytes = percpu_counter_sum_positive(
4097                                                 &root->fs_info->delalloc_bytes);
4098         if (delalloc_bytes == 0) {
4099                 if (trans)
4100                         return;
4101                 if (wait_ordered)
4102                         btrfs_wait_ordered_roots(root->fs_info, items);
4103                 return;
4104         }
4105
4106         loops = 0;
4107         while (delalloc_bytes && loops < 3) {
4108                 max_reclaim = min(delalloc_bytes, to_reclaim);
4109                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4110                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
4111                 /*
4112                  * We need to wait for the async pages to actually start before
4113                  * we do anything.
4114                  */
4115                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4116                 if (!max_reclaim)
4117                         goto skip_async;
4118
4119                 if (max_reclaim <= nr_pages)
4120                         max_reclaim = 0;
4121                 else
4122                         max_reclaim -= nr_pages;
4123
4124                 wait_event(root->fs_info->async_submit_wait,
4125                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4126                            (int)max_reclaim);
4127 skip_async:
4128                 if (!trans)
4129                         flush = BTRFS_RESERVE_FLUSH_ALL;
4130                 else
4131                         flush = BTRFS_RESERVE_NO_FLUSH;
4132                 spin_lock(&space_info->lock);
4133                 if (can_overcommit(root, space_info, orig, flush)) {
4134                         spin_unlock(&space_info->lock);
4135                         break;
4136                 }
4137                 spin_unlock(&space_info->lock);
4138
4139                 loops++;
4140                 if (wait_ordered && !trans) {
4141                         btrfs_wait_ordered_roots(root->fs_info, items);
4142                 } else {
4143                         time_left = schedule_timeout_killable(1);
4144                         if (time_left)
4145                                 break;
4146                 }
4147                 delalloc_bytes = percpu_counter_sum_positive(
4148                                                 &root->fs_info->delalloc_bytes);
4149         }
4150 }
4151
/**
 * may_commit_transaction - possibly commit the transaction if it's OK to
 * @root: the root we're allocating for
 * @space_info: the space_info we're trying to reserve from
 * @bytes: the number of bytes we want to reserve
 * @force: force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
4162 static int may_commit_transaction(struct btrfs_root *root,
4163                                   struct btrfs_space_info *space_info,
4164                                   u64 bytes, int force)
4165 {
4166         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4167         struct btrfs_trans_handle *trans;
4168
4169         trans = (struct btrfs_trans_handle *)current->journal_info;
4170         if (trans)
4171                 return -EAGAIN;
4172
4173         if (force)
4174                 goto commit;
4175
4176         /* See if there is enough pinned space to make this reservation */
4177         spin_lock(&space_info->lock);
4178         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4179                                    bytes) >= 0) {
4180                 spin_unlock(&space_info->lock);
4181                 goto commit;
4182         }
4183         spin_unlock(&space_info->lock);
4184
4185         /*
4186          * See if there is some space in the delayed insertion reservation for
4187          * this reservation.
4188          */
4189         if (space_info != delayed_rsv->space_info)
4190                 return -ENOSPC;
4191
4192         spin_lock(&space_info->lock);
4193         spin_lock(&delayed_rsv->lock);
4194         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4195                                    bytes - delayed_rsv->size) >= 0) {
4196                 spin_unlock(&delayed_rsv->lock);
4197                 spin_unlock(&space_info->lock);
4198                 return -ENOSPC;
4199         }
4200         spin_unlock(&delayed_rsv->lock);
4201         spin_unlock(&space_info->lock);
4202
4203 commit:
4204         trans = btrfs_join_transaction(root);
4205         if (IS_ERR(trans))
4206                 return -ENOSPC;
4207
4208         return btrfs_commit_transaction(trans, root);
4209 }
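
/*
 * Editor's illustration: if 8MiB worth of extents are pinned and will return
 * to this space_info at commit time, a 4MiB reservation passes the
 * percpu_counter_compare() check above and the commit is worth doing; with
 * only 2MiB pinned the commit alone can't satisfy us, so we fall through to
 * checking the delayed insertion reservation as well.
 */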
4210
4211 enum flush_state {
4212         FLUSH_DELAYED_ITEMS_NR  =       1,
4213         FLUSH_DELAYED_ITEMS     =       2,
4214         FLUSH_DELALLOC          =       3,
4215         FLUSH_DELALLOC_WAIT     =       4,
4216         ALLOC_CHUNK             =       5,
4217         COMMIT_TRANS            =       6,
4218 };
4219
4220 static int flush_space(struct btrfs_root *root,
4221                        struct btrfs_space_info *space_info, u64 num_bytes,
4222                        u64 orig_bytes, int state)
4223 {
4224         struct btrfs_trans_handle *trans;
4225         int nr;
4226         int ret = 0;
4227
4228         switch (state) {
4229         case FLUSH_DELAYED_ITEMS_NR:
4230         case FLUSH_DELAYED_ITEMS:
4231                 if (state == FLUSH_DELAYED_ITEMS_NR)
4232                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4233                 else
4234                         nr = -1;
4235
4236                 trans = btrfs_join_transaction(root);
4237                 if (IS_ERR(trans)) {
4238                         ret = PTR_ERR(trans);
4239                         break;
4240                 }
4241                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4242                 btrfs_end_transaction(trans, root);
4243                 break;
4244         case FLUSH_DELALLOC:
4245         case FLUSH_DELALLOC_WAIT:
4246                 shrink_delalloc(root, num_bytes, orig_bytes,
4247                                 state == FLUSH_DELALLOC_WAIT);
4248                 break;
4249         case ALLOC_CHUNK:
4250                 trans = btrfs_join_transaction(root);
4251                 if (IS_ERR(trans)) {
4252                         ret = PTR_ERR(trans);
4253                         break;
4254                 }
4255                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4256                                      btrfs_get_alloc_profile(root, 0),
4257                                      CHUNK_ALLOC_NO_FORCE);
4258                 btrfs_end_transaction(trans, root);
4259                 if (ret == -ENOSPC)
4260                         ret = 0;
4261                 break;
4262         case COMMIT_TRANS:
4263                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4264                 break;
4265         default:
4266                 ret = -ENOSPC;
4267                 break;
4268         }
4269
4270         return ret;
4271 }
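
/*
 * Editor's note: reserve_metadata_bytes() below walks these states in
 * ascending order, so a struggling reservation escalates from flushing a few
 * delayed items (FLUSH_DELAYED_ITEMS_NR), through flushing and waiting on
 * delalloc, to allocating a new chunk, and finally to committing the
 * transaction (COMMIT_TRANS) before giving up with -ENOSPC.
 */
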
/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root: the root we're allocating for
 * @block_rsv: the block_rsv we're allocating for
 * @orig_bytes: the number of bytes we want
 * @flush: whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
4286 static int reserve_metadata_bytes(struct btrfs_root *root,
4287                                   struct btrfs_block_rsv *block_rsv,
4288                                   u64 orig_bytes,
4289                                   enum btrfs_reserve_flush_enum flush)
4290 {
4291         struct btrfs_space_info *space_info = block_rsv->space_info;
4292         u64 used;
4293         u64 num_bytes = orig_bytes;
4294         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4295         int ret = 0;
4296         bool flushing = false;
4297
4298 again:
4299         ret = 0;
4300         spin_lock(&space_info->lock);
4301         /*
4302          * We only want to wait if somebody other than us is flushing and we
4303          * are actually allowed to flush all things.
4304          */
4305         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4306                space_info->flush) {
4307                 spin_unlock(&space_info->lock);
4308                 /*
4309                  * If we have a trans handle we can't wait because the flusher
4310                  * may have to commit the transaction, which would mean we would
4311                  * deadlock since we are waiting for the flusher to finish, but
4312                  * hold the current transaction open.
4313                  */
4314                 if (current->journal_info)
4315                         return -EAGAIN;
4316                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4317                 /* Must have been killed, return */
4318                 if (ret)
4319                         return -EINTR;
4320
4321                 spin_lock(&space_info->lock);
4322         }
4323
4324         ret = -ENOSPC;
4325         used = space_info->bytes_used + space_info->bytes_reserved +
4326                 space_info->bytes_pinned + space_info->bytes_readonly +
4327                 space_info->bytes_may_use;
4328
        /*
         * The idea here is that if we haven't already over-reserved the space
         * then we can go ahead and save our reservation first and then start
         * flushing if we need to.  Otherwise, if we've already overcommitted,
         * let's start flushing stuff first and then come back and try to make
         * our reservation.
         */
4336         if (used <= space_info->total_bytes) {
4337                 if (used + orig_bytes <= space_info->total_bytes) {
4338                         space_info->bytes_may_use += orig_bytes;
4339                         trace_btrfs_space_reservation(root->fs_info,
4340                                 "space_info", space_info->flags, orig_bytes, 1);
4341                         ret = 0;
4342                 } else {
                        /*
                         * Ok, set num_bytes to orig_bytes since we aren't
                         * overcommitted, this way we only try to reclaim what
                         * we need.
                         */
4348                         num_bytes = orig_bytes;
4349                 }
4350         } else {
                /*
                 * Ok, we're overcommitted, set num_bytes to the overcommitted
                 * amount plus the number of bytes that we need for this
                 * reservation.
                 */
4356                 num_bytes = used - space_info->total_bytes +
4357                         (orig_bytes * 2);
4358         }
4359
4360         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4361                 space_info->bytes_may_use += orig_bytes;
4362                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4363                                               space_info->flags, orig_bytes,
4364                                               1);
4365                 ret = 0;
4366         }
4367
        /*
         * Couldn't make our reservation, save our place so that while we're
         * trying to reclaim space we can actually use it instead of somebody
         * else stealing it from us.
         *
         * We make the other tasks wait for the flush only when we can flush
         * all things.
         */
4376         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4377                 flushing = true;
4378                 space_info->flush = 1;
4379         }
4380
4381         spin_unlock(&space_info->lock);
4382
4383         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4384                 goto out;
4385
4386         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4387                           flush_state);
4388         flush_state++;
4389
        /*
         * If we are FLUSH_LIMIT, we can not flush delalloc, or a deadlock
         * would happen, so skip the delalloc flush states.
         */
4394         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4395             (flush_state == FLUSH_DELALLOC ||
4396              flush_state == FLUSH_DELALLOC_WAIT))
4397                 flush_state = ALLOC_CHUNK;
4398
4399         if (!ret)
4400                 goto again;
4401         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4402                  flush_state < COMMIT_TRANS)
4403                 goto again;
4404         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4405                  flush_state <= COMMIT_TRANS)
4406                 goto again;
4407
4408 out:
4409         if (ret == -ENOSPC &&
4410             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4411                 struct btrfs_block_rsv *global_rsv =
4412                         &root->fs_info->global_block_rsv;
4413
4414                 if (block_rsv != global_rsv &&
4415                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4416                         ret = 0;
4417         }
4418         if (ret == -ENOSPC)
4419                 trace_btrfs_space_reservation(root->fs_info,
4420                                               "space_info:enospc",
4421                                               space_info->flags, orig_bytes, 1);
4422         if (flushing) {
4423                 spin_lock(&space_info->lock);
4424                 space_info->flush = 0;
4425                 wake_up_all(&space_info->wait);
4426                 spin_unlock(&space_info->lock);
4427         }
4428         return ret;
4429 }
4430
4431 static struct btrfs_block_rsv *get_block_rsv(
4432                                         const struct btrfs_trans_handle *trans,
4433                                         const struct btrfs_root *root)
4434 {
4435         struct btrfs_block_rsv *block_rsv = NULL;
4436
4437         if (root->ref_cows)
4438                 block_rsv = trans->block_rsv;
4439
4440         if (root == root->fs_info->csum_root && trans->adding_csums)
4441                 block_rsv = trans->block_rsv;
4442
4443         if (root == root->fs_info->uuid_root)
4444                 block_rsv = trans->block_rsv;
4445
4446         if (!block_rsv)
4447                 block_rsv = root->block_rsv;
4448
4449         if (!block_rsv)
4450                 block_rsv = &root->fs_info->empty_block_rsv;
4451
4452         return block_rsv;
4453 }
4454
4455 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4456                                u64 num_bytes)
4457 {
4458         int ret = -ENOSPC;
4459         spin_lock(&block_rsv->lock);
4460         if (block_rsv->reserved >= num_bytes) {
4461                 block_rsv->reserved -= num_bytes;
4462                 if (block_rsv->reserved < block_rsv->size)
4463                         block_rsv->full = 0;
4464                 ret = 0;
4465         }
4466         spin_unlock(&block_rsv->lock);
4467         return ret;
4468 }
4469
4470 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4471                                 u64 num_bytes, int update_size)
4472 {
4473         spin_lock(&block_rsv->lock);
4474         block_rsv->reserved += num_bytes;
4475         if (update_size)
4476                 block_rsv->size += num_bytes;
4477         else if (block_rsv->reserved >= block_rsv->size)
4478                 block_rsv->full = 1;
4479         spin_unlock(&block_rsv->lock);
4480 }
4481
4482 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4483                              struct btrfs_block_rsv *dest, u64 num_bytes,
4484                              int min_factor)
4485 {
4486         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4487         u64 min_bytes;
4488
4489         if (global_rsv->space_info != dest->space_info)
4490                 return -ENOSPC;
4491
4492         spin_lock(&global_rsv->lock);
4493         min_bytes = div_factor(global_rsv->size, min_factor);
4494         if (global_rsv->reserved < min_bytes + num_bytes) {
4495                 spin_unlock(&global_rsv->lock);
4496                 return -ENOSPC;
4497         }
4498         global_rsv->reserved -= num_bytes;
4499         if (global_rsv->reserved < global_rsv->size)
4500                 global_rsv->full = 0;
4501         spin_unlock(&global_rsv->lock);
4502
4503         block_rsv_add_bytes(dest, num_bytes, 1);
4504         return 0;
4505 }
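
/*
 * Editor's illustration: div_factor(num, f) computes num * f / 10, so with a
 * hypothetical 512MiB global reserve and min_factor = 5 the migration above
 * succeeds only while global_rsv->reserved is at least 256MiB + num_bytes,
 * i.e. at least half of the global reserve must remain after the transfer.
 */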
4506
4507 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4508                                     struct btrfs_block_rsv *block_rsv,
4509                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4510 {
4511         struct btrfs_space_info *space_info = block_rsv->space_info;
4512
4513         spin_lock(&block_rsv->lock);
4514         if (num_bytes == (u64)-1)
4515                 num_bytes = block_rsv->size;
4516         block_rsv->size -= num_bytes;
4517         if (block_rsv->reserved >= block_rsv->size) {
4518                 num_bytes = block_rsv->reserved - block_rsv->size;
4519                 block_rsv->reserved = block_rsv->size;
4520                 block_rsv->full = 1;
4521         } else {
4522                 num_bytes = 0;
4523         }
4524         spin_unlock(&block_rsv->lock);
4525
4526         if (num_bytes > 0) {
4527                 if (dest) {
4528                         spin_lock(&dest->lock);
4529                         if (!dest->full) {
4530                                 u64 bytes_to_add;
4531
4532                                 bytes_to_add = dest->size - dest->reserved;
4533                                 bytes_to_add = min(num_bytes, bytes_to_add);
4534                                 dest->reserved += bytes_to_add;
4535                                 if (dest->reserved >= dest->size)
4536                                         dest->full = 1;
4537                                 num_bytes -= bytes_to_add;
4538                         }
4539                         spin_unlock(&dest->lock);
4540                 }
4541                 if (num_bytes) {
4542                         spin_lock(&space_info->lock);
4543                         space_info->bytes_may_use -= num_bytes;
4544                         trace_btrfs_space_reservation(fs_info, "space_info",
4545                                         space_info->flags, num_bytes, 0);
4546                         spin_unlock(&space_info->lock);
4547                 }
4548         }
4549 }
4550
4551 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4552                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4553 {
4554         int ret;
4555
4556         ret = block_rsv_use_bytes(src, num_bytes);
4557         if (ret)
4558                 return ret;
4559
4560         block_rsv_add_bytes(dst, num_bytes, 1);
4561         return 0;
4562 }
4563
4564 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4565 {
4566         memset(rsv, 0, sizeof(*rsv));
4567         spin_lock_init(&rsv->lock);
4568         rsv->type = type;
4569 }
4570
4571 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4572                                               unsigned short type)
4573 {
4574         struct btrfs_block_rsv *block_rsv;
4575         struct btrfs_fs_info *fs_info = root->fs_info;
4576
4577         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4578         if (!block_rsv)
4579                 return NULL;
4580
4581         btrfs_init_block_rsv(block_rsv, type);
4582         block_rsv->space_info = __find_space_info(fs_info,
4583                                                   BTRFS_BLOCK_GROUP_METADATA);
4584         return block_rsv;
4585 }
4586
4587 void btrfs_free_block_rsv(struct btrfs_root *root,
4588                           struct btrfs_block_rsv *rsv)
4589 {
4590         if (!rsv)
4591                 return;
4592         btrfs_block_rsv_release(root, rsv, (u64)-1);
4593         kfree(rsv);
4594 }
4595
4596 int btrfs_block_rsv_add(struct btrfs_root *root,
4597                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4598                         enum btrfs_reserve_flush_enum flush)
4599 {
4600         int ret;
4601
4602         if (num_bytes == 0)
4603                 return 0;
4604
4605         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4606         if (!ret) {
4607                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4608                 return 0;
4609         }
4610
4611         return ret;
4612 }
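
/*
 * Editor's sketch (hypothetical helper, not part of the original file): the
 * typical lifecycle of a private reservation built from the helpers above,
 * assuming the short-lived BTRFS_BLOCK_RSV_TEMP type.
 */
static int __maybe_unused example_rsv_lifecycle(struct btrfs_root *root)
{
	struct btrfs_block_rsv *rsv;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	int ret;

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;

	/* on success this grows both rsv->size and rsv->reserved */
	ret = btrfs_block_rsv_add(root, rsv, num_bytes, BTRFS_RESERVE_FLUSH_ALL);

	/* ... consume the reservation here ... */

	/* releases any remaining reservation and frees the rsv itself */
	btrfs_free_block_rsv(root, rsv);
	return ret;
}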
4613
4614 int btrfs_block_rsv_check(struct btrfs_root *root,
4615                           struct btrfs_block_rsv *block_rsv, int min_factor)
4616 {
4617         u64 num_bytes = 0;
4618         int ret = -ENOSPC;
4619
4620         if (!block_rsv)
4621                 return 0;
4622
4623         spin_lock(&block_rsv->lock);
4624         num_bytes = div_factor(block_rsv->size, min_factor);
4625         if (block_rsv->reserved >= num_bytes)
4626                 ret = 0;
4627         spin_unlock(&block_rsv->lock);
4628
4629         return ret;
4630 }
4631
4632 int btrfs_block_rsv_refill(struct btrfs_root *root,
4633                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4634                            enum btrfs_reserve_flush_enum flush)
4635 {
4636         u64 num_bytes = 0;
4637         int ret = -ENOSPC;
4638
4639         if (!block_rsv)
4640                 return 0;
4641
4642         spin_lock(&block_rsv->lock);
4643         num_bytes = min_reserved;
4644         if (block_rsv->reserved >= num_bytes)
4645                 ret = 0;
4646         else
4647                 num_bytes -= block_rsv->reserved;
4648         spin_unlock(&block_rsv->lock);
4649
4650         if (!ret)
4651                 return 0;
4652
4653         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4654         if (!ret) {
4655                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4656                 return 0;
4657         }
4658
4659         return ret;
4660 }
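
/*
 * Editor's note: btrfs_block_rsv_add() above passes update_size = 1, growing
 * rsv->size together with rsv->reserved, while btrfs_block_rsv_refill()
 * passes update_size = 0: it only tops rsv->reserved back up to min_reserved
 * without changing the target size of the reservation.
 */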
4661
4662 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4663                             struct btrfs_block_rsv *dst_rsv,
4664                             u64 num_bytes)
4665 {
4666         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4667 }
4668
4669 void btrfs_block_rsv_release(struct btrfs_root *root,
4670                              struct btrfs_block_rsv *block_rsv,
4671                              u64 num_bytes)
4672 {
4673         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4674         if (global_rsv->full || global_rsv == block_rsv ||
4675             block_rsv->space_info != global_rsv->space_info)
4676                 global_rsv = NULL;
4677         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4678                                 num_bytes);
4679 }
4680
/*
 * helper to calculate the size of the global block reservation.
 * the desired value is the sum of the space used by the extent tree,
 * checksum tree and root tree
 */
4686 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4687 {
4688         struct btrfs_space_info *sinfo;
4689         u64 num_bytes;
4690         u64 meta_used;
4691         u64 data_used;
4692         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4693
4694         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4695         spin_lock(&sinfo->lock);
4696         data_used = sinfo->bytes_used;
4697         spin_unlock(&sinfo->lock);
4698
4699         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4700         spin_lock(&sinfo->lock);
4701         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4702                 data_used = 0;
4703         meta_used = sinfo->bytes_used;
4704         spin_unlock(&sinfo->lock);
4705
4706         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4707                     csum_size * 2;
4708         num_bytes += div64_u64(data_used + meta_used, 50);
4709
4710         if (num_bytes * 3 > meta_used)
4711                 num_bytes = div64_u64(meta_used, 3);
4712
4713         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4714 }
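
/*
 * Editor's illustration (hypothetical numbers): with 100GiB of data used,
 * 10GiB of metadata used, 4KiB blocks and a 4-byte csum, the estimate is
 * (100GiB / 4KiB) * 4 * 2 = 200MiB for checksums plus 110GiB / 50 = 2.2GiB,
 * roughly 2.4GiB in total; the cap to one third of the metadata in use
 * (here 10GiB / 3 = 3.3GiB) does not kick in.
 */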
4715
4716 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4717 {
4718         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4719         struct btrfs_space_info *sinfo = block_rsv->space_info;
4720         u64 num_bytes;
4721
4722         num_bytes = calc_global_metadata_size(fs_info);
4723
4724         spin_lock(&sinfo->lock);
4725         spin_lock(&block_rsv->lock);
4726
4727         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4728
4729         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4730                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4731                     sinfo->bytes_may_use;
4732
4733         if (sinfo->total_bytes > num_bytes) {
4734                 num_bytes = sinfo->total_bytes - num_bytes;
4735                 block_rsv->reserved += num_bytes;
4736                 sinfo->bytes_may_use += num_bytes;
4737                 trace_btrfs_space_reservation(fs_info, "space_info",
4738                                       sinfo->flags, num_bytes, 1);
4739         }
4740
4741         if (block_rsv->reserved >= block_rsv->size) {
4742                 num_bytes = block_rsv->reserved - block_rsv->size;
4743                 sinfo->bytes_may_use -= num_bytes;
4744                 trace_btrfs_space_reservation(fs_info, "space_info",
4745                                       sinfo->flags, num_bytes, 0);
4746                 block_rsv->reserved = block_rsv->size;
4747                 block_rsv->full = 1;
4748         }
4749
4750         spin_unlock(&block_rsv->lock);
4751         spin_unlock(&sinfo->lock);
4752 }
4753
4754 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4755 {
4756         struct btrfs_space_info *space_info;
4757
4758         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4759         fs_info->chunk_block_rsv.space_info = space_info;
4760
4761         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4762         fs_info->global_block_rsv.space_info = space_info;
4763         fs_info->delalloc_block_rsv.space_info = space_info;
4764         fs_info->trans_block_rsv.space_info = space_info;
4765         fs_info->empty_block_rsv.space_info = space_info;
4766         fs_info->delayed_block_rsv.space_info = space_info;
4767
4768         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4769         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4770         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4771         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4772         if (fs_info->quota_root)
4773                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4774         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4775
4776         update_global_block_rsv(fs_info);
4777 }
4778
4779 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4780 {
4781         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4782                                 (u64)-1);
4783         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4784         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4785         WARN_ON(fs_info->trans_block_rsv.size > 0);
4786         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4787         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4788         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4789         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4790         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4791 }
4792
4793 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4794                                   struct btrfs_root *root)
4795 {
4796         if (!trans->block_rsv)
4797                 return;
4798
4799         if (!trans->bytes_reserved)
4800                 return;
4801
4802         trace_btrfs_space_reservation(root->fs_info, "transaction",
4803                                       trans->transid, trans->bytes_reserved, 0);
4804         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4805         trans->bytes_reserved = 0;
4806 }
4807
4808 /* Can only return 0 or -ENOSPC */
4809 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4810                                   struct inode *inode)
4811 {
4812         struct btrfs_root *root = BTRFS_I(inode)->root;
4813         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4814         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4815
4816         /*
4817          * We need to hold space in order to delete our orphan item once we've
4818          * added it, so this takes the reservation so we can release it later
4819          * when we are truly done with the orphan item.
4820          */
4821         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4822         trace_btrfs_space_reservation(root->fs_info, "orphan",
4823                                       btrfs_ino(inode), num_bytes, 1);
4824         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4825 }
4826
4827 void btrfs_orphan_release_metadata(struct inode *inode)
4828 {
4829         struct btrfs_root *root = BTRFS_I(inode)->root;
4830         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4831         trace_btrfs_space_reservation(root->fs_info, "orphan",
4832                                       btrfs_ino(inode), num_bytes, 0);
4833         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4834 }
4835
/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items we need to reserve space for
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion.  Those operations are different from the
 * common file/directory operations: they change two fs/file trees
 * and the root tree, and the number of items that the qgroup reserves
 * is different from the free space reservation.  So we can not use
 * the space reservation mechanism in start_transaction().
 */
4850 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4851                                      struct btrfs_block_rsv *rsv,
4852                                      int items,
4853                                      u64 *qgroup_reserved,
4854                                      bool use_global_rsv)
4855 {
4856         u64 num_bytes;
4857         int ret;
4858         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4859
4860         if (root->fs_info->quota_enabled) {
4861                 /* One for parent inode, two for dir entries */
4862                 num_bytes = 3 * root->leafsize;
4863                 ret = btrfs_qgroup_reserve(root, num_bytes);
4864                 if (ret)
4865                         return ret;
4866         } else {
4867                 num_bytes = 0;
4868         }
4869
4870         *qgroup_reserved = num_bytes;
4871
4872         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4873         rsv->space_info = __find_space_info(root->fs_info,
4874                                             BTRFS_BLOCK_GROUP_METADATA);
4875         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4876                                   BTRFS_RESERVE_FLUSH_ALL);
4877
4878         if (ret == -ENOSPC && use_global_rsv)
4879                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4880
4881         if (ret) {
4882                 if (*qgroup_reserved)
4883                         btrfs_qgroup_free(root, *qgroup_reserved);
4884         }
4885
4886         return ret;
4887 }
4888
4889 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4890                                       struct btrfs_block_rsv *rsv,
4891                                       u64 qgroup_reserved)
4892 {
4893         btrfs_block_rsv_release(root, rsv, (u64)-1);
4894         if (qgroup_reserved)
4895                 btrfs_qgroup_free(root, qgroup_reserved);
4896 }
4897
4898 /**
4899  * drop_outstanding_extent - drop an outstanding extent
4900  * @inode: the inode we're dropping the extent for
4901  *
 * This is called when we are freeing up an outstanding extent, either
 * after an error or after an extent is written.  This will return the number of
4904  * reserved extents that need to be freed.  This must be called with
4905  * BTRFS_I(inode)->lock held.
4906  */
4907 static unsigned drop_outstanding_extent(struct inode *inode)
4908 {
4909         unsigned drop_inode_space = 0;
4910         unsigned dropped_extents = 0;
4911
4912         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4913         BTRFS_I(inode)->outstanding_extents--;
4914
4915         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4916             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4917                                &BTRFS_I(inode)->runtime_flags))
4918                 drop_inode_space = 1;
4919
        /*
         * If we have as many or more outstanding extents than we have
         * reserved then we need to leave the reserved extents count alone.
         */
4924         if (BTRFS_I(inode)->outstanding_extents >=
4925             BTRFS_I(inode)->reserved_extents)
4926                 return drop_inode_space;
4927
4928         dropped_extents = BTRFS_I(inode)->reserved_extents -
4929                 BTRFS_I(inode)->outstanding_extents;
4930         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4931         return dropped_extents + drop_inode_space;
4932 }
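
/*
 * Editor's illustration: with outstanding_extents dropping 3 -> 2 and
 * reserved_extents at 4, the function above returns 4 - 2 = 2 dropped
 * extents and trims reserved_extents to 2; the extra inode-update
 * reservation (drop_inode_space) is only handed back once
 * outstanding_extents reaches zero.
 */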
4933
4934 /**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *      reserved/freed for the given bytes.
4937  * @inode: the inode we're manipulating
4938  * @num_bytes: the number of bytes in question
4939  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4940  *
4941  * This adjusts the number of csum_bytes in the inode and then returns the
4942  * correct amount of metadata that must either be reserved or freed.  We
4943  * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure out
4945  * how many checksums will be required.  If we are adding bytes then the number
4946  * may go up and we will return the number of additional bytes that must be
4947  * reserved.  If it is going down we will return the number of bytes that must
4948  * be freed.
4949  *
4950  * This must be called with BTRFS_I(inode)->lock held.
4951  */
4952 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4953                                    int reserve)
4954 {
4955         struct btrfs_root *root = BTRFS_I(inode)->root;
4956         u64 csum_size;
4957         int num_csums_per_leaf;
4958         int num_csums;
4959         int old_csums;
4960
4961         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4962             BTRFS_I(inode)->csum_bytes == 0)
4963                 return 0;
4964
4965         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4966         if (reserve)
4967                 BTRFS_I(inode)->csum_bytes += num_bytes;
4968         else
4969                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4970         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4971         num_csums_per_leaf = (int)div64_u64(csum_size,
4972                                             sizeof(struct btrfs_csum_item) +
4973                                             sizeof(struct btrfs_disk_key));
4974         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4975         num_csums = num_csums + num_csums_per_leaf - 1;
4976         num_csums = num_csums / num_csums_per_leaf;
4977
4978         old_csums = old_csums + num_csums_per_leaf - 1;
4979         old_csums = old_csums / num_csums_per_leaf;
4980
4981         /* No change, no need to reserve more */
4982         if (old_csums == num_csums)
4983                 return 0;
4984
4985         if (reserve)
4986                 return btrfs_calc_trans_metadata_size(root,
4987                                                       num_csums - old_csums);
4988
4989         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4990 }
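
/*
 * Editor's illustration (hypothetical numbers): assume 4KiB sectors and room
 * for 100 csum items per leaf.  Growing csum_bytes from 0 to 1MiB means 256
 * checksums, i.e. ceil(256 / 100) = 3 leaves instead of 0, so the function
 * above returns btrfs_calc_trans_metadata_size(root, 3 - 0) of extra space
 * to reserve.
 */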
4991
4992 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4993 {
4994         struct btrfs_root *root = BTRFS_I(inode)->root;
4995         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4996         u64 to_reserve = 0;
4997         u64 csum_bytes;
4998         unsigned nr_extents = 0;
4999         int extra_reserve = 0;
5000         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5001         int ret = 0;
5002         bool delalloc_lock = true;
5003         u64 to_free = 0;
5004         unsigned dropped;
5005
        /*
         * If we are a free space inode we need to not flush since we will be
         * in the middle of a transaction commit.  We also don't need the
         * delalloc mutex since we won't race with anybody.  We need this
         * mostly to make lockdep shut its filthy mouth.
         */
5011         if (btrfs_is_free_space_inode(inode)) {
5012                 flush = BTRFS_RESERVE_NO_FLUSH;
5013                 delalloc_lock = false;
5014         }
5015
5016         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5017             btrfs_transaction_in_commit(root->fs_info))
5018                 schedule_timeout(1);
5019
5020         if (delalloc_lock)
5021                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5022
5023         num_bytes = ALIGN(num_bytes, root->sectorsize);
5024
5025         spin_lock(&BTRFS_I(inode)->lock);
5026         BTRFS_I(inode)->outstanding_extents++;
5027
5028         if (BTRFS_I(inode)->outstanding_extents >
5029             BTRFS_I(inode)->reserved_extents)
5030                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5031                         BTRFS_I(inode)->reserved_extents;
5032
5033         /*
5034          * Add an item to reserve for updating the inode when we complete the
5035          * delalloc io.
5036          */
5037         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5038                       &BTRFS_I(inode)->runtime_flags)) {
5039                 nr_extents++;
5040                 extra_reserve = 1;
5041         }
5042
5043         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5044         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5045         csum_bytes = BTRFS_I(inode)->csum_bytes;
5046         spin_unlock(&BTRFS_I(inode)->lock);
5047
5048         if (root->fs_info->quota_enabled) {
5049                 ret = btrfs_qgroup_reserve(root, num_bytes +
5050                                            nr_extents * root->leafsize);
5051                 if (ret)
5052                         goto out_fail;
5053         }
5054
5055         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5056         if (unlikely(ret)) {
5057                 if (root->fs_info->quota_enabled)
5058                         btrfs_qgroup_free(root, num_bytes +
5059                                                 nr_extents * root->leafsize);
5060                 goto out_fail;
5061         }
5062
5063         spin_lock(&BTRFS_I(inode)->lock);
5064         if (extra_reserve) {
5065                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5066                         &BTRFS_I(inode)->runtime_flags);
5067                 nr_extents--;
5068         }
5069         BTRFS_I(inode)->reserved_extents += nr_extents;
5070         spin_unlock(&BTRFS_I(inode)->lock);
5071
5072         if (delalloc_lock)
5073                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5074
5075         if (to_reserve)
5076                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5077                                               btrfs_ino(inode), to_reserve, 1);
5078         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5079
5080         return 0;
5081
5082 out_fail:
5083         spin_lock(&BTRFS_I(inode)->lock);
5084         dropped = drop_outstanding_extent(inode);
        /*
         * If the inode's csum_bytes is the same as the original
         * csum_bytes then we know we haven't raced with any free()ers,
         * so we can just reduce our inode's csum bytes and carry on.
         */
5090         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5091                 calc_csum_metadata_size(inode, num_bytes, 0);
5092         } else {
5093                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5094                 u64 bytes;
5095
                /*
                 * This is tricky, but first we need to figure out how much we
                 * freed from any free-ers that occurred during this
                 * reservation, so we reset ->csum_bytes to the csum_bytes
                 * before we dropped our lock, and then call the free for the
                 * number of bytes that were freed while we were trying our
                 * reservation.
                 */
5104                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5105                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5106                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5109                 /*
5110                  * Now we need to see how much we would have freed had we not
5111                  * been making this reservation and our ->csum_bytes were not
5112                  * artificially inflated.
5113                  */
5114                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5115                 bytes = csum_bytes - orig_csum_bytes;
5116                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5117
                /*
                 * Now reset ->csum_bytes to what it should be.  If bytes is
                 * more than to_free then we would have freed more space had we
                 * not had an artificially high ->csum_bytes, so we need to free
                 * the remainder.  If bytes is the same or less then we don't
                 * need to do anything, the other free-ers did the correct
                 * thing.
                 */
5126                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5127                 if (bytes > to_free)
5128                         to_free = bytes - to_free;
5129                 else
5130                         to_free = 0;
5131         }
5132         spin_unlock(&BTRFS_I(inode)->lock);
5133         if (dropped)
5134                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5135
5136         if (to_free) {
5137                 btrfs_block_rsv_release(root, block_rsv, to_free);
5138                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5139                                               btrfs_ino(inode), to_free, 0);
5140         }
5141         if (delalloc_lock)
5142                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5143         return ret;
5144 }
5145
5146 /**
5147  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5148  * @inode: the inode to release the reservation for
5149  * @num_bytes: the number of bytes we're releasing
5150  *
5151  * This will release the metadata reservation for an inode.  This can be called
5152  * once we complete IO for a given set of bytes to release their metadata
5153  * reservations.
5154  */
5155 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5156 {
5157         struct btrfs_root *root = BTRFS_I(inode)->root;
5158         u64 to_free = 0;
5159         unsigned dropped;
5160
5161         num_bytes = ALIGN(num_bytes, root->sectorsize);
5162         spin_lock(&BTRFS_I(inode)->lock);
5163         dropped = drop_outstanding_extent(inode);
5164
5165         if (num_bytes)
5166                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5167         spin_unlock(&BTRFS_I(inode)->lock);
5168         if (dropped > 0)
5169                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5170
5171         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5172                                       btrfs_ino(inode), to_free, 0);
5173         if (root->fs_info->quota_enabled) {
5174                 btrfs_qgroup_free(root, num_bytes +
5175                                         dropped * root->leafsize);
5176         }
5177
5178         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5179                                 to_free);
5180 }
5181
5182 /**
5183  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5184  * @inode: inode we're writing to
5185  * @num_bytes: the number of bytes we want to allocate
5186  *
5187  * This will do the following things
5188  *
5189  * o reserve space in the data space info for num_bytes
5190  * o reserve space in the metadata space info based on number of outstanding
5191  *   extents and how much csums will be needed
 * o add to the inode's ->delalloc_bytes
5193  * o add it to the fs_info's delalloc inodes list.
5194  *
5195  * This will return 0 for success and -ENOSPC if there is no space left.
5196  */
5197 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5198 {
5199         int ret;
5200
5201         ret = btrfs_check_data_free_space(inode, num_bytes);
5202         if (ret)
5203                 return ret;
5204
5205         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5206         if (ret) {
5207                 btrfs_free_reserved_data_space(inode, num_bytes);
5208                 return ret;
5209         }
5210
5211         return 0;
5212 }
5213
5214 /**
5215  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5216  * @inode: inode we're releasing space for
5217  * @num_bytes: the number of bytes we want to free up
5218  *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, e.g. when there is an error or we insert an inline extent.
5222  *
5223  * This function will release the metadata space that was not used and will
5224  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5225  * list if there are no delalloc bytes left.
5226  */
5227 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5228 {
5229         btrfs_delalloc_release_metadata(inode, num_bytes);
5230         btrfs_free_reserved_data_space(inode, num_bytes);
5231 }
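
/*
 * Editor's sketch (hypothetical helper, not part of the original file): how
 * the reserve/release pair above is typically used around a buffered write
 * that may bail out before any delalloc IO has been started.
 */
static int __maybe_unused example_delalloc_prepare(struct inode *inode,
						   u64 num_bytes, bool fail)
{
	int ret;

	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
	if (ret)
		return ret;

	if (fail) {
		/* error before any IO: give back both data and metadata space */
		btrfs_delalloc_release_space(inode, num_bytes);
		return -EIO;
	}

	/* on success the reservation is consumed as the delalloc IO completes */
	return 0;
}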
5232
5233 static int update_block_group(struct btrfs_root *root,
5234                               u64 bytenr, u64 num_bytes, int alloc)
5235 {
5236         struct btrfs_block_group_cache *cache = NULL;
5237         struct btrfs_fs_info *info = root->fs_info;
5238         u64 total = num_bytes;
5239         u64 old_val;
5240         u64 byte_in_group;
5241         int factor;
5242
5243         /* block accounting for super block */
5244         spin_lock(&info->delalloc_root_lock);
5245         old_val = btrfs_super_bytes_used(info->super_copy);
5246         if (alloc)
5247                 old_val += num_bytes;
5248         else
5249                 old_val -= num_bytes;
5250         btrfs_set_super_bytes_used(info->super_copy, old_val);
5251         spin_unlock(&info->delalloc_root_lock);
5252
5253         while (total) {
5254                 cache = btrfs_lookup_block_group(info, bytenr);
5255                 if (!cache)
5256                         return -ENOENT;
5257                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5258                                     BTRFS_BLOCK_GROUP_RAID1 |
5259                                     BTRFS_BLOCK_GROUP_RAID10))
5260                         factor = 2;
5261                 else
5262                         factor = 1;
5263                 /*
5264                  * If this block group has free space cache written out, we
5265                  * need to make sure to load it if we are removing space.  This
5266                  * is because we need the unpinning stage to actually add the
5267                  * space back to the block group, otherwise we will leak space.
5268                  */
5269                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5270                         cache_block_group(cache, 1);
5271
5272                 byte_in_group = bytenr - cache->key.objectid;
5273                 WARN_ON(byte_in_group > cache->key.offset);
5274
5275                 spin_lock(&cache->space_info->lock);
5276                 spin_lock(&cache->lock);
5277
5278                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5279                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5280                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5281
5282                 cache->dirty = 1;
5283                 old_val = btrfs_block_group_used(&cache->item);
5284                 num_bytes = min(total, cache->key.offset - byte_in_group);
5285                 if (alloc) {
5286                         old_val += num_bytes;
5287                         btrfs_set_block_group_used(&cache->item, old_val);
5288                         cache->reserved -= num_bytes;
5289                         cache->space_info->bytes_reserved -= num_bytes;
5290                         cache->space_info->bytes_used += num_bytes;
5291                         cache->space_info->disk_used += num_bytes * factor;
5292                         spin_unlock(&cache->lock);
5293                         spin_unlock(&cache->space_info->lock);
5294                 } else {
5295                         old_val -= num_bytes;
5296                         btrfs_set_block_group_used(&cache->item, old_val);
5297                         cache->pinned += num_bytes;
5298                         cache->space_info->bytes_pinned += num_bytes;
5299                         cache->space_info->bytes_used -= num_bytes;
5300                         cache->space_info->disk_used -= num_bytes * factor;
5301                         spin_unlock(&cache->lock);
5302                         spin_unlock(&cache->space_info->lock);
5303
5304                         set_extent_dirty(info->pinned_extents,
5305                                          bytenr, bytenr + num_bytes - 1,
5306                                          GFP_NOFS | __GFP_NOFAIL);
5307                 }
5308                 btrfs_put_block_group(cache);
5309                 total -= num_bytes;
5310                 bytenr += num_bytes;
5311         }
5312         return 0;
5313 }
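
/*
 * Editor's illustration: freeing 1MiB from a RAID1 block group (factor = 2)
 * moves 1MiB from bytes_used to pinned and drops disk_used by 2MiB, since
 * every logical byte in a DUP/RAID1/RAID10 group occupies two bytes on disk.
 */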
5314
5315 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5316 {
5317         struct btrfs_block_group_cache *cache;
5318         u64 bytenr;
5319
5320         spin_lock(&root->fs_info->block_group_cache_lock);
5321         bytenr = root->fs_info->first_logical_byte;
5322         spin_unlock(&root->fs_info->block_group_cache_lock);
5323
5324         if (bytenr < (u64)-1)
5325                 return bytenr;
5326
5327         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5328         if (!cache)
5329                 return 0;
5330
5331         bytenr = cache->key.objectid;
5332         btrfs_put_block_group(cache);
5333
5334         return bytenr;
5335 }
5336
5337 static int pin_down_extent(struct btrfs_root *root,
5338                            struct btrfs_block_group_cache *cache,
5339                            u64 bytenr, u64 num_bytes, int reserved)
5340 {
5341         spin_lock(&cache->space_info->lock);
5342         spin_lock(&cache->lock);
5343         cache->pinned += num_bytes;
5344         cache->space_info->bytes_pinned += num_bytes;
5345         if (reserved) {
5346                 cache->reserved -= num_bytes;
5347                 cache->space_info->bytes_reserved -= num_bytes;
5348         }
5349         spin_unlock(&cache->lock);
5350         spin_unlock(&cache->space_info->lock);
5351
5352         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5353                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5354         if (reserved)
5355                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5356         return 0;
5357 }
5358
5359 /*
5360  * this function must be called within transaction
5361  */
5362 int btrfs_pin_extent(struct btrfs_root *root,
5363                      u64 bytenr, u64 num_bytes, int reserved)
5364 {
5365         struct btrfs_block_group_cache *cache;
5366
5367         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5368         BUG_ON(!cache); /* Logic error */
5369
5370         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5371
5372         btrfs_put_block_group(cache);
5373         return 0;
5374 }
5375
5376 /*
5377  * this function must be called within transaction
5378  */
5379 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5380                                     u64 bytenr, u64 num_bytes)
5381 {
5382         struct btrfs_block_group_cache *cache;
5383         int ret;
5384
5385         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5386         if (!cache)
5387                 return -EINVAL;
5388
5389         /*
5390          * pull in the free space cache (if any) so that our pin
5391          * removes the free space from the cache.  We have load_only set
5392          * to one because the slow code to read in the free extents does check
5393          * the pinned extents.
5394          */
5395         cache_block_group(cache, 1);
5396
5397         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5398
5399         /* remove us from the free space cache (if we're there at all) */
5400         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5401         btrfs_put_block_group(cache);
5402         return ret;
5403 }
5404
5405 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5406 {
5407         int ret;
5408         struct btrfs_block_group_cache *block_group;
5409         struct btrfs_caching_control *caching_ctl;
5410
5411         block_group = btrfs_lookup_block_group(root->fs_info, start);
5412         if (!block_group)
5413                 return -EINVAL;
5414
5415         cache_block_group(block_group, 0);
5416         caching_ctl = get_caching_control(block_group);
5417
5418         if (!caching_ctl) {
5419                 /* Logic error */
5420                 BUG_ON(!block_group_cache_done(block_group));
5421                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5422         } else {
5423                 mutex_lock(&caching_ctl->mutex);
5424
5425                 if (start >= caching_ctl->progress) {
5426                         ret = add_excluded_extent(root, start, num_bytes);
5427                 } else if (start + num_bytes <= caching_ctl->progress) {
5428                         ret = btrfs_remove_free_space(block_group,
5429                                                       start, num_bytes);
5430                 } else {
5431                         num_bytes = caching_ctl->progress - start;
5432                         ret = btrfs_remove_free_space(block_group,
5433                                                       start, num_bytes);
5434                         if (ret)
5435                                 goto out_lock;
5436
5437                         num_bytes = (start + num_bytes) -
5438                                 caching_ctl->progress;
5439                         start = caching_ctl->progress;
5440                         ret = add_excluded_extent(root, start, num_bytes);
5441                 }
5442 out_lock:
5443                 mutex_unlock(&caching_ctl->mutex);
5444                 put_caching_control(caching_ctl);
5445         }
5446         btrfs_put_block_group(block_group);
5447         return ret;
5448 }
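/*
 * Worked example of the caching_ctl->progress cases above (numbers are
 * hypothetical): with progress at 16MiB, a logged extent at [20MiB, 21MiB)
 * lies entirely beyond progress and gets excluded; one at [4MiB, 5MiB) is
 * already cached and is removed from the free space cache; one straddling
 * [15MiB, 17MiB) is split, removing [15MiB, 16MiB) from the cache and
 * excluding [16MiB, 17MiB).
 */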
5449
5450 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5451                                  struct extent_buffer *eb)
5452 {
5453         struct btrfs_file_extent_item *item;
5454         struct btrfs_key key;
5455         int found_type;
5456         int i;
5457
5458         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5459                 return 0;
5460
5461         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5462                 btrfs_item_key_to_cpu(eb, &key, i);
5463                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5464                         continue;
5465                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5466                 found_type = btrfs_file_extent_type(eb, item);
5467                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5468                         continue;
5469                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5470                         continue;
5471                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5472                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5473                 __exclude_logged_extent(log, key.objectid, key.offset);
5474         }
5475
5476         return 0;
5477 }
5478
5479 /**
5480  * btrfs_update_reserved_bytes - update the block_group and space info counters
5481  * @cache:      The cache we are manipulating
5482  * @num_bytes:  The number of bytes in question
5483  * @reserve:    One of the reservation enums
5484  *
5485  * This is called by the allocator when it reserves space, or by somebody who is
5486  * freeing space that was never actually used on disk.  For example, if you
5487  * reserve space for a new leaf in transaction A and free that leaf before
5488  * transaction A commits, you call this with reserve set to RESERVE_FREE in
5489  * order to clear the reservation.
5490  *
5491  * Metadata allocations should call this with RESERVE_ALLOC so we do the proper
5492  * ENOSPC accounting.  For data we handle the reservation through clearing the
5493  * delalloc bits in the io_tree.  We have to do this since we could end up
5494  * allocating less disk space for the amount of data we have reserved in the
5495  * case of compression.
5496  *
5497  * If this is a reservation and the block group has become read only we cannot
5498  * make the reservation and return -EAGAIN, otherwise this function always
5499  * succeeds.
5500  */
5501 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5502                                        u64 num_bytes, int reserve)
5503 {
5504         struct btrfs_space_info *space_info = cache->space_info;
5505         int ret = 0;
5506
5507         spin_lock(&space_info->lock);
5508         spin_lock(&cache->lock);
5509         if (reserve != RESERVE_FREE) {
5510                 if (cache->ro) {
5511                         ret = -EAGAIN;
5512                 } else {
5513                         cache->reserved += num_bytes;
5514                         space_info->bytes_reserved += num_bytes;
5515                         if (reserve == RESERVE_ALLOC) {
5516                                 trace_btrfs_space_reservation(cache->fs_info,
5517                                                 "space_info", space_info->flags,
5518                                                 num_bytes, 0);
5519                                 space_info->bytes_may_use -= num_bytes;
5520                         }
5521                 }
5522         } else {
5523                 if (cache->ro)
5524                         space_info->bytes_readonly += num_bytes;
5525                 cache->reserved -= num_bytes;
5526                 space_info->bytes_reserved -= num_bytes;
5527         }
5528         spin_unlock(&cache->lock);
5529         spin_unlock(&space_info->lock);
5530         return ret;
5531 }
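/*
 * Sketch of the expected pairing (values hypothetical): reserve space in a
 * block group and, if the extent is never actually written, clear the
 * reservation again with RESERVE_FREE.
 *
 *	ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *	if (ret == -EAGAIN) {
 *		... block group went read only, try another one ...
 *	}
 *	...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */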
5532
5533 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5534                                 struct btrfs_root *root)
5535 {
5536         struct btrfs_fs_info *fs_info = root->fs_info;
5537         struct btrfs_caching_control *next;
5538         struct btrfs_caching_control *caching_ctl;
5539         struct btrfs_block_group_cache *cache;
5540         struct btrfs_space_info *space_info;
5541
5542         down_write(&fs_info->extent_commit_sem);
5543
5544         list_for_each_entry_safe(caching_ctl, next,
5545                                  &fs_info->caching_block_groups, list) {
5546                 cache = caching_ctl->block_group;
5547                 if (block_group_cache_done(cache)) {
5548                         cache->last_byte_to_unpin = (u64)-1;
5549                         list_del_init(&caching_ctl->list);
5550                         put_caching_control(caching_ctl);
5551                 } else {
5552                         cache->last_byte_to_unpin = caching_ctl->progress;
5553                 }
5554         }
5555
5556         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5557                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5558         else
5559                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5560
5561         up_write(&fs_info->extent_commit_sem);
5562
5563         list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5564                 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5565
5566         update_global_block_rsv(fs_info);
5567 }
5568
5569 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5570 {
5571         struct btrfs_fs_info *fs_info = root->fs_info;
5572         struct btrfs_block_group_cache *cache = NULL;
5573         struct btrfs_space_info *space_info;
5574         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5575         u64 len;
5576         bool readonly;
5577
5578         while (start <= end) {
5579                 readonly = false;
5580                 if (!cache ||
5581                     start >= cache->key.objectid + cache->key.offset) {
5582                         if (cache)
5583                                 btrfs_put_block_group(cache);
5584                         cache = btrfs_lookup_block_group(fs_info, start);
5585                         BUG_ON(!cache); /* Logic error */
5586                 }
5587
5588                 len = cache->key.objectid + cache->key.offset - start;
5589                 len = min(len, end + 1 - start);
5590
5591                 if (start < cache->last_byte_to_unpin) {
5592                         len = min(len, cache->last_byte_to_unpin - start);
5593                         btrfs_add_free_space(cache, start, len);
5594                 }
5595
5596                 start += len;
5597                 space_info = cache->space_info;
5598
5599                 spin_lock(&space_info->lock);
5600                 spin_lock(&cache->lock);
5601                 cache->pinned -= len;
5602                 space_info->bytes_pinned -= len;
5603                 if (cache->ro) {
5604                         space_info->bytes_readonly += len;
5605                         readonly = true;
5606                 }
5607                 spin_unlock(&cache->lock);
5608                 if (!readonly && global_rsv->space_info == space_info) {
5609                         spin_lock(&global_rsv->lock);
5610                         if (!global_rsv->full) {
5611                                 len = min(len, global_rsv->size -
5612                                           global_rsv->reserved);
5613                                 global_rsv->reserved += len;
5614                                 space_info->bytes_may_use += len;
5615                                 if (global_rsv->reserved >= global_rsv->size)
5616                                         global_rsv->full = 1;
5617                         }
5618                         spin_unlock(&global_rsv->lock);
5619                 }
5620                 spin_unlock(&space_info->lock);
5621         }
5622
5623         if (cache)
5624                 btrfs_put_block_group(cache);
5625         return 0;
5626 }
5627
5628 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5629                                struct btrfs_root *root)
5630 {
5631         struct btrfs_fs_info *fs_info = root->fs_info;
5632         struct extent_io_tree *unpin;
5633         u64 start;
5634         u64 end;
5635         int ret;
5636
5637         if (trans->aborted)
5638                 return 0;
5639
5640         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5641                 unpin = &fs_info->freed_extents[1];
5642         else
5643                 unpin = &fs_info->freed_extents[0];
5644
5645         while (1) {
5646                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5647                                             EXTENT_DIRTY, NULL);
5648                 if (ret)
5649                         break;
5650
5651                 if (btrfs_test_opt(root, DISCARD))
5652                         ret = btrfs_discard_extent(root, start,
5653                                                    end + 1 - start, NULL);
5654
5655                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5656                 unpin_extent_range(root, start, end);
5657                 cond_resched();
5658         }
5659
5660         return 0;
5661 }
5662
5663 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5664                              u64 owner, u64 root_objectid)
5665 {
5666         struct btrfs_space_info *space_info;
5667         u64 flags;
5668
5669         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5670                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5671                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5672                 else
5673                         flags = BTRFS_BLOCK_GROUP_METADATA;
5674         } else {
5675                 flags = BTRFS_BLOCK_GROUP_DATA;
5676         }
5677
5678         space_info = __find_space_info(fs_info, flags);
5679         BUG_ON(!space_info); /* Logic bug */
5680         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5681 }
5682
5683
5684 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5685                                 struct btrfs_root *root,
5686                                 u64 bytenr, u64 num_bytes, u64 parent,
5687                                 u64 root_objectid, u64 owner_objectid,
5688                                 u64 owner_offset, int refs_to_drop,
5689                                 struct btrfs_delayed_extent_op *extent_op)
5690 {
5691         struct btrfs_key key;
5692         struct btrfs_path *path;
5693         struct btrfs_fs_info *info = root->fs_info;
5694         struct btrfs_root *extent_root = info->extent_root;
5695         struct extent_buffer *leaf;
5696         struct btrfs_extent_item *ei;
5697         struct btrfs_extent_inline_ref *iref;
5698         int ret;
5699         int is_data;
5700         int extent_slot = 0;
5701         int found_extent = 0;
5702         int num_to_del = 1;
5703         u32 item_size;
5704         u64 refs;
5705         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5706                                                  SKINNY_METADATA);
5707
5708         path = btrfs_alloc_path();
5709         if (!path)
5710                 return -ENOMEM;
5711
5712         path->reada = 1;
5713         path->leave_spinning = 1;
5714
5715         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5716         BUG_ON(!is_data && refs_to_drop != 1);
5717
5718         if (is_data)
5719                 skinny_metadata = 0;
5720
5721         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5722                                     bytenr, num_bytes, parent,
5723                                     root_objectid, owner_objectid,
5724                                     owner_offset);
5725         if (ret == 0) {
5726                 extent_slot = path->slots[0];
5727                 while (extent_slot >= 0) {
5728                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5729                                               extent_slot);
5730                         if (key.objectid != bytenr)
5731                                 break;
5732                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5733                             key.offset == num_bytes) {
5734                                 found_extent = 1;
5735                                 break;
5736                         }
5737                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5738                             key.offset == owner_objectid) {
5739                                 found_extent = 1;
5740                                 break;
5741                         }
5742                         if (path->slots[0] - extent_slot > 5)
5743                                 break;
5744                         extent_slot--;
5745                 }
5746 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5747                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5748                 if (found_extent && item_size < sizeof(*ei))
5749                         found_extent = 0;
5750 #endif
5751                 if (!found_extent) {
5752                         BUG_ON(iref);
5753                         ret = remove_extent_backref(trans, extent_root, path,
5754                                                     NULL, refs_to_drop,
5755                                                     is_data);
5756                         if (ret) {
5757                                 btrfs_abort_transaction(trans, extent_root, ret);
5758                                 goto out;
5759                         }
5760                         btrfs_release_path(path);
5761                         path->leave_spinning = 1;
5762
5763                         key.objectid = bytenr;
5764                         key.type = BTRFS_EXTENT_ITEM_KEY;
5765                         key.offset = num_bytes;
5766
5767                         if (!is_data && skinny_metadata) {
5768                                 key.type = BTRFS_METADATA_ITEM_KEY;
5769                                 key.offset = owner_objectid;
5770                         }
5771
5772                         ret = btrfs_search_slot(trans, extent_root,
5773                                                 &key, path, -1, 1);
5774                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5775                                 /*
5776                                  * Couldn't find our skinny metadata item,
5777                                  * see if we have ye olde extent item.
5778                                  */
5779                                 path->slots[0]--;
5780                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5781                                                       path->slots[0]);
5782                                 if (key.objectid == bytenr &&
5783                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5784                                     key.offset == num_bytes)
5785                                         ret = 0;
5786                         }
5787
5788                         if (ret > 0 && skinny_metadata) {
5789                                 skinny_metadata = false;
5790                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5791                                 key.offset = num_bytes;
5792                                 btrfs_release_path(path);
5793                                 ret = btrfs_search_slot(trans, extent_root,
5794                                                         &key, path, -1, 1);
5795                         }
5796
5797                         if (ret) {
5798                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5799                                         ret, bytenr);
5800                                 if (ret > 0)
5801                                         btrfs_print_leaf(extent_root,
5802                                                          path->nodes[0]);
5803                         }
5804                         if (ret < 0) {
5805                                 btrfs_abort_transaction(trans, extent_root, ret);
5806                                 goto out;
5807                         }
5808                         extent_slot = path->slots[0];
5809                 }
5810         } else if (WARN_ON(ret == -ENOENT)) {
5811                 btrfs_print_leaf(extent_root, path->nodes[0]);
5812                 btrfs_err(info,
5813                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5814                         bytenr, parent, root_objectid, owner_objectid,
5815                         owner_offset);
5816         } else {
5817                 btrfs_abort_transaction(trans, extent_root, ret);
5818                 goto out;
5819         }
5820
5821         leaf = path->nodes[0];
5822         item_size = btrfs_item_size_nr(leaf, extent_slot);
5823 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5824         if (item_size < sizeof(*ei)) {
5825                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5826                 ret = convert_extent_item_v0(trans, extent_root, path,
5827                                              owner_objectid, 0);
5828                 if (ret < 0) {
5829                         btrfs_abort_transaction(trans, extent_root, ret);
5830                         goto out;
5831                 }
5832
5833                 btrfs_release_path(path);
5834                 path->leave_spinning = 1;
5835
5836                 key.objectid = bytenr;
5837                 key.type = BTRFS_EXTENT_ITEM_KEY;
5838                 key.offset = num_bytes;
5839
5840                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5841                                         -1, 1);
5842                 if (ret) {
5843                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5844                                 ret, bytenr);
5845                         btrfs_print_leaf(extent_root, path->nodes[0]);
5846                 }
5847                 if (ret < 0) {
5848                         btrfs_abort_transaction(trans, extent_root, ret);
5849                         goto out;
5850                 }
5851
5852                 extent_slot = path->slots[0];
5853                 leaf = path->nodes[0];
5854                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5855         }
5856 #endif
5857         BUG_ON(item_size < sizeof(*ei));
5858         ei = btrfs_item_ptr(leaf, extent_slot,
5859                             struct btrfs_extent_item);
5860         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5861             key.type == BTRFS_EXTENT_ITEM_KEY) {
5862                 struct btrfs_tree_block_info *bi;
5863                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5864                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5865                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5866         }
5867
5868         refs = btrfs_extent_refs(leaf, ei);
5869         if (refs < refs_to_drop) {
5870                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5871                           "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
5872                 ret = -EINVAL;
5873                 btrfs_abort_transaction(trans, extent_root, ret);
5874                 goto out;
5875         }
5876         refs -= refs_to_drop;
5877
5878         if (refs > 0) {
5879                 if (extent_op)
5880                         __run_delayed_extent_op(extent_op, leaf, ei);
5881                 /*
5882                  * In the case of an inline back ref, the reference
5883                  * count will be updated by remove_extent_backref
5884                  */
5885                 if (iref) {
5886                         BUG_ON(!found_extent);
5887                 } else {
5888                         btrfs_set_extent_refs(leaf, ei, refs);
5889                         btrfs_mark_buffer_dirty(leaf);
5890                 }
5891                 if (found_extent) {
5892                         ret = remove_extent_backref(trans, extent_root, path,
5893                                                     iref, refs_to_drop,
5894                                                     is_data);
5895                         if (ret) {
5896                                 btrfs_abort_transaction(trans, extent_root, ret);
5897                                 goto out;
5898                         }
5899                 }
5900                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5901                                  root_objectid);
5902         } else {
5903                 if (found_extent) {
5904                         BUG_ON(is_data && refs_to_drop !=
5905                                extent_data_ref_count(root, path, iref));
5906                         if (iref) {
5907                                 BUG_ON(path->slots[0] != extent_slot);
5908                         } else {
5909                                 BUG_ON(path->slots[0] != extent_slot + 1);
5910                                 path->slots[0] = extent_slot;
5911                                 num_to_del = 2;
5912                         }
5913                 }
5914
5915                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5916                                       num_to_del);
5917                 if (ret) {
5918                         btrfs_abort_transaction(trans, extent_root, ret);
5919                         goto out;
5920                 }
5921                 btrfs_release_path(path);
5922
5923                 if (is_data) {
5924                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5925                         if (ret) {
5926                                 btrfs_abort_transaction(trans, extent_root, ret);
5927                                 goto out;
5928                         }
5929                 }
5930
5931                 ret = update_block_group(root, bytenr, num_bytes, 0);
5932                 if (ret) {
5933                         btrfs_abort_transaction(trans, extent_root, ret);
5934                         goto out;
5935                 }
5936         }
5937 out:
5938         btrfs_free_path(path);
5939         return ret;
5940 }
5941
5942 /*
5943  * when we free a block, it is possible (and likely) that we free the last
5944  * delayed ref for that extent as well.  This searches the delayed ref tree for
5945  * a given extent, and if there are no other delayed refs to be processed, it
5946  * removes it from the tree.
5947  */
5948 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5949                                       struct btrfs_root *root, u64 bytenr)
5950 {
5951         struct btrfs_delayed_ref_head *head;
5952         struct btrfs_delayed_ref_root *delayed_refs;
5953         struct btrfs_delayed_ref_node *ref;
5954         struct rb_node *node;
5955         int ret = 0;
5956
5957         delayed_refs = &trans->transaction->delayed_refs;
5958         spin_lock(&delayed_refs->lock);
5959         head = btrfs_find_delayed_ref_head(trans, bytenr);
5960         if (!head)
5961                 goto out;
5962
5963         node = rb_prev(&head->node.rb_node);
5964         if (!node)
5965                 goto out;
5966
5967         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5968
5969         /* there are still entries for this ref, we can't drop it */
5970         if (ref->bytenr == bytenr)
5971                 goto out;
5972
5973         if (head->extent_op) {
5974                 if (!head->must_insert_reserved)
5975                         goto out;
5976                 btrfs_free_delayed_extent_op(head->extent_op);
5977                 head->extent_op = NULL;
5978         }
5979
5980         /*
5981          * waiting for the lock here would deadlock.  If someone else has it
5982          * locked they are already in the process of dropping it anyway.
5983          */
5984         if (!mutex_trylock(&head->mutex))
5985                 goto out;
5986
5987         /*
5988          * at this point we have a head with no other entries.  Go
5989          * ahead and process it.
5990          */
5991         head->node.in_tree = 0;
5992         rb_erase(&head->node.rb_node, &delayed_refs->root);
5993         rb_erase(&head->href_node, &delayed_refs->href_root);
5994
5995         delayed_refs->num_entries--;
5996
5997         /*
5998          * we don't take a ref on the node because we're removing it from the
5999          * tree, so we just steal the ref the tree was holding.
6000          */
6001         delayed_refs->num_heads--;
6002         if (list_empty(&head->cluster))
6003                 delayed_refs->num_heads_ready--;
6004
6005         list_del_init(&head->cluster);
6006         spin_unlock(&delayed_refs->lock);
6007
6008         BUG_ON(head->extent_op);
6009         if (head->must_insert_reserved)
6010                 ret = 1;
6011
6012         mutex_unlock(&head->mutex);
6013         btrfs_put_delayed_ref(&head->node);
6014         return ret;
6015 out:
6016         spin_unlock(&delayed_refs->lock);
6017         return 0;
6018 }
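/*
 * Note on the return value (inferred from the code above and from its use
 * in btrfs_free_tree_block() below): 1 means this was the last ref and
 * must_insert_reserved was set, so the caller owns the extent and may free
 * it directly; 0 means other refs remain (or the head was busy) and the
 * caller must fall back to pinning the extent.
 */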
6019
6020 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6021                            struct btrfs_root *root,
6022                            struct extent_buffer *buf,
6023                            u64 parent, int last_ref)
6024 {
6025         struct btrfs_block_group_cache *cache = NULL;
6026         int pin = 1;
6027         int ret;
6028
6029         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6030                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6031                                         buf->start, buf->len,
6032                                         parent, root->root_key.objectid,
6033                                         btrfs_header_level(buf),
6034                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6035                 BUG_ON(ret); /* -ENOMEM */
6036         }
6037
6038         if (!last_ref)
6039                 return;
6040
6041         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6042
6043         if (btrfs_header_generation(buf) == trans->transid) {
6044                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6045                         ret = check_ref_cleanup(trans, root, buf->start);
6046                         if (!ret)
6047                                 goto out;
6048                 }
6049
6050                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6051                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6052                         goto out;
6053                 }
6054
6055                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6056
6057                 btrfs_add_free_space(cache, buf->start, buf->len);
6058                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
6059                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6060                 pin = 0;
6061         }
6062 out:
6063         if (pin)
6064                 add_pinned_bytes(root->fs_info, buf->len,
6065                                  btrfs_header_level(buf),
6066                                  root->root_key.objectid);
6067
6068         /*
6069          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6070          * anymore.
6071          */
6072         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6073         btrfs_put_block_group(cache);
6074 }
6075
6076 /* Can return -ENOMEM */
6077 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6078                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6079                       u64 owner, u64 offset, int for_cow)
6080 {
6081         int ret;
6082         struct btrfs_fs_info *fs_info = root->fs_info;
6083
6084         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6085
6086         /*
6087          * tree log blocks never actually go into the extent allocation
6088          * tree, just update pinning info and exit early.
6089          */
6090         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6091                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6092                 /* unlocks the pinned mutex */
6093                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6094                 ret = 0;
6095         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6096                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6097                                         num_bytes,
6098                                         parent, root_objectid, (int)owner,
6099                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6100         } else {
6101                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6102                                                 num_bytes,
6103                                                 parent, root_objectid, owner,
6104                                                 offset, BTRFS_DROP_DELAYED_REF,
6105                                                 NULL, for_cow);
6106         }
6107         return ret;
6108 }
6109
6110 static u64 stripe_align(struct btrfs_root *root,
6111                         struct btrfs_block_group_cache *cache,
6112                         u64 val, u64 num_bytes)
6113 {
6114         u64 ret = ALIGN(val, root->stripesize);
6115         return ret;
6116 }
6117
6118 /*
6119  * when we wait for progress in the block group caching, it's because
6120  * our allocation attempt failed at least once.  So, we must sleep
6121  * and let some progress happen before we try again.
6122  *
6123  * This function will sleep at least once waiting for new free space to
6124  * show up, and then it will check the block group free space numbers
6125  * for our min num_bytes.  Another option is to have it go ahead
6126  * and look in the rbtree for a free extent of a given size, but this
6127  * is a good start.
6128  *
6129  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6130  * any of the information in this block group.
6131  */
6132 static noinline void
6133 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6134                                 u64 num_bytes)
6135 {
6136         struct btrfs_caching_control *caching_ctl;
6137
6138         caching_ctl = get_caching_control(cache);
6139         if (!caching_ctl)
6140                 return;
6141
6142         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6143                    (cache->free_space_ctl->free_space >= num_bytes));
6144
6145         put_caching_control(caching_ctl);
6146 }
6147
6148 static noinline int
6149 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6150 {
6151         struct btrfs_caching_control *caching_ctl;
6152         int ret = 0;
6153
6154         caching_ctl = get_caching_control(cache);
6155         if (!caching_ctl)
6156                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6157
6158         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6159         if (cache->cached == BTRFS_CACHE_ERROR)
6160                 ret = -EIO;
6161         put_caching_control(caching_ctl);
6162         return ret;
6163 }
6164
6165 int __get_raid_index(u64 flags)
6166 {
6167         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6168                 return BTRFS_RAID_RAID10;
6169         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6170                 return BTRFS_RAID_RAID1;
6171         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6172                 return BTRFS_RAID_DUP;
6173         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6174                 return BTRFS_RAID_RAID0;
6175         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6176                 return BTRFS_RAID_RAID5;
6177         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6178                 return BTRFS_RAID_RAID6;
6179
6180         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6181 }
6182
6183 int get_block_group_index(struct btrfs_block_group_cache *cache)
6184 {
6185         return __get_raid_index(cache->flags);
6186 }
6187
6188 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6189         [BTRFS_RAID_RAID10]     = "raid10",
6190         [BTRFS_RAID_RAID1]      = "raid1",
6191         [BTRFS_RAID_DUP]        = "dup",
6192         [BTRFS_RAID_RAID0]      = "raid0",
6193         [BTRFS_RAID_SINGLE]     = "single",
6194         [BTRFS_RAID_RAID5]      = "raid5",
6195         [BTRFS_RAID_RAID6]      = "raid6",
6196 };
6197
6198 const char *get_raid_name(enum btrfs_raid_types type)
6199 {
6200         if (type >= BTRFS_NR_RAID_TYPES)
6201                 return NULL;
6202
6203         return btrfs_raid_type_names[type];
6204 }
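/*
 * Illustrative use of the two helpers above ("cache" is a hypothetical
 * block group pointer):
 *
 *	int idx = get_block_group_index(cache);
 *	printk(KERN_INFO "block group %llu is %s\n",
 *	       cache->key.objectid, get_raid_name(idx));
 */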
6205
6206 enum btrfs_loop_type {
6207         LOOP_CACHING_NOWAIT = 0,
6208         LOOP_CACHING_WAIT = 1,
6209         LOOP_ALLOC_CHUNK = 2,
6210         LOOP_NO_EMPTY_SIZE = 3,
6211 };
6212
6213 /*
6214  * walks the btree of allocated extents and finds a hole of a given size.
6215  * The key ins is changed to record the hole:
6216  * ins->objectid == start position
6217  * ins->flags = BTRFS_EXTENT_ITEM_KEY
6218  * ins->offset == the size of the hole.
6219  * Any available blocks before search_start are skipped.
6220  *
6221  * If there is no suitable free space, we record the size of the largest
6222  * free extent we saw in ins->offset.
6223  */
6224 static noinline int find_free_extent(struct btrfs_root *orig_root,
6225                                      u64 num_bytes, u64 empty_size,
6226                                      u64 hint_byte, struct btrfs_key *ins,
6227                                      u64 flags)
6228 {
6229         int ret = 0;
6230         struct btrfs_root *root = orig_root->fs_info->extent_root;
6231         struct btrfs_free_cluster *last_ptr = NULL;
6232         struct btrfs_block_group_cache *block_group = NULL;
6233         struct btrfs_block_group_cache *used_block_group;
6234         u64 search_start = 0;
6235         u64 max_extent_size = 0;
6236         int empty_cluster = 2 * 1024 * 1024;
6237         struct btrfs_space_info *space_info;
6238         int loop = 0;
6239         int index = __get_raid_index(flags);
6240         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6241                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6242         bool failed_cluster_refill = false;
6243         bool failed_alloc = false;
6244         bool use_cluster = true;
6245         bool have_caching_bg = false;
6246
6247         WARN_ON(num_bytes < root->sectorsize);
6248         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6249         ins->objectid = 0;
6250         ins->offset = 0;
6251
6252         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6253
6254         space_info = __find_space_info(root->fs_info, flags);
6255         if (!space_info) {
6256                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6257                 return -ENOSPC;
6258         }
6259
6260         /*
6261          * If the space info is for both data and metadata it means we have a
6262          * small filesystem and we can't use the clustering stuff.
6263          */
6264         if (btrfs_mixed_space_info(space_info))
6265                 use_cluster = false;
6266
6267         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6268                 last_ptr = &root->fs_info->meta_alloc_cluster;
6269                 if (!btrfs_test_opt(root, SSD))
6270                         empty_cluster = 64 * 1024;
6271         }
6272
6273         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6274             btrfs_test_opt(root, SSD)) {
6275                 last_ptr = &root->fs_info->data_alloc_cluster;
6276         }
6277
6278         if (last_ptr) {
6279                 spin_lock(&last_ptr->lock);
6280                 if (last_ptr->block_group)
6281                         hint_byte = last_ptr->window_start;
6282                 spin_unlock(&last_ptr->lock);
6283         }
6284
6285         search_start = max(search_start, first_logical_byte(root, 0));
6286         search_start = max(search_start, hint_byte);
6287
6288         if (!last_ptr)
6289                 empty_cluster = 0;
6290
6291         if (search_start == hint_byte) {
6292                 block_group = btrfs_lookup_block_group(root->fs_info,
6293                                                        search_start);
6294                 used_block_group = block_group;
6295                 /*
6296                  * we don't want to use the block group if it doesn't match our
6297                  * allocation bits, or if it's not cached.
6298                  *
6299                  * However if we are re-searching with an ideal block group
6300                  * picked out then we don't care whether the block group is cached.
6301                  */
6302                 if (block_group && block_group_bits(block_group, flags) &&
6303                     block_group->cached != BTRFS_CACHE_NO) {
6304                         down_read(&space_info->groups_sem);
6305                         if (list_empty(&block_group->list) ||
6306                             block_group->ro) {
6307                                 /*
6308                                  * someone is removing this block group,
6309                                  * we can't jump into the have_block_group
6310                                  * target because our list pointers are not
6311                                  * valid
6312                                  */
6313                                 btrfs_put_block_group(block_group);
6314                                 up_read(&space_info->groups_sem);
6315                         } else {
6316                                 index = get_block_group_index(block_group);
6317                                 goto have_block_group;
6318                         }
6319                 } else if (block_group) {
6320                         btrfs_put_block_group(block_group);
6321                 }
6322         }
6323 search:
6324         have_caching_bg = false;
6325         down_read(&space_info->groups_sem);
6326         list_for_each_entry(block_group, &space_info->block_groups[index],
6327                             list) {
6328                 u64 offset;
6329                 int cached;
6330
6331                 used_block_group = block_group;
6332                 btrfs_get_block_group(block_group);
6333                 search_start = block_group->key.objectid;
6334
6335                 /*
6336                  * this can happen if we end up cycling through all the
6337                  * raid types, but we want to make sure we only allocate
6338                  * for the proper type.
6339                  */
6340                 if (!block_group_bits(block_group, flags)) {
6341                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6342                                     BTRFS_BLOCK_GROUP_RAID1 |
6343                                     BTRFS_BLOCK_GROUP_RAID5 |
6344                                     BTRFS_BLOCK_GROUP_RAID6 |
6345                                     BTRFS_BLOCK_GROUP_RAID10;
6346
6347                         /*
6348                          * if they asked for extra copies and this block group
6349                          * doesn't provide them, bail.  This does allow us to
6350                          * fill raid0 from raid1.
6351                          */
6352                         if ((flags & extra) && !(block_group->flags & extra))
6353                                 goto loop;
6354                 }
6355
6356 have_block_group:
6357                 cached = block_group_cache_done(block_group);
6358                 if (unlikely(!cached)) {
6359                         ret = cache_block_group(block_group, 0);
6360                         BUG_ON(ret < 0);
6361                         ret = 0;
6362                 }
6363
6364                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6365                         goto loop;
6366                 if (unlikely(block_group->ro))
6367                         goto loop;
6368
6369                 /*
6370                  * Ok we want to try and use the cluster allocator, so
6371                  * let's look there
6372                  */
6373                 if (last_ptr) {
6374                         unsigned long aligned_cluster;
6375                         /*
6376                          * the refill lock keeps out other
6377                          * people trying to start a new cluster
6378                          */
6379                         spin_lock(&last_ptr->refill_lock);
6380                         used_block_group = last_ptr->block_group;
6381                         if (used_block_group != block_group &&
6382                             (!used_block_group ||
6383                              used_block_group->ro ||
6384                              !block_group_bits(used_block_group, flags))) {
6385                                 used_block_group = block_group;
6386                                 goto refill_cluster;
6387                         }
6388
6389                         if (used_block_group != block_group)
6390                                 btrfs_get_block_group(used_block_group);
6391
6392                         offset = btrfs_alloc_from_cluster(used_block_group,
6393                                                 last_ptr,
6394                                                 num_bytes,
6395                                                 used_block_group->key.objectid,
6396                                                 &max_extent_size);
6397                         if (offset) {
6398                                 /* we have a block, we're done */
6399                                 spin_unlock(&last_ptr->refill_lock);
6400                                 trace_btrfs_reserve_extent_cluster(root,
6401                                         block_group, search_start, num_bytes);
6402                                 goto checks;
6403                         }
6404
6405                         WARN_ON(last_ptr->block_group != used_block_group);
6406                         if (used_block_group != block_group) {
6407                                 btrfs_put_block_group(used_block_group);
6408                                 used_block_group = block_group;
6409                         }
6410 refill_cluster:
6411                         BUG_ON(used_block_group != block_group);
6412                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6413                  * set up a new cluster, so let's just skip it
6414                          * and let the allocator find whatever block
6415                          * it can find.  If we reach this point, we
6416                          * will have tried the cluster allocator
6417                          * plenty of times and not have found
6418                          * anything, so we are likely way too
6419                          * fragmented for the clustering stuff to find
6420                          * anything.
6421                          *
6422                          * However, if the cluster is taken from the
6423                          * current block group, release the cluster
6424                          * first, so that we stand a better chance of
6425                          * succeeding in the unclustered
6426                          * allocation.  */
6427                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6428                             last_ptr->block_group != block_group) {
6429                                 spin_unlock(&last_ptr->refill_lock);
6430                                 goto unclustered_alloc;
6431                         }
6432
6433                         /*
6434                          * this cluster didn't work out, free it and
6435                          * start over
6436                          */
6437                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6438
6439                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6440                                 spin_unlock(&last_ptr->refill_lock);
6441                                 goto unclustered_alloc;
6442                         }
6443
6444                         aligned_cluster = max_t(unsigned long,
6445                                                 empty_cluster + empty_size,
6446                                               block_group->full_stripe_len);
6447
6448                         /* allocate a cluster in this block group */
6449                         ret = btrfs_find_space_cluster(root, block_group,
6450                                                        last_ptr, search_start,
6451                                                        num_bytes,
6452                                                        aligned_cluster);
6453                         if (ret == 0) {
6454                                 /*
6455                                  * now pull our allocation out of this
6456                                  * cluster
6457                                  */
6458                                 offset = btrfs_alloc_from_cluster(block_group,
6459                                                         last_ptr,
6460                                                         num_bytes,
6461                                                         search_start,
6462                                                         &max_extent_size);
6463                                 if (offset) {
6464                                         /* we found one, proceed */
6465                                         spin_unlock(&last_ptr->refill_lock);
6466                                         trace_btrfs_reserve_extent_cluster(root,
6467                                                 block_group, search_start,
6468                                                 num_bytes);
6469                                         goto checks;
6470                                 }
6471                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6472                                    && !failed_cluster_refill) {
6473                                 spin_unlock(&last_ptr->refill_lock);
6474
6475                                 failed_cluster_refill = true;
6476                                 wait_block_group_cache_progress(block_group,
6477                                        num_bytes + empty_cluster + empty_size);
6478                                 goto have_block_group;
6479                         }
6480
6481                         /*
6482                          * at this point we either didn't find a cluster
6483                          * or we weren't able to allocate a block from our
6484                          * cluster.  Free the cluster we've been trying
6485                          * to use, and go to the next block group
6486                          */
6487                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6488                         spin_unlock(&last_ptr->refill_lock);
6489                         goto loop;
6490                 }
6491
6492 unclustered_alloc:
6493                 spin_lock(&block_group->free_space_ctl->tree_lock);
6494                 if (cached &&
6495                     block_group->free_space_ctl->free_space <
6496                     num_bytes + empty_cluster + empty_size) {
6497                         if (block_group->free_space_ctl->free_space >
6498                             max_extent_size)
6499                                 max_extent_size =
6500                                         block_group->free_space_ctl->free_space;
6501                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6502                         goto loop;
6503                 }
6504                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6505
6506                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6507                                                     num_bytes, empty_size,
6508                                                     &max_extent_size);
6509                 /*
6510                  * If we didn't find a chunk, and we haven't failed on this
6511                  * block group before, and this block group is in the middle of
6512                  * caching and we are ok with waiting, then go ahead and wait
6513                  * for progress to be made, and set failed_alloc to true.
6514                  *
6515                  * If failed_alloc is true then we've already waited on this
6516                  * block group once and should move on to the next block group.
6517                  */
6518                 if (!offset && !failed_alloc && !cached &&
6519                     loop > LOOP_CACHING_NOWAIT) {
6520                         wait_block_group_cache_progress(block_group,
6521                                                 num_bytes + empty_size);
6522                         failed_alloc = true;
6523                         goto have_block_group;
6524                 } else if (!offset) {
6525                         if (!cached)
6526                                 have_caching_bg = true;
6527                         goto loop;
6528                 }
6529 checks:
6530                 search_start = stripe_align(root, used_block_group,
6531                                             offset, num_bytes);
6532
6533                 /* move on to the next group */
6534                 if (search_start + num_bytes >
6535                     used_block_group->key.objectid + used_block_group->key.offset) {
6536                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6537                         goto loop;
6538                 }
6539
6540                 if (offset < search_start)
6541                         btrfs_add_free_space(used_block_group, offset,
6542                                              search_start - offset);
6543                 BUG_ON(offset > search_start);
6544
6545                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6546                                                   alloc_type);
6547                 if (ret == -EAGAIN) {
6548                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6549                         goto loop;
6550                 }
6551
6552                 /* we are all good, let's return */
6553                 ins->objectid = search_start;
6554                 ins->offset = num_bytes;
6555
6556                 trace_btrfs_reserve_extent(orig_root, block_group,
6557                                            search_start, num_bytes);
6558                 if (used_block_group != block_group)
6559                         btrfs_put_block_group(used_block_group);
6560                 btrfs_put_block_group(block_group);
6561                 break;
6562 loop:
6563                 failed_cluster_refill = false;
6564                 failed_alloc = false;
6565                 BUG_ON(index != get_block_group_index(block_group));
6566                 if (used_block_group != block_group)
6567                         btrfs_put_block_group(used_block_group);
6568                 btrfs_put_block_group(block_group);
6569         }
6570         up_read(&space_info->groups_sem);
6571
6572         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6573                 goto search;
6574
6575         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6576                 goto search;
6577
6578         /*
6579          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6580          *                      caching kthreads as we move along
6581          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6582          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6583          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6584          *                      again
6585          */
6586         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6587                 index = 0;
6588                 loop++;
6589                 if (loop == LOOP_ALLOC_CHUNK) {
6590                         struct btrfs_trans_handle *trans;
6591
6592                         trans = btrfs_join_transaction(root);
6593                         if (IS_ERR(trans)) {
6594                                 ret = PTR_ERR(trans);
6595                                 goto out;
6596                         }
6597
6598                         ret = do_chunk_alloc(trans, root, flags,
6599                                              CHUNK_ALLOC_FORCE);
6600                         /*
6601                          * Do not bail out on ENOSPC, since we
6602                          * may still be able to make progress.
6603                          */
6604                         if (ret < 0 && ret != -ENOSPC)
6605                                 btrfs_abort_transaction(trans,
6606                                                         root, ret);
6607                         else
6608                                 ret = 0;
6609                         btrfs_end_transaction(trans, root);
6610                         if (ret)
6611                                 goto out;
6612                 }
6613
6614                 if (loop == LOOP_NO_EMPTY_SIZE) {
6615                         empty_size = 0;
6616                         empty_cluster = 0;
6617                 }
6618
6619                 goto search;
6620         } else if (!ins->objectid) {
6621                 ret = -ENOSPC;
6622         } else {
6623                 ret = 0;
6624         }
6625 out:
6626         if (ret == -ENOSPC)
6627                 ins->offset = max_extent_size;
6628         return ret;
6629 }
6630
6631 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6632                             int dump_block_groups)
6633 {
6634         struct btrfs_block_group_cache *cache;
6635         int index = 0;
6636
6637         spin_lock(&info->lock);
6638         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6639                info->flags,
6640                info->total_bytes - info->bytes_used - info->bytes_pinned -
6641                info->bytes_reserved - info->bytes_readonly,
6642                (info->full) ? "" : "not ");
6643         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6644                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6645                info->total_bytes, info->bytes_used, info->bytes_pinned,
6646                info->bytes_reserved, info->bytes_may_use,
6647                info->bytes_readonly);
6648         spin_unlock(&info->lock);
6649
6650         if (!dump_block_groups)
6651                 return;
6652
6653         down_read(&info->groups_sem);
6654 again:
6655         list_for_each_entry(cache, &info->block_groups[index], list) {
6656                 spin_lock(&cache->lock);
6657                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6658                        cache->key.objectid, cache->key.offset,
6659                        btrfs_block_group_used(&cache->item), cache->pinned,
6660                        cache->reserved, cache->ro ? "[readonly]" : "");
6661                 btrfs_dump_free_space(cache, bytes);
6662                 spin_unlock(&cache->lock);
6663         }
6664         if (++index < BTRFS_NR_RAID_TYPES)
6665                 goto again;
6666         up_read(&info->groups_sem);
6667 }
6668
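/*
 * Reserve an extent of at least min_alloc_size bytes.  On ENOSPC the
 * request is retried with a smaller size: num_bytes is halved and
 * capped at the largest free extent found (reported back through
 * ins->offset) until it hits min_alloc_size, at which point the
 * failure is final.
 */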
6669 int btrfs_reserve_extent(struct btrfs_root *root,
6670                          u64 num_bytes, u64 min_alloc_size,
6671                          u64 empty_size, u64 hint_byte,
6672                          struct btrfs_key *ins, int is_data)
6673 {
6674         bool final_tried = false;
6675         u64 flags;
6676         int ret;
6677
6678         flags = btrfs_get_alloc_profile(root, is_data);
6679 again:
6680         WARN_ON(num_bytes < root->sectorsize);
6681         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6682                                flags);
6683
6684         if (ret == -ENOSPC) {
6685                 if (!final_tried && ins->offset) {
6686                         num_bytes = min(num_bytes >> 1, ins->offset);
6687                         num_bytes = round_down(num_bytes, root->sectorsize);
6688                         num_bytes = max(num_bytes, min_alloc_size);
6689                         if (num_bytes == min_alloc_size)
6690                                 final_tried = true;
6691                         goto again;
6692                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6693                         struct btrfs_space_info *sinfo;
6694
6695                         sinfo = __find_space_info(root->fs_info, flags);
6696                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6697                                 flags, num_bytes);
6698                         if (sinfo)
6699                                 dump_space_info(sinfo, num_bytes, 1);
6700                 }
6701         }
6702
6703         return ret;
6704 }
6705
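/*
 * Hand a reserved extent back to its block group.  With pin set the
 * range is pinned instead of returned to the free space cache, and
 * with the DISCARD mount option it is also discarded.
 */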
6706 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6707                                         u64 start, u64 len, int pin)
6708 {
6709         struct btrfs_block_group_cache *cache;
6710         int ret = 0;
6711
6712         cache = btrfs_lookup_block_group(root->fs_info, start);
6713         if (!cache) {
6714                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6715                         start);
6716                 return -ENOSPC;
6717         }
6718
6719         if (btrfs_test_opt(root, DISCARD))
6720                 ret = btrfs_discard_extent(root, start, len, NULL);
6721
6722         if (pin)
6723                 pin_down_extent(root, cache, start, len, 1);
6724         else {
6725                 btrfs_add_free_space(cache, start, len);
6726                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6727         }
6728         btrfs_put_block_group(cache);
6729
6730         trace_btrfs_reserved_extent_free(root, start, len);
6731
6732         return ret;
6733 }
6734
6735 int btrfs_free_reserved_extent(struct btrfs_root *root,
6736                                         u64 start, u64 len)
6737 {
6738         return __btrfs_free_reserved_extent(root, start, len, 0);
6739 }
6740
6741 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6742                                        u64 start, u64 len)
6743 {
6744         return __btrfs_free_reserved_extent(root, start, len, 1);
6745 }
6746
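/*
 * Insert the extent item for a newly allocated data extent along with
 * its inline backref: a shared data ref keyed on the parent block when
 * parent is set, otherwise an extent data ref keyed on root, owner and
 * offset.
 */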
6747 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6748                                       struct btrfs_root *root,
6749                                       u64 parent, u64 root_objectid,
6750                                       u64 flags, u64 owner, u64 offset,
6751                                       struct btrfs_key *ins, int ref_mod)
6752 {
6753         int ret;
6754         struct btrfs_fs_info *fs_info = root->fs_info;
6755         struct btrfs_extent_item *extent_item;
6756         struct btrfs_extent_inline_ref *iref;
6757         struct btrfs_path *path;
6758         struct extent_buffer *leaf;
6759         int type;
6760         u32 size;
6761
6762         if (parent > 0)
6763                 type = BTRFS_SHARED_DATA_REF_KEY;
6764         else
6765                 type = BTRFS_EXTENT_DATA_REF_KEY;
6766
6767         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6768
6769         path = btrfs_alloc_path();
6770         if (!path)
6771                 return -ENOMEM;
6772
6773         path->leave_spinning = 1;
6774         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6775                                       ins, size);
6776         if (ret) {
6777                 btrfs_free_path(path);
6778                 return ret;
6779         }
6780
6781         leaf = path->nodes[0];
6782         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6783                                      struct btrfs_extent_item);
6784         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6785         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6786         btrfs_set_extent_flags(leaf, extent_item,
6787                                flags | BTRFS_EXTENT_FLAG_DATA);
6788
6789         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6790         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6791         if (parent > 0) {
6792                 struct btrfs_shared_data_ref *ref;
6793                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6794                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6795                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6796         } else {
6797                 struct btrfs_extent_data_ref *ref;
6798                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6799                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6800                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6801                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6802                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6803         }
6804
6805         btrfs_mark_buffer_dirty(path->nodes[0]);
6806         btrfs_free_path(path);
6807
6808         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6809         if (ret) { /* -ENOENT, logic error */
6810                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6811                         ins->objectid, ins->offset);
6812                 BUG();
6813         }
6814         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6815         return ret;
6816 }
6817
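/*
 * Insert the extent item for a newly allocated tree block.  With the
 * SKINNY_METADATA incompat feature the tree_block_info is left out and
 * the item is smaller.  On failure the reserved extent is pinned so the
 * space is reclaimed at commit time.
 */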
6818 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6819                                      struct btrfs_root *root,
6820                                      u64 parent, u64 root_objectid,
6821                                      u64 flags, struct btrfs_disk_key *key,
6822                                      int level, struct btrfs_key *ins)
6823 {
6824         int ret;
6825         struct btrfs_fs_info *fs_info = root->fs_info;
6826         struct btrfs_extent_item *extent_item;
6827         struct btrfs_tree_block_info *block_info;
6828         struct btrfs_extent_inline_ref *iref;
6829         struct btrfs_path *path;
6830         struct extent_buffer *leaf;
6831         u32 size = sizeof(*extent_item) + sizeof(*iref);
6832         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6833                                                  SKINNY_METADATA);
6834
6835         if (!skinny_metadata)
6836                 size += sizeof(*block_info);
6837
6838         path = btrfs_alloc_path();
6839         if (!path) {
6840                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6841                                                    root->leafsize);
6842                 return -ENOMEM;
6843         }
6844
6845         path->leave_spinning = 1;
6846         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6847                                       ins, size);
6848         if (ret) {
6849                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6850                                                    root->leafsize);
6851                 btrfs_free_path(path);
6852                 return ret;
6853         }
6854
6855         leaf = path->nodes[0];
6856         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6857                                      struct btrfs_extent_item);
6858         btrfs_set_extent_refs(leaf, extent_item, 1);
6859         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6860         btrfs_set_extent_flags(leaf, extent_item,
6861                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6862
6863         if (skinny_metadata) {
6864                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6865         } else {
6866                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6867                 btrfs_set_tree_block_key(leaf, block_info, key);
6868                 btrfs_set_tree_block_level(leaf, block_info, level);
6869                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6870         }
6871
6872         if (parent > 0) {
6873                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6874                 btrfs_set_extent_inline_ref_type(leaf, iref,
6875                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6876                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6877         } else {
6878                 btrfs_set_extent_inline_ref_type(leaf, iref,
6879                                                  BTRFS_TREE_BLOCK_REF_KEY);
6880                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6881         }
6882
6883         btrfs_mark_buffer_dirty(leaf);
6884         btrfs_free_path(path);
6885
6886         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6887         if (ret) { /* -ENOENT, logic error */
6888                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6889                         ins->objectid, ins->offset);
6890                 BUG();
6891         }
6892
6893         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
6894         return ret;
6895 }
6896
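/*
 * Record a newly allocated file extent as a delayed ref; the extent
 * item itself is inserted later when the delayed refs are run.
 */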
6897 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6898                                      struct btrfs_root *root,
6899                                      u64 root_objectid, u64 owner,
6900                                      u64 offset, struct btrfs_key *ins)
6901 {
6902         int ret;
6903
6904         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6905
6906         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6907                                          ins->offset, 0,
6908                                          root_objectid, owner, offset,
6909                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6910         return ret;
6911 }
6912
6913 /*
6914  * this is used by the tree logging recovery code.  It records that
6915  * an extent has been allocated and makes sure to clear the free
6916  * space cache bits as well.
6917  */
6918 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6919                                    struct btrfs_root *root,
6920                                    u64 root_objectid, u64 owner, u64 offset,
6921                                    struct btrfs_key *ins)
6922 {
6923         int ret;
6924         struct btrfs_block_group_cache *block_group;
6925
6926         /*
6927          * Mixed block groups will exclude before processing the log so we only
6928          * need to do the exclude dance if this fs isn't mixed.
6929          */
6930         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6931                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6932                 if (ret)
6933                         return ret;
6934         }
6935
6936         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6937         if (!block_group)
6938                 return -EINVAL;
6939
6940         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6941                                           RESERVE_ALLOC_NO_ACCOUNT);
6942         BUG_ON(ret); /* logic error */
6943         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6944                                          0, owner, offset, ins, 1);
6945         btrfs_put_block_group(block_group);
6946         return ret;
6947 }
6948
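/*
 * Set up a freshly allocated tree block: stamp the transaction id, take
 * the tree lock, clear the stale bit and mark the block dirty in the
 * right extent io tree (dirty_log_pages for log trees, the
 * transaction's dirty_pages otherwise).
 */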
6949 static struct extent_buffer *
6950 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6951                       u64 bytenr, u32 blocksize, int level)
6952 {
6953         struct extent_buffer *buf;
6954
6955         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6956         if (!buf)
6957                 return ERR_PTR(-ENOMEM);
6958         btrfs_set_header_generation(buf, trans->transid);
6959         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6960         btrfs_tree_lock(buf);
6961         clean_tree_block(trans, root, buf);
6962         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6963
6964         btrfs_set_lock_blocking(buf);
6965         btrfs_set_buffer_uptodate(buf);
6966
6967         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6968                 /*
6969                  * we allow two log transactions at a time, so use different
6970                  * EXTENT bits to differentiate their dirty pages.
6971                  */
6972                 if (root->log_transid % 2 == 0)
6973                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6974                                         buf->start + buf->len - 1, GFP_NOFS);
6975                 else
6976                         set_extent_new(&root->dirty_log_pages, buf->start,
6977                                         buf->start + buf->len - 1, GFP_NOFS);
6978         } else {
6979                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6980                          buf->start + buf->len - 1, GFP_NOFS);
6981         }
6982         trans->blocks_used++;
6983         /* this returns a buffer locked for blocking */
6984         return buf;
6985 }
6986
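/*
 * Pick the block reserve that pays for a new tree block and deduct
 * blocksize bytes from it.  If the bytes aren't there, the global
 * reserve is refreshed and retried once (for the global rsv itself),
 * then a fresh NO_FLUSH metadata reservation is attempted, and as a
 * last resort bytes are taken from the global reserve when it shares
 * this reserve's space_info.
 */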
6987 static struct btrfs_block_rsv *
6988 use_block_rsv(struct btrfs_trans_handle *trans,
6989               struct btrfs_root *root, u32 blocksize)
6990 {
6991         struct btrfs_block_rsv *block_rsv;
6992         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6993         int ret;
6994         bool global_updated = false;
6995
6996         block_rsv = get_block_rsv(trans, root);
6997
6998         if (unlikely(block_rsv->size == 0))
6999                 goto try_reserve;
7000 again:
7001         ret = block_rsv_use_bytes(block_rsv, blocksize);
7002         if (!ret)
7003                 return block_rsv;
7004
7005         if (block_rsv->failfast)
7006                 return ERR_PTR(ret);
7007
7008         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7009                 global_updated = true;
7010                 update_global_block_rsv(root->fs_info);
7011                 goto again;
7012         }
7013
7014         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7015                 static DEFINE_RATELIMIT_STATE(_rs,
7016                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7017                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7018                 if (__ratelimit(&_rs))
7019                         WARN(1, KERN_DEBUG
7020                                 "btrfs: block rsv returned %d\n", ret);
7021         }
7022 try_reserve:
7023         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7024                                      BTRFS_RESERVE_NO_FLUSH);
7025         if (!ret)
7026                 return block_rsv;
7027         /*
7028          * If we couldn't reserve metadata bytes, try to use some from
7029          * the global reserve, but only if this reserve shares the
7030          * global reserve's space_info.
7031          */
7032         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7033             block_rsv->space_info == global_rsv->space_info) {
7034                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7035                 if (!ret)
7036                         return global_rsv;
7037         }
7038         return ERR_PTR(ret);
7039 }
7040
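/*
 * Undo use_block_rsv(): return blocksize bytes to the reserve and
 * release anything above its target size.
 */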
7041 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7042                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7043 {
7044         block_rsv_add_bytes(block_rsv, blocksize, 0);
7045         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7046 }
7047
7048 /*
7049  * finds a free extent and does all the dirty work required for allocation.
7050  * the key for the extent is returned through ins, and the function
7051  * itself returns the tree buffer for the first block of the extent.
7052  *
7053  * returns the tree buffer or an ERR_PTR on error.
7054  */
7055 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
7056                                         struct btrfs_root *root, u32 blocksize,
7057                                         u64 parent, u64 root_objectid,
7058                                         struct btrfs_disk_key *key, int level,
7059                                         u64 hint, u64 empty_size)
7060 {
7061         struct btrfs_key ins;
7062         struct btrfs_block_rsv *block_rsv;
7063         struct extent_buffer *buf;
7064         u64 flags = 0;
7065         int ret;
7066         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7067                                                  SKINNY_METADATA);
7068
7069         block_rsv = use_block_rsv(trans, root, blocksize);
7070         if (IS_ERR(block_rsv))
7071                 return ERR_CAST(block_rsv);
7072
7073         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7074                                    empty_size, hint, &ins, 0);
7075         if (ret) {
7076                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7077                 return ERR_PTR(ret);
7078         }
7079
7080         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7081                                     blocksize, level);
7082         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7083
7084         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7085                 if (parent == 0)
7086                         parent = ins.objectid;
7087                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7088         } else
7089                 BUG_ON(parent > 0);
7090
7091         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7092                 struct btrfs_delayed_extent_op *extent_op;
7093                 extent_op = btrfs_alloc_delayed_extent_op();
7094                 BUG_ON(!extent_op); /* -ENOMEM */
7095                 if (key)
7096                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7097                 else
7098                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7099                 extent_op->flags_to_set = flags;
7100                 if (skinny_metadata)
7101                         extent_op->update_key = 0;
7102                 else
7103                         extent_op->update_key = 1;
7104                 extent_op->update_flags = 1;
7105                 extent_op->is_data = 0;
7106                 extent_op->level = level;
7107
7108                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7109                                         ins.objectid,
7110                                         ins.offset, parent, root_objectid,
7111                                         level, BTRFS_ADD_DELAYED_EXTENT,
7112                                         extent_op, 0);
7113                 BUG_ON(ret); /* -ENOMEM */
7114         }
7115         return buf;
7116 }
7117
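/*
 * State shared by the tree walking helpers below while a subvolume is
 * dropped: cached ref counts and flags for each level, the current
 * stage (DROP_REFERENCE or UPDATE_BACKREF) and the readahead window.
 */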
7118 struct walk_control {
7119         u64 refs[BTRFS_MAX_LEVEL];
7120         u64 flags[BTRFS_MAX_LEVEL];
7121         struct btrfs_key update_progress;
7122         int stage;
7123         int level;
7124         int shared_level;
7125         int update_ref;
7126         int keep_locks;
7127         int reada_slot;
7128         int reada_count;
7129         int for_reloc;
7130 };
7131
7132 #define DROP_REFERENCE  1
7133 #define UPDATE_BACKREF  2
7134
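/*
 * Read ahead the children of the node at wc->level, growing or
 * shrinking the readahead window depending on how much of the previous
 * window was consumed, and skipping blocks the walk is known not to
 * descend into.
 */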
7135 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7136                                      struct btrfs_root *root,
7137                                      struct walk_control *wc,
7138                                      struct btrfs_path *path)
7139 {
7140         u64 bytenr;
7141         u64 generation;
7142         u64 refs;
7143         u64 flags;
7144         u32 nritems;
7145         u32 blocksize;
7146         struct btrfs_key key;
7147         struct extent_buffer *eb;
7148         int ret;
7149         int slot;
7150         int nread = 0;
7151
7152         if (path->slots[wc->level] < wc->reada_slot) {
7153                 wc->reada_count = wc->reada_count * 2 / 3;
7154                 wc->reada_count = max(wc->reada_count, 2);
7155         } else {
7156                 wc->reada_count = wc->reada_count * 3 / 2;
7157                 wc->reada_count = min_t(int, wc->reada_count,
7158                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7159         }
7160
7161         eb = path->nodes[wc->level];
7162         nritems = btrfs_header_nritems(eb);
7163         blocksize = btrfs_level_size(root, wc->level - 1);
7164
7165         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7166                 if (nread >= wc->reada_count)
7167                         break;
7168
7169                 cond_resched();
7170                 bytenr = btrfs_node_blockptr(eb, slot);
7171                 generation = btrfs_node_ptr_generation(eb, slot);
7172
7173                 if (slot == path->slots[wc->level])
7174                         goto reada;
7175
7176                 if (wc->stage == UPDATE_BACKREF &&
7177                     generation <= root->root_key.offset)
7178                         continue;
7179
7180                 /* We don't lock the tree block, it's OK to be racy here */
7181                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7182                                                wc->level - 1, 1, &refs,
7183                                                &flags);
7184                 /* We don't care about errors in readahead. */
7185                 if (ret < 0)
7186                         continue;
7187                 BUG_ON(refs == 0);
7188
7189                 if (wc->stage == DROP_REFERENCE) {
7190                         if (refs == 1)
7191                                 goto reada;
7192
7193                         if (wc->level == 1 &&
7194                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7195                                 continue;
7196                         if (!wc->update_ref ||
7197                             generation <= root->root_key.offset)
7198                                 continue;
7199                         btrfs_node_key_to_cpu(eb, &key, slot);
7200                         ret = btrfs_comp_cpu_keys(&key,
7201                                                   &wc->update_progress);
7202                         if (ret < 0)
7203                                 continue;
7204                 } else {
7205                         if (wc->level == 1 &&
7206                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7207                                 continue;
7208                 }
7209 reada:
7210                 ret = readahead_tree_block(root, bytenr, blocksize,
7211                                            generation);
7212                 if (ret)
7213                         break;
7214                 nread++;
7215         }
7216         wc->reada_slot = slot;
7217 }
7218
7219 /*
7220  * helper to process tree block while walking down the tree.
7221  *
7222  * when wc->stage == UPDATE_BACKREF, this function updates
7223  * back refs for pointers in the block.
7224  *
7225  * NOTE: return value 1 means we should stop walking down.
7226  */
7227 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7228                                    struct btrfs_root *root,
7229                                    struct btrfs_path *path,
7230                                    struct walk_control *wc, int lookup_info)
7231 {
7232         int level = wc->level;
7233         struct extent_buffer *eb = path->nodes[level];
7234         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7235         int ret;
7236
7237         if (wc->stage == UPDATE_BACKREF &&
7238             btrfs_header_owner(eb) != root->root_key.objectid)
7239                 return 1;
7240
7241         /*
7242          * when the reference count of a tree block is 1, it won't increase
7243          * again. once the full backref flag is set, we never clear it.
7244          */
7245         if (lookup_info &&
7246             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7247              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7248                 BUG_ON(!path->locks[level]);
7249                 ret = btrfs_lookup_extent_info(trans, root,
7250                                                eb->start, level, 1,
7251                                                &wc->refs[level],
7252                                                &wc->flags[level]);
7253                 BUG_ON(ret == -ENOMEM);
7254                 if (ret)
7255                         return ret;
7256                 BUG_ON(wc->refs[level] == 0);
7257         }
7258
7259         if (wc->stage == DROP_REFERENCE) {
7260                 if (wc->refs[level] > 1)
7261                         return 1;
7262
7263                 if (path->locks[level] && !wc->keep_locks) {
7264                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7265                         path->locks[level] = 0;
7266                 }
7267                 return 0;
7268         }
7269
7270         /* wc->stage == UPDATE_BACKREF */
7271         if (!(wc->flags[level] & flag)) {
7272                 BUG_ON(!path->locks[level]);
7273                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7274                 BUG_ON(ret); /* -ENOMEM */
7275                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7276                 BUG_ON(ret); /* -ENOMEM */
7277                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7278                                                   eb->len, flag,
7279                                                   btrfs_header_level(eb), 0);
7280                 BUG_ON(ret); /* -ENOMEM */
7281                 wc->flags[level] |= flag;
7282         }
7283
7284         /*
7285          * the block is shared by multiple trees, so it's not good to
7286          * keep the tree lock
7287          */
7288         if (path->locks[level] && level > 0) {
7289                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7290                 path->locks[level] = 0;
7291         }
7292         return 0;
7293 }
7294
7295 /*
7296  * helper to process tree block pointer.
7297  *
7298  * when wc->stage == DROP_REFERENCE, this function checks the
7299  * reference count of the block pointed to. if the block
7300  * is shared and we need to update back refs for the subtree
7301  * rooted at the block, this function changes wc->stage to
7302  * UPDATE_BACKREF. if the block is shared and there is no
7303  * need to update back refs, this function drops the reference
7304  * to the block.
7305  *
7306  * NOTE: return value 1 means we should stop walking down.
7307  */
7308 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7309                                  struct btrfs_root *root,
7310                                  struct btrfs_path *path,
7311                                  struct walk_control *wc, int *lookup_info)
7312 {
7313         u64 bytenr;
7314         u64 generation;
7315         u64 parent;
7316         u32 blocksize;
7317         struct btrfs_key key;
7318         struct extent_buffer *next;
7319         int level = wc->level;
7320         int reada = 0;
7321         int ret = 0;
7322
7323         generation = btrfs_node_ptr_generation(path->nodes[level],
7324                                                path->slots[level]);
7325         /*
7326          * if the lower level block was created before the snapshot
7327          * was created, we know there is no need to update back refs
7328          * for the subtree
7329          */
7330         if (wc->stage == UPDATE_BACKREF &&
7331             generation <= root->root_key.offset) {
7332                 *lookup_info = 1;
7333                 return 1;
7334         }
7335
7336         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7337         blocksize = btrfs_level_size(root, level - 1);
7338
7339         next = btrfs_find_tree_block(root, bytenr, blocksize);
7340         if (!next) {
7341                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7342                 if (!next)
7343                         return -ENOMEM;
7344                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7345                                                level - 1);
7346                 reada = 1;
7347         }
7348         btrfs_tree_lock(next);
7349         btrfs_set_lock_blocking(next);
7350
7351         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7352                                        &wc->refs[level - 1],
7353                                        &wc->flags[level - 1]);
7354         if (ret < 0) {
7355                 btrfs_tree_unlock(next);
7356                 return ret;
7357         }
7358
7359         if (unlikely(wc->refs[level - 1] == 0)) {
7360                 btrfs_err(root->fs_info, "Missing references.");
7361                 BUG();
7362         }
7363         *lookup_info = 0;
7364
7365         if (wc->stage == DROP_REFERENCE) {
7366                 if (wc->refs[level - 1] > 1) {
7367                         if (level == 1 &&
7368                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7369                                 goto skip;
7370
7371                         if (!wc->update_ref ||
7372                             generation <= root->root_key.offset)
7373                                 goto skip;
7374
7375                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7376                                               path->slots[level]);
7377                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7378                         if (ret < 0)
7379                                 goto skip;
7380
7381                         wc->stage = UPDATE_BACKREF;
7382                         wc->shared_level = level - 1;
7383                 }
7384         } else {
7385                 if (level == 1 &&
7386                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7387                         goto skip;
7388         }
7389
7390         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7391                 btrfs_tree_unlock(next);
7392                 free_extent_buffer(next);
7393                 next = NULL;
7394                 *lookup_info = 1;
7395         }
7396
7397         if (!next) {
7398                 if (reada && level == 1)
7399                         reada_walk_down(trans, root, wc, path);
7400                 next = read_tree_block(root, bytenr, blocksize, generation);
7401                 if (!next || !extent_buffer_uptodate(next)) {
7402                         free_extent_buffer(next);
7403                         return -EIO;
7404                 }
7405                 btrfs_tree_lock(next);
7406                 btrfs_set_lock_blocking(next);
7407         }
7408
7409         level--;
7410         BUG_ON(level != btrfs_header_level(next));
7411         path->nodes[level] = next;
7412         path->slots[level] = 0;
7413         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7414         wc->level = level;
7415         if (wc->level == 1)
7416                 wc->reada_slot = 0;
7417         return 0;
7418 skip:
7419         wc->refs[level - 1] = 0;
7420         wc->flags[level - 1] = 0;
7421         if (wc->stage == DROP_REFERENCE) {
7422                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7423                         parent = path->nodes[level]->start;
7424                 } else {
7425                         BUG_ON(root->root_key.objectid !=
7426                                btrfs_header_owner(path->nodes[level]));
7427                         parent = 0;
7428                 }
7429
7430                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7431                                 root->root_key.objectid, level - 1, 0, 0);
7432                 BUG_ON(ret); /* -ENOMEM */
7433         }
7434         btrfs_tree_unlock(next);
7435         free_extent_buffer(next);
7436         *lookup_info = 1;
7437         return 1;
7438 }
7439
7440 /*
7441  * helper to process tree block while walking up the tree.
7442  *
7443  * when wc->stage == DROP_REFERENCE, this function drops
7444  * reference count on the block.
7445  *
7446  * when wc->stage == UPDATE_BACKREF, this function changes
7447  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7448  * to UPDATE_BACKREF previously while processing the block.
7449  *
7450  * NOTE: return value 1 means we should stop walking up.
7451  */
7452 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7453                                  struct btrfs_root *root,
7454                                  struct btrfs_path *path,
7455                                  struct walk_control *wc)
7456 {
7457         int ret;
7458         int level = wc->level;
7459         struct extent_buffer *eb = path->nodes[level];
7460         u64 parent = 0;
7461
7462         if (wc->stage == UPDATE_BACKREF) {
7463                 BUG_ON(wc->shared_level < level);
7464                 if (level < wc->shared_level)
7465                         goto out;
7466
7467                 ret = find_next_key(path, level + 1, &wc->update_progress);
7468                 if (ret > 0)
7469                         wc->update_ref = 0;
7470
7471                 wc->stage = DROP_REFERENCE;
7472                 wc->shared_level = -1;
7473                 path->slots[level] = 0;
7474
7475                 /*
7476                  * check reference count again if the block isn't locked.
7477                  * we should start walking down the tree again if reference
7478                  * count is one.
7479                  */
7480                 if (!path->locks[level]) {
7481                         BUG_ON(level == 0);
7482                         btrfs_tree_lock(eb);
7483                         btrfs_set_lock_blocking(eb);
7484                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7485
7486                         ret = btrfs_lookup_extent_info(trans, root,
7487                                                        eb->start, level, 1,
7488                                                        &wc->refs[level],
7489                                                        &wc->flags[level]);
7490                         if (ret < 0) {
7491                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7492                                 path->locks[level] = 0;
7493                                 return ret;
7494                         }
7495                         BUG_ON(wc->refs[level] == 0);
7496                         if (wc->refs[level] == 1) {
7497                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7498                                 path->locks[level] = 0;
7499                                 return 1;
7500                         }
7501                 }
7502         }
7503
7504         /* wc->stage == DROP_REFERENCE */
7505         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7506
7507         if (wc->refs[level] == 1) {
7508                 if (level == 0) {
7509                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7510                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7511                                                     wc->for_reloc);
7512                         else
7513                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7514                                                     wc->for_reloc);
7515                         BUG_ON(ret); /* -ENOMEM */
7516                 }
7517                 /* make block locked assertion in clean_tree_block happy */
7518                 if (!path->locks[level] &&
7519                     btrfs_header_generation(eb) == trans->transid) {
7520                         btrfs_tree_lock(eb);
7521                         btrfs_set_lock_blocking(eb);
7522                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7523                 }
7524                 clean_tree_block(trans, root, eb);
7525         }
7526
7527         if (eb == root->node) {
7528                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7529                         parent = eb->start;
7530                 else
7531                         BUG_ON(root->root_key.objectid !=
7532                                btrfs_header_owner(eb));
7533         } else {
7534                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7535                         parent = path->nodes[level + 1]->start;
7536                 else
7537                         BUG_ON(root->root_key.objectid !=
7538                                btrfs_header_owner(path->nodes[level + 1]));
7539         }
7540
7541         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7542 out:
7543         wc->refs[level] = 0;
7544         wc->flags[level] = 0;
7545         return 0;
7546 }
7547
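/*
 * Walk down from wc->level towards the leaves, running walk_down_proc()
 * on each block and do_walk_down() on each child pointer, until we hit
 * a leaf, a block we must not descend into, or the end of the current
 * node.
 */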
7548 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7549                                    struct btrfs_root *root,
7550                                    struct btrfs_path *path,
7551                                    struct walk_control *wc)
7552 {
7553         int level = wc->level;
7554         int lookup_info = 1;
7555         int ret;
7556
7557         while (level >= 0) {
7558                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7559                 if (ret > 0)
7560                         break;
7561
7562                 if (level == 0)
7563                         break;
7564
7565                 if (path->slots[level] >=
7566                     btrfs_header_nritems(path->nodes[level]))
7567                         break;
7568
7569                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7570                 if (ret > 0) {
7571                         path->slots[level]++;
7572                         continue;
7573                 } else if (ret < 0)
7574                         return ret;
7575                 level = wc->level;
7576         }
7577         return 0;
7578 }
7579
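/*
 * Walk back up the tree, handing fully processed blocks to
 * walk_up_proc().  Returns 0 when a node with unprocessed slots is
 * found (so the caller can walk down again) and 1 once everything up
 * to max_level is done.
 */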
7580 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7581                                  struct btrfs_root *root,
7582                                  struct btrfs_path *path,
7583                                  struct walk_control *wc, int max_level)
7584 {
7585         int level = wc->level;
7586         int ret;
7587
7588         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7589         while (level < max_level && path->nodes[level]) {
7590                 wc->level = level;
7591                 if (path->slots[level] + 1 <
7592                     btrfs_header_nritems(path->nodes[level])) {
7593                         path->slots[level]++;
7594                         return 0;
7595                 } else {
7596                         ret = walk_up_proc(trans, root, path, wc);
7597                         if (ret > 0)
7598                                 return 0;
7599
7600                         if (path->locks[level]) {
7601                                 btrfs_tree_unlock_rw(path->nodes[level],
7602                                                      path->locks[level]);
7603                                 path->locks[level] = 0;
7604                         }
7605                         free_extent_buffer(path->nodes[level]);
7606                         path->nodes[level] = NULL;
7607                         level++;
7608                 }
7609         }
7610         return 1;
7611 }
7612
7613 /*
7614  * drop a subvolume tree.
7615  *
7616  * this function traverses the tree freeing any blocks that are only
7617  * referenced by the tree.
7618  *
7619  * when a shared tree block is found, this function decreases its
7620  * reference count by one. if update_ref is true, this function
7621  * also makes sure backrefs for the shared block and all lower level
7622  * blocks are properly updated.
7623  *
7624  * If called with for_reloc == 0, may exit early with -EAGAIN
7625  */
7626 int btrfs_drop_snapshot(struct btrfs_root *root,
7627                          struct btrfs_block_rsv *block_rsv, int update_ref,
7628                          int for_reloc)
7629 {
7630         struct btrfs_path *path;
7631         struct btrfs_trans_handle *trans;
7632         struct btrfs_root *tree_root = root->fs_info->tree_root;
7633         struct btrfs_root_item *root_item = &root->root_item;
7634         struct walk_control *wc;
7635         struct btrfs_key key;
7636         int err = 0;
7637         int ret;
7638         int level;
7639         bool root_dropped = false;
7640
7641         path = btrfs_alloc_path();
7642         if (!path) {
7643                 err = -ENOMEM;
7644                 goto out;
7645         }
7646
7647         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7648         if (!wc) {
7649                 btrfs_free_path(path);
7650                 err = -ENOMEM;
7651                 goto out;
7652         }
7653
7654         trans = btrfs_start_transaction(tree_root, 0);
7655         if (IS_ERR(trans)) {
7656                 err = PTR_ERR(trans);
7657                 goto out_free;
7658         }
7659
7660         if (block_rsv)
7661                 trans->block_rsv = block_rsv;
7662
7663         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7664                 level = btrfs_header_level(root->node);
7665                 path->nodes[level] = btrfs_lock_root_node(root);
7666                 btrfs_set_lock_blocking(path->nodes[level]);
7667                 path->slots[level] = 0;
7668                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7669                 memset(&wc->update_progress, 0,
7670                        sizeof(wc->update_progress));
7671         } else {
7672                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7673                 memcpy(&wc->update_progress, &key,
7674                        sizeof(wc->update_progress));
7675
7676                 level = root_item->drop_level;
7677                 BUG_ON(level == 0);
7678                 path->lowest_level = level;
7679                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7680                 path->lowest_level = 0;
7681                 if (ret < 0) {
7682                         err = ret;
7683                         goto out_end_trans;
7684                 }
7685                 WARN_ON(ret > 0);
7686
7687                 /*
7688                  * unlock our path, this is safe because only this
7689                  * function is allowed to delete this snapshot
7690                  */
7691                 btrfs_unlock_up_safe(path, 0);
7692
7693                 level = btrfs_header_level(root->node);
7694                 while (1) {
7695                         btrfs_tree_lock(path->nodes[level]);
7696                         btrfs_set_lock_blocking(path->nodes[level]);
7697                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7698
7699                         ret = btrfs_lookup_extent_info(trans, root,
7700                                                 path->nodes[level]->start,
7701                                                 level, 1, &wc->refs[level],
7702                                                 &wc->flags[level]);
7703                         if (ret < 0) {
7704                                 err = ret;
7705                                 goto out_end_trans;
7706                         }
7707                         BUG_ON(wc->refs[level] == 0);
7708
7709                         if (level == root_item->drop_level)
7710                                 break;
7711
7712                         btrfs_tree_unlock(path->nodes[level]);
7713                         path->locks[level] = 0;
7714                         WARN_ON(wc->refs[level] != 1);
7715                         level--;
7716                 }
7717         }
7718
7719         wc->level = level;
7720         wc->shared_level = -1;
7721         wc->stage = DROP_REFERENCE;
7722         wc->update_ref = update_ref;
7723         wc->keep_locks = 0;
7724         wc->for_reloc = for_reloc;
7725         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7726
7727         while (1) {
7728
7729                 ret = walk_down_tree(trans, root, path, wc);
7730                 if (ret < 0) {
7731                         err = ret;
7732                         break;
7733                 }
7734
7735                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7736                 if (ret < 0) {
7737                         err = ret;
7738                         break;
7739                 }
7740
7741                 if (ret > 0) {
7742                         BUG_ON(wc->stage != DROP_REFERENCE);
7743                         break;
7744                 }
7745
7746                 if (wc->stage == DROP_REFERENCE) {
7747                         level = wc->level;
7748                         btrfs_node_key(path->nodes[level],
7749                                        &root_item->drop_progress,
7750                                        path->slots[level]);
7751                         root_item->drop_level = level;
7752                 }
7753
7754                 BUG_ON(wc->level == 0);
7755                 if (btrfs_should_end_transaction(trans, tree_root) ||
7756                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7757                         ret = btrfs_update_root(trans, tree_root,
7758                                                 &root->root_key,
7759                                                 root_item);
7760                         if (ret) {
7761                                 btrfs_abort_transaction(trans, tree_root, ret);
7762                                 err = ret;
7763                                 goto out_end_trans;
7764                         }
7765
7766                         btrfs_end_transaction_throttle(trans, tree_root);
7767                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7768                                 pr_debug("btrfs: drop snapshot early exit\n");
7769                                 err = -EAGAIN;
7770                                 goto out_free;
7771                         }
7772
7773                         trans = btrfs_start_transaction(tree_root, 0);
7774                         if (IS_ERR(trans)) {
7775                                 err = PTR_ERR(trans);
7776                                 goto out_free;
7777                         }
7778                         if (block_rsv)
7779                                 trans->block_rsv = block_rsv;
7780                 }
7781         }
7782         btrfs_release_path(path);
7783         if (err)
7784                 goto out_end_trans;
7785
7786         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7787         if (ret) {
7788                 btrfs_abort_transaction(trans, tree_root, ret);
7789                 goto out_end_trans;
7790         }
7791
7792         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7793                 ret = btrfs_find_root(tree_root, &root->root_key, path,
7794                                       NULL, NULL);
7795                 if (ret < 0) {
7796                         btrfs_abort_transaction(trans, tree_root, ret);
7797                         err = ret;
7798                         goto out_end_trans;
7799                 } else if (ret > 0) {
7800                         /* if we fail to delete the orphan item this time
7801                          * around, it'll get picked up the next time.
7802                          *
7803                          * The most common failure here is just -ENOENT.
7804                          */
7805                         btrfs_del_orphan_item(trans, tree_root,
7806                                               root->root_key.objectid);
7807                 }
7808         }
7809
7810         if (root->in_radix) {
7811                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7812         } else {
7813                 free_extent_buffer(root->node);
7814                 free_extent_buffer(root->commit_root);
7815                 btrfs_put_fs_root(root);
7816         }
7817         root_dropped = true;
7818 out_end_trans:
7819         btrfs_end_transaction_throttle(trans, tree_root);
7820 out_free:
7821         kfree(wc);
7822         btrfs_free_path(path);
7823 out:
7824         /*
7825          * So if we need to stop dropping the snapshot for whatever reason we
7826          * need to make sure to add it back to the dead root list so that we
7827          * keep trying to do the work later.  This also cleans up roots we
7828          * don't have in the radix (like when we recover after a power fail
7829          * or unmount) so we don't leak memory.
7830          */
7831         if (!for_reloc && root_dropped == false)
7832                 btrfs_add_dead_root(root);
7833         if (err)
7834                 btrfs_std_error(root->fs_info, err);
7835         return err;
7836 }
7837
7838 /*
7839  * drop subtree rooted at tree block 'node'.
7840  *
7841  * NOTE: this function will unlock and release tree block 'node'.
7842  * only used by the relocation code
7843  */
7844 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7845                         struct btrfs_root *root,
7846                         struct extent_buffer *node,
7847                         struct extent_buffer *parent)
7848 {
7849         struct btrfs_path *path;
7850         struct walk_control *wc;
7851         int level;
7852         int parent_level;
7853         int ret = 0;
7854         int wret;
7855
7856         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7857
7858         path = btrfs_alloc_path();
7859         if (!path)
7860                 return -ENOMEM;
7861
7862         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7863         if (!wc) {
7864                 btrfs_free_path(path);
7865                 return -ENOMEM;
7866         }
7867
7868         btrfs_assert_tree_locked(parent);
7869         parent_level = btrfs_header_level(parent);
7870         extent_buffer_get(parent);
7871         path->nodes[parent_level] = parent;
7872         path->slots[parent_level] = btrfs_header_nritems(parent);
7873
7874         btrfs_assert_tree_locked(node);
7875         level = btrfs_header_level(node);
7876         path->nodes[level] = node;
7877         path->slots[level] = 0;
7878         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7879
7880         wc->refs[parent_level] = 1;
7881         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7882         wc->level = level;
7883         wc->shared_level = -1;
7884         wc->stage = DROP_REFERENCE;
7885         wc->update_ref = 0;
7886         wc->keep_locks = 1;
7887         wc->for_reloc = 1;
7888         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7889
7890         while (1) {
7891                 wret = walk_down_tree(trans, root, path, wc);
7892                 if (wret < 0) {
7893                         ret = wret;
7894                         break;
7895                 }
7896
7897                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7898                 if (wret < 0)
7899                         ret = wret;
7900                 if (wret != 0)
7901                         break;
7902         }
7903
7904         kfree(wc);
7905         btrfs_free_path(path);
7906         return ret;
7907 }
7908
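/*
 * Work out the profile a block group's chunk type should be converted
 * to: honour an active restripe target when one is set, otherwise
 * degrade striped or mirrored profiles to whatever the current count of
 * rw plus missing devices can support.
 */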
7909 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7910 {
7911         u64 num_devices;
7912         u64 stripped;
7913
7914         /*
7915          * if restripe for this chunk_type is on, pick the target profile
7916          * and return it, otherwise do the usual balance
7917          */
7918         stripped = get_restripe_target(root->fs_info, flags);
7919         if (stripped)
7920                 return extended_to_chunk(stripped);
7921
7922         /*
7923          * we add in the count of missing devices because we want
7924          * to make sure that any RAID levels on a degraded FS
7925          * continue to be honored.
7926          */
7927         num_devices = root->fs_info->fs_devices->rw_devices +
7928                 root->fs_info->fs_devices->missing_devices;
7929
7930         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7931                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7932                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7933
7934         if (num_devices == 1) {
7935                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7936                 stripped = flags & ~stripped;
7937
7938                 /* turn raid0 into single device chunks */
7939                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7940                         return stripped;
7941
7942                 /* turn mirroring into duplication */
7943                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7944                              BTRFS_BLOCK_GROUP_RAID10))
7945                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7946         } else {
7947                 /* they already had raid on here, just return */
7948                 if (flags & stripped)
7949                         return flags;
7950
7951                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7952                 stripped = flags & ~stripped;
7953
7954                 /* switch duplicated blocks with raid1 */
7955                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7956                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7957
7958                 /* this is drive concat, leave it alone */
7959         }
7960
7961         return flags;
7962 }
7963
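/*
 * Try to mark a block group read-only.  Its unused bytes move into
 * bytes_readonly, but only if the space_info can still cover all
 * existing reservations plus a small cushion for chunk allocation
 * (the cushion is waived for data groups or when force is set).
 */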
7964 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7965 {
7966         struct btrfs_space_info *sinfo = cache->space_info;
7967         u64 num_bytes;
7968         u64 min_allocable_bytes;
7969         int ret = -ENOSPC;
7970
7971
7972         /*
7973          * We need some metadata space and system metadata space for
7974          * allocating chunks in some corner cases, so keep a minimum
7975          * cushion unless we are forced to mark the group read-only.
7976          */
7977         if ((sinfo->flags &
7978              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7979             !force)
7980                 min_allocable_bytes = 1 * 1024 * 1024;
7981         else
7982                 min_allocable_bytes = 0;
7983
7984         spin_lock(&sinfo->lock);
7985         spin_lock(&cache->lock);
7986
7987         if (cache->ro) {
7988                 ret = 0;
7989                 goto out;
7990         }
7991
7992         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7993                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7994
7995         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7996             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7997             min_allocable_bytes <= sinfo->total_bytes) {
7998                 sinfo->bytes_readonly += num_bytes;
7999                 cache->ro = 1;
8000                 ret = 0;
8001         }
8002 out:
8003         spin_unlock(&cache->lock);
8004         spin_unlock(&sinfo->lock);
8005         return ret;
8006 }
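
/*
 * Worked example (illustrative, not in the original): a 1GiB block group
 * with 100MiB used, 4MiB of super stripes and nothing reserved or pinned
 * has num_bytes = 1024 - 100 - 4 = 920MiB of unused space.  Marking it
 * read-only moves those 920MiB into bytes_readonly, so the group only
 * flips to RO if the rest of the space_info can still cover
 * min_allocable_bytes afterwards.
 */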
8007
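/*
 * Try to mark a block group read-only: first migrate its profile via
 * update_block_group_flags (allocating a chunk of the new profile if
 * needed), then, if the space_info cannot absorb the read-only bytes,
 * force-allocate a fresh chunk and retry once.
 */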
8008 int btrfs_set_block_group_ro(struct btrfs_root *root,
8009                              struct btrfs_block_group_cache *cache)
8011 {
8012         struct btrfs_trans_handle *trans;
8013         u64 alloc_flags;
8014         int ret;
8015
8016         BUG_ON(cache->ro);
8017
8018         trans = btrfs_join_transaction(root);
8019         if (IS_ERR(trans))
8020                 return PTR_ERR(trans);
8021
8022         alloc_flags = update_block_group_flags(root, cache->flags);
8023         if (alloc_flags != cache->flags) {
8024                 ret = do_chunk_alloc(trans, root, alloc_flags,
8025                                      CHUNK_ALLOC_FORCE);
8026                 if (ret < 0)
8027                         goto out;
8028         }
8029
8030         ret = set_block_group_ro(cache, 0);
8031         if (!ret)
8032                 goto out;
8033         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8034         ret = do_chunk_alloc(trans, root, alloc_flags,
8035                              CHUNK_ALLOC_FORCE);
8036         if (ret < 0)
8037                 goto out;
8038         ret = set_block_group_ro(cache, 0);
8039 out:
8040         btrfs_end_transaction(trans, root);
8041         return ret;
8042 }
8043
8044 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8045                             struct btrfs_root *root, u64 type)
8046 {
8047         u64 alloc_flags = get_alloc_profile(root, type);
8048         return do_chunk_alloc(trans, root, alloc_flags,
8049                               CHUNK_ALLOC_FORCE);
8050 }
8051
8052 /*
8053  * helper to account the unused space of all the readonly block groups in
8054  * the list. takes mirrors into account.
8055  */
8056 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
8057 {
8058         struct btrfs_block_group_cache *block_group;
8059         u64 free_bytes = 0;
8060         int factor;
8061
8062         list_for_each_entry(block_group, groups_list, list) {
8063                 spin_lock(&block_group->lock);
8064
8065                 if (!block_group->ro) {
8066                         spin_unlock(&block_group->lock);
8067                         continue;
8068                 }
8069
8070                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8071                                           BTRFS_BLOCK_GROUP_RAID10 |
8072                                           BTRFS_BLOCK_GROUP_DUP))
8073                         factor = 2;
8074                 else
8075                         factor = 1;
8076
8077                 free_bytes += (block_group->key.offset -
8078                                btrfs_block_group_used(&block_group->item)) *
8079                                factor;
8080
8081                 spin_unlock(&block_group->lock);
8082         }
8083
8084         return free_bytes;
8085 }
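
/*
 * Worked example (illustrative): a read-only RAID1 block group of 1GiB
 * with 300MiB used reports (1024 - 300) * 2 = 1448MiB here, because
 * every free byte in the group still occupies two bytes of raw disk
 * space.
 */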
8086
8087 /*
8088  * helper to account the unused space of all the readonly block groups in
8089  * the space_info. takes mirrors into account.
8090  */
8091 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8092 {
8093         int i;
8094         u64 free_bytes = 0;
8095
8096         spin_lock(&sinfo->lock);
8097
8098         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8099                 if (!list_empty(&sinfo->block_groups[i]))
8100                         free_bytes += __btrfs_get_ro_block_group_free_space(
8101                                                 &sinfo->block_groups[i]);
8102
8103         spin_unlock(&sinfo->lock);
8104
8105         return free_bytes;
8106 }
8107
8108 void btrfs_set_block_group_rw(struct btrfs_root *root,
8109                               struct btrfs_block_group_cache *cache)
8110 {
8111         struct btrfs_space_info *sinfo = cache->space_info;
8112         u64 num_bytes;
8113
8114         BUG_ON(!cache->ro);
8115
8116         spin_lock(&sinfo->lock);
8117         spin_lock(&cache->lock);
8118         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8119                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8120         sinfo->bytes_readonly -= num_bytes;
8121         cache->ro = 0;
8122         spin_unlock(&cache->lock);
8123         spin_unlock(&sinfo->lock);
8124 }
8125
8126 /*
8127  * checks to see if it's even possible to relocate this block group.
8128  *
8129  * @return - -1 if it's not a good idea to relocate this block group, 0 if
8130  * it's ok to go ahead and try.
8131  */
8132 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8133 {
8134         struct btrfs_block_group_cache *block_group;
8135         struct btrfs_space_info *space_info;
8136         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8137         struct btrfs_device *device;
8138         struct btrfs_trans_handle *trans;
8139         u64 min_free;
8140         u64 dev_min = 1;
8141         u64 dev_nr = 0;
8142         u64 target;
8143         int index;
8144         int full = 0;
8145         int ret = 0;
8146
8147         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8148
8149         /* odd, couldn't find the block group, leave it alone */
8150         if (!block_group)
8151                 return -1;
8152
8153         min_free = btrfs_block_group_used(&block_group->item);
8154
8155         /* no bytes used, we're good */
8156         if (!min_free)
8157                 goto out;
8158
8159         space_info = block_group->space_info;
8160         spin_lock(&space_info->lock);
8161
8162         full = space_info->full;
8163
8164         /*
8165          * if this is the last block group we have in this space, we can't
8166          * relocate it unless we're able to allocate a new chunk below.
8167          *
8168          * Otherwise, we need to make sure we have room in the space to handle
8169          * all of the extents from this block group.  If we can, we're good.
8170          */
8171         if ((space_info->total_bytes != block_group->key.offset) &&
8172             (space_info->bytes_used + space_info->bytes_reserved +
8173              space_info->bytes_pinned + space_info->bytes_readonly +
8174              min_free < space_info->total_bytes)) {
8175                 spin_unlock(&space_info->lock);
8176                 goto out;
8177         }
8178         spin_unlock(&space_info->lock);
8179
8180         /*
8181          * ok we don't have enough space, but maybe we have free space on our
8182          * devices to allocate new chunks for relocation, so loop through our
8183          * alloc devices and guess if we have enough space.  if this block
8184          * group is going to be restriped, run checks against the target
8185          * profile instead of the current one.
8186          */
8187         ret = -1;
8188
8189         /*
8190          * index:
8191          *      0: raid10
8192          *      1: raid1
8193          *      2: dup
8194          *      3: raid0
8195          *      4: single
8196          */
8197         target = get_restripe_target(root->fs_info, block_group->flags);
8198         if (target) {
8199                 index = __get_raid_index(extended_to_chunk(target));
8200         } else {
8201                 /*
8202                  * this is just a balance, so if we were marked as full
8203                  * we know there is no space for a new chunk
8204                  */
8205                 if (full)
8206                         goto out;
8207
8208                 index = get_block_group_index(block_group);
8209         }
8210
8211         if (index == BTRFS_RAID_RAID10) {
8212                 dev_min = 4;
8213                 /* Divide by 2 */
8214                 min_free >>= 1;
8215         } else if (index == BTRFS_RAID_RAID1) {
8216                 dev_min = 2;
8217         } else if (index == BTRFS_RAID_DUP) {
8218                 /* Multiply by 2 */
8219                 min_free <<= 1;
8220         } else if (index == BTRFS_RAID_RAID0) {
8221                 dev_min = fs_devices->rw_devices;
8222                 do_div(min_free, dev_min);
8223         }
8224
8225         /* We need to do this so that we can look at pending chunks */
8226         trans = btrfs_join_transaction(root);
8227         if (IS_ERR(trans)) {
8228                 ret = PTR_ERR(trans);
8229                 goto out;
8230         }
8231
8232         mutex_lock(&root->fs_info->chunk_mutex);
8233         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8234                 u64 dev_offset;
8235
8236                 /*
8237                  * check to make sure we can actually find a chunk with enough
8238                  * space to fit our block group in.
8239                  */
8240                 if (device->total_bytes > device->bytes_used + min_free &&
8241                     !device->is_tgtdev_for_dev_replace) {
8242                         ret = find_free_dev_extent(trans, device, min_free,
8243                                                    &dev_offset, NULL);
8244                         if (!ret)
8245                                 dev_nr++;
8246
8247                         if (dev_nr >= dev_min)
8248                                 break;
8249
8250                         ret = -1;
8251                 }
8252         }
8253         mutex_unlock(&root->fs_info->chunk_mutex);
8254         btrfs_end_transaction(trans, root);
8255 out:
8256         btrfs_put_block_group(block_group);
8257         return ret;
8258 }
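
/*
 * Worked example (illustrative): for a RAID10 group with 600MiB used,
 * the loop above wants at least 4 devices that each have a ~300MiB free
 * extent, since each mirrored stripe holds half the data.  The same
 * 600MiB in a DUP group needs a single 1200MiB extent, because both
 * copies land on one device, and RAID0 spreads min_free evenly across
 * all rw devices.
 */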
8259
8260 static int find_first_block_group(struct btrfs_root *root,
8261                 struct btrfs_path *path, struct btrfs_key *key)
8262 {
8263         int ret = 0;
8264         struct btrfs_key found_key;
8265         struct extent_buffer *leaf;
8266         int slot;
8267
8268         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8269         if (ret < 0)
8270                 goto out;
8271
8272         while (1) {
8273                 slot = path->slots[0];
8274                 leaf = path->nodes[0];
8275                 if (slot >= btrfs_header_nritems(leaf)) {
8276                         ret = btrfs_next_leaf(root, path);
8277                         if (ret == 0)
8278                                 continue;
8279                         if (ret < 0)
8280                                 goto out;
8281                         break;
8282                 }
8283                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8284
8285                 if (found_key.objectid >= key->objectid &&
8286                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8287                         ret = 0;
8288                         goto out;
8289                 }
8290                 path->slots[0]++;
8291         }
8292 out:
8293         return ret;
8294 }
8295
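/*
 * Drop the iref each block group holds on its free space cache inode so
 * the inode can finally be released (e.g. during unmount).
 */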
8296 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8297 {
8298         struct btrfs_block_group_cache *block_group;
8299         u64 last = 0;
8300
8301         while (1) {
8302                 struct inode *inode;
8303
8304                 block_group = btrfs_lookup_first_block_group(info, last);
8305                 while (block_group) {
8306                         spin_lock(&block_group->lock);
8307                         if (block_group->iref)
8308                                 break;
8309                         spin_unlock(&block_group->lock);
8310                         block_group = next_block_group(info->tree_root,
8311                                                        block_group);
8312                 }
8313                 if (!block_group) {
8314                         if (last == 0)
8315                                 break;
8316                         last = 0;
8317                         continue;
8318                 }
8319
8320                 inode = block_group->inode;
8321                 block_group->iref = 0;
8322                 block_group->inode = NULL;
8323                 spin_unlock(&block_group->lock);
8324                 iput(inode);
8325                 last = block_group->key.objectid + block_group->key.offset;
8326                 btrfs_put_block_group(block_group);
8327         }
8328 }
8329
8330 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8331 {
8332         struct btrfs_block_group_cache *block_group;
8333         struct btrfs_space_info *space_info;
8334         struct btrfs_caching_control *caching_ctl;
8335         struct rb_node *n;
8336
8337         down_write(&info->extent_commit_sem);
8338         while (!list_empty(&info->caching_block_groups)) {
8339                 caching_ctl = list_entry(info->caching_block_groups.next,
8340                                          struct btrfs_caching_control, list);
8341                 list_del(&caching_ctl->list);
8342                 put_caching_control(caching_ctl);
8343         }
8344         up_write(&info->extent_commit_sem);
8345
8346         spin_lock(&info->block_group_cache_lock);
8347         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8348                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8349                                        cache_node);
8350                 rb_erase(&block_group->cache_node,
8351                          &info->block_group_cache_tree);
8352                 spin_unlock(&info->block_group_cache_lock);
8353
8354                 down_write(&block_group->space_info->groups_sem);
8355                 list_del(&block_group->list);
8356                 up_write(&block_group->space_info->groups_sem);
8357
8358                 if (block_group->cached == BTRFS_CACHE_STARTED)
8359                         wait_block_group_cache_done(block_group);
8360
8361                 /*
8362                  * We haven't cached this block group, which means we could
8363                  * possibly have excluded extents on this block group.
8364                  */
8365                 if (block_group->cached == BTRFS_CACHE_NO ||
8366                     block_group->cached == BTRFS_CACHE_ERROR)
8367                         free_excluded_extents(info->extent_root, block_group);
8368
8369                 btrfs_remove_free_space_cache(block_group);
8370                 btrfs_put_block_group(block_group);
8371
8372                 spin_lock(&info->block_group_cache_lock);
8373         }
8374         spin_unlock(&info->block_group_cache_lock);
8375
8376         /* now that all the block groups are freed, go through and
8377          * free all the space_info structs.  This is only called during
8378          * the final stages of unmount, and so we know nobody is
8379          * using them.  We call synchronize_rcu() once before we start,
8380          * just to be on the safe side.
8381          */
8382         synchronize_rcu();
8383
8384         release_global_block_rsv(info);
8385
8386         while (!list_empty(&info->space_info)) {
8387                 int i;
8388
8389                 space_info = list_entry(info->space_info.next,
8390                                         struct btrfs_space_info,
8391                                         list);
8392                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8393                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8394                             space_info->bytes_reserved > 0 ||
8395                             space_info->bytes_may_use > 0)) {
8396                                 dump_space_info(space_info, 0, 0);
8397                         }
8398                 }
8399                 list_del(&space_info->list);
8400                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8401                         struct kobject *kobj;
8402                         kobj = &space_info->block_group_kobjs[i];
8403                         if (kobj->parent) {
8404                                 kobject_del(kobj);
8405                                 kobject_put(kobj);
8406                         }
8407                 }
8408                 kobject_del(&space_info->kobj);
8409                 kobject_put(&space_info->kobj);
8410         }
8411         return 0;
8412 }
8413
8414 static void __link_block_group(struct btrfs_space_info *space_info,
8415                                struct btrfs_block_group_cache *cache)
8416 {
8417         int index = get_block_group_index(cache);
8418
8419         down_write(&space_info->groups_sem);
8420         if (list_empty(&space_info->block_groups[index])) {
8421                 struct kobject *kobj = &space_info->block_group_kobjs[index];
8422                 int ret;
8423
8424                 kobject_get(&space_info->kobj); /* put in release */
8425                 ret = kobject_init_and_add(kobj, &btrfs_raid_ktype,
8426                                            &space_info->kobj,
8427                                            get_raid_name(index));
8428                 if (ret) {
8429                         pr_warn("btrfs: failed to add kobject for block cache. ignoring.\n");
8430                         kobject_put(&space_info->kobj);
8431                 }
8432         }
8433         list_add_tail(&cache->list, &space_info->block_groups[index]);
8434         up_write(&space_info->groups_sem);
8435 }
8436
8437 int btrfs_read_block_groups(struct btrfs_root *root)
8438 {
8439         struct btrfs_path *path;
8440         int ret;
8441         struct btrfs_block_group_cache *cache;
8442         struct btrfs_fs_info *info = root->fs_info;
8443         struct btrfs_space_info *space_info;
8444         struct btrfs_key key;
8445         struct btrfs_key found_key;
8446         struct extent_buffer *leaf;
8447         int need_clear = 0;
8448         u64 cache_gen;
8449
8450         root = info->extent_root;
8451         key.objectid = 0;
8452         key.offset = 0;
8453         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8454         path = btrfs_alloc_path();
8455         if (!path)
8456                 return -ENOMEM;
8457         path->reada = 1;
8458
8459         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8460         if (btrfs_test_opt(root, SPACE_CACHE) &&
8461             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8462                 need_clear = 1;
8463         if (btrfs_test_opt(root, CLEAR_CACHE))
8464                 need_clear = 1;
8465
8466         while (1) {
8467                 ret = find_first_block_group(root, path, &key);
8468                 if (ret > 0)
8469                         break;
8470                 if (ret != 0)
8471                         goto error;
8472                 leaf = path->nodes[0];
8473                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8474                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8475                 if (!cache) {
8476                         ret = -ENOMEM;
8477                         goto error;
8478                 }
8479                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8480                                                 GFP_NOFS);
8481                 if (!cache->free_space_ctl) {
8482                         kfree(cache);
8483                         ret = -ENOMEM;
8484                         goto error;
8485                 }
8486
8487                 atomic_set(&cache->count, 1);
8488                 spin_lock_init(&cache->lock);
8489                 cache->fs_info = info;
8490                 INIT_LIST_HEAD(&cache->list);
8491                 INIT_LIST_HEAD(&cache->cluster_list);
8492
8493                 if (need_clear) {
8494                         /*
8495                          * When we mount with old space cache, we need to
8496                          * set BTRFS_DC_CLEAR and set the dirty flag.
8497                          *
8498                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8499                          *    truncate the old free space cache inode and
8500                          *    set up a new one.
8501                          * b) Setting the 'dirty' flag makes sure that we flush
8502                          *    the new space cache info onto disk.
8503                          */
8504                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8505                         if (btrfs_test_opt(root, SPACE_CACHE))
8506                                 cache->dirty = 1;
8507                 }
8508
8509                 read_extent_buffer(leaf, &cache->item,
8510                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8511                                    sizeof(cache->item));
8512                 memcpy(&cache->key, &found_key, sizeof(found_key));
8513
8514                 key.objectid = found_key.objectid + found_key.offset;
8515                 btrfs_release_path(path);
8516                 cache->flags = btrfs_block_group_flags(&cache->item);
8517                 cache->sectorsize = root->sectorsize;
8518                 cache->full_stripe_len = btrfs_full_stripe_len(root,
8519                                                &root->fs_info->mapping_tree,
8520                                                found_key.objectid);
8521                 btrfs_init_free_space_ctl(cache);
8522
8523                 /*
8524                  * We need to exclude the super stripes now so that the space
8525                  * info has super bytes accounted for, otherwise we'll think
8526                  * we have more space than we actually do.
8527                  */
8528                 ret = exclude_super_stripes(root, cache);
8529                 if (ret) {
8530                         /*
8531                          * We may have excluded something, so call this just in
8532                          * case.
8533                          */
8534                         free_excluded_extents(root, cache);
8535                         kfree(cache->free_space_ctl);
8536                         kfree(cache);
8537                         goto error;
8538                 }
8539
8540                 /*
8541                  * check for two cases: either we are full, and therefore
8542                  * don't need to bother with the caching work since we won't
8543                  * find any space, or we are empty, and we can just add all
8544                  * the space in and be done with it.  This saves us a lot of
8545                  * time, particularly in the full case.
8546                  */
8547                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8548                         cache->last_byte_to_unpin = (u64)-1;
8549                         cache->cached = BTRFS_CACHE_FINISHED;
8550                         free_excluded_extents(root, cache);
8551                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8552                         cache->last_byte_to_unpin = (u64)-1;
8553                         cache->cached = BTRFS_CACHE_FINISHED;
8554                         add_new_free_space(cache, root->fs_info,
8555                                            found_key.objectid,
8556                                            found_key.objectid +
8557                                            found_key.offset);
8558                         free_excluded_extents(root, cache);
8559                 }
8560
8561                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8562                 if (ret) {
8563                         btrfs_remove_free_space_cache(cache);
8564                         btrfs_put_block_group(cache);
8565                         goto error;
8566                 }
8567
8568                 ret = update_space_info(info, cache->flags, found_key.offset,
8569                                         btrfs_block_group_used(&cache->item),
8570                                         &space_info);
8571                 if (ret) {
8572                         btrfs_remove_free_space_cache(cache);
8573                         spin_lock(&info->block_group_cache_lock);
8574                         rb_erase(&cache->cache_node,
8575                                  &info->block_group_cache_tree);
8576                         spin_unlock(&info->block_group_cache_lock);
8577                         btrfs_put_block_group(cache);
8578                         goto error;
8579                 }
8580
8581                 cache->space_info = space_info;
8582                 spin_lock(&cache->space_info->lock);
8583                 cache->space_info->bytes_readonly += cache->bytes_super;
8584                 spin_unlock(&cache->space_info->lock);
8585
8586                 __link_block_group(space_info, cache);
8587
8588                 set_avail_alloc_bits(root->fs_info, cache->flags);
8589                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8590                         set_block_group_ro(cache, 1);
8591         }
8592
8593         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8594                 if (!(get_alloc_profile(root, space_info->flags) &
8595                       (BTRFS_BLOCK_GROUP_RAID10 |
8596                        BTRFS_BLOCK_GROUP_RAID1 |
8597                        BTRFS_BLOCK_GROUP_RAID5 |
8598                        BTRFS_BLOCK_GROUP_RAID6 |
8599                        BTRFS_BLOCK_GROUP_DUP)))
8600                         continue;
8601                 /*
8602                  * avoid allocating from un-mirrored block groups if there are
8603                  * mirrored block groups.
8604                  */
8605                 list_for_each_entry(cache,
8606                                 &space_info->block_groups[BTRFS_RAID_RAID0],
8607                                 list)
8608                         set_block_group_ro(cache, 1);
8609                 list_for_each_entry(cache,
8610                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
8611                                 list)
8612                         set_block_group_ro(cache, 1);
8613         }
8614
8615         init_global_block_rsv(info);
8616         ret = 0;
8617 error:
8618         btrfs_free_path(path);
8619         return ret;
8620 }
8621
8622 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8623                                        struct btrfs_root *root)
8624 {
8625         struct btrfs_block_group_cache *block_group, *tmp;
8626         struct btrfs_root *extent_root = root->fs_info->extent_root;
8627         struct btrfs_block_group_item item;
8628         struct btrfs_key key;
8629         int ret = 0;
8630
8631         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8632                                  new_bg_list) {
8633                 list_del_init(&block_group->new_bg_list);
8634
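                /*
                 * after an earlier failure (and transaction abort) we keep
                 * draining new_bgs but skip the on-disk updates
                 */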
8635                 if (ret)
8636                         continue;
8637
8638                 spin_lock(&block_group->lock);
8639                 memcpy(&item, &block_group->item, sizeof(item));
8640                 memcpy(&key, &block_group->key, sizeof(key));
8641                 spin_unlock(&block_group->lock);
8642
8643                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8644                                         sizeof(item));
8645                 if (ret)
8646                         btrfs_abort_transaction(trans, extent_root, ret);
8647                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8648                                                key.objectid, key.offset);
8649                 if (ret)
8650                         btrfs_abort_transaction(trans, extent_root, ret);
8651         }
8652 }
8653
8654 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8655                            struct btrfs_root *root, u64 bytes_used,
8656                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8657                            u64 size)
8658 {
8659         int ret;
8660         struct btrfs_root *extent_root;
8661         struct btrfs_block_group_cache *cache;
8662
8663         extent_root = root->fs_info->extent_root;
8664
8665         root->fs_info->last_trans_log_full_commit = trans->transid;
8666
8667         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8668         if (!cache)
8669                 return -ENOMEM;
8670         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8671                                         GFP_NOFS);
8672         if (!cache->free_space_ctl) {
8673                 kfree(cache);
8674                 return -ENOMEM;
8675         }
8676
8677         cache->key.objectid = chunk_offset;
8678         cache->key.offset = size;
8679         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8680         cache->sectorsize = root->sectorsize;
8681         cache->fs_info = root->fs_info;
8682         cache->full_stripe_len = btrfs_full_stripe_len(root,
8683                                                &root->fs_info->mapping_tree,
8684                                                chunk_offset);
8685
8686         atomic_set(&cache->count, 1);
8687         spin_lock_init(&cache->lock);
8688         INIT_LIST_HEAD(&cache->list);
8689         INIT_LIST_HEAD(&cache->cluster_list);
8690         INIT_LIST_HEAD(&cache->new_bg_list);
8691
8692         btrfs_init_free_space_ctl(cache);
8693
8694         btrfs_set_block_group_used(&cache->item, bytes_used);
8695         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8696         cache->flags = type;
8697         btrfs_set_block_group_flags(&cache->item, type);
8698
8699         cache->last_byte_to_unpin = (u64)-1;
8700         cache->cached = BTRFS_CACHE_FINISHED;
8701         ret = exclude_super_stripes(root, cache);
8702         if (ret) {
8703                 /*
8704                  * We may have excluded something, so call this just in
8705                  * case.
8706                  */
8707                 free_excluded_extents(root, cache);
8708                 kfree(cache->free_space_ctl);
8709                 kfree(cache);
8710                 return ret;
8711         }
8712
8713         add_new_free_space(cache, root->fs_info, chunk_offset,
8714                            chunk_offset + size);
8715
8716         free_excluded_extents(root, cache);
8717
8718         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8719         if (ret) {
8720                 btrfs_remove_free_space_cache(cache);
8721                 btrfs_put_block_group(cache);
8722                 return ret;
8723         }
8724
8725         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8726                                 &cache->space_info);
8727         if (ret) {
8728                 btrfs_remove_free_space_cache(cache);
8729                 spin_lock(&root->fs_info->block_group_cache_lock);
8730                 rb_erase(&cache->cache_node,
8731                          &root->fs_info->block_group_cache_tree);
8732                 spin_unlock(&root->fs_info->block_group_cache_lock);
8733                 btrfs_put_block_group(cache);
8734                 return ret;
8735         }
8736         update_global_block_rsv(root->fs_info);
8737
8738         spin_lock(&cache->space_info->lock);
8739         cache->space_info->bytes_readonly += cache->bytes_super;
8740         spin_unlock(&cache->space_info->lock);
8741
8742         __link_block_group(cache->space_info, cache);
8743
8744         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8745
8746         set_avail_alloc_bits(extent_root->fs_info, type);
8747
8748         return 0;
8749 }
8750
8751 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8752 {
8753         u64 extra_flags = chunk_to_extended(flags) &
8754                                 BTRFS_EXTENDED_PROFILE_MASK;
8755
8756         write_seqlock(&fs_info->profiles_lock);
8757         if (flags & BTRFS_BLOCK_GROUP_DATA)
8758                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8759         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8760                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8761         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8762                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8763         write_sequnlock(&fs_info->profiles_lock);
8764 }
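
/*
 * Illustrative reader side (a sketch, not in the original file):
 * consumers of the avail_*_alloc_bits fields pair the write_seqlock
 * above with a lockless read_seqbegin/read_seqretry loop; the helper
 * name below is hypothetical.
 */
#if 0
static u64 read_data_alloc_bits(struct btrfs_fs_info *fs_info)
{
        unsigned int seq;
        u64 bits;

        do {
                seq = read_seqbegin(&fs_info->profiles_lock);
                bits = fs_info->avail_data_alloc_bits;
        } while (read_seqretry(&fs_info->profiles_lock, seq));

        return bits;
}
#endif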
8765
8766 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8767                              struct btrfs_root *root, u64 group_start)
8768 {
8769         struct btrfs_path *path;
8770         struct btrfs_block_group_cache *block_group;
8771         struct btrfs_free_cluster *cluster;
8772         struct btrfs_root *tree_root = root->fs_info->tree_root;
8773         struct btrfs_key key;
8774         struct inode *inode;
8775         int ret;
8776         int index;
8777         int factor;
8778
8779         root = root->fs_info->extent_root;
8780
8781         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8782         BUG_ON(!block_group);
8783         BUG_ON(!block_group->ro);
8784
8785         /*
8786          * Free the reserved super bytes from this block group before
8787          * removing it.
8788          */
8789         free_excluded_extents(root, block_group);
8790
8791         memcpy(&key, &block_group->key, sizeof(key));
8792         index = get_block_group_index(block_group);
8793         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8794                                   BTRFS_BLOCK_GROUP_RAID1 |
8795                                   BTRFS_BLOCK_GROUP_RAID10))
8796                 factor = 2;
8797         else
8798                 factor = 1;
8799
8800         /* make sure this block group isn't part of an allocation cluster */
8801         cluster = &root->fs_info->data_alloc_cluster;
8802         spin_lock(&cluster->refill_lock);
8803         btrfs_return_cluster_to_free_space(block_group, cluster);
8804         spin_unlock(&cluster->refill_lock);
8805
8806         /*
8807          * make sure this block group isn't part of a metadata
8808          * allocation cluster
8809          */
8810         cluster = &root->fs_info->meta_alloc_cluster;
8811         spin_lock(&cluster->refill_lock);
8812         btrfs_return_cluster_to_free_space(block_group, cluster);
8813         spin_unlock(&cluster->refill_lock);
8814
8815         path = btrfs_alloc_path();
8816         if (!path) {
8817                 ret = -ENOMEM;
8818                 goto out;
8819         }
8820
8821         inode = lookup_free_space_inode(tree_root, block_group, path);
8822         if (!IS_ERR(inode)) {
8823                 ret = btrfs_orphan_add(trans, inode);
8824                 if (ret) {
8825                         btrfs_add_delayed_iput(inode);
8826                         goto out;
8827                 }
8828                 clear_nlink(inode);
8829                 /* One for the block group's ref */
8830                 spin_lock(&block_group->lock);
8831                 if (block_group->iref) {
8832                         block_group->iref = 0;
8833                         block_group->inode = NULL;
8834                         spin_unlock(&block_group->lock);
8835                         iput(inode);
8836                 } else {
8837                         spin_unlock(&block_group->lock);
8838                 }
8839                 /* One for our lookup ref */
8840                 btrfs_add_delayed_iput(inode);
8841         }
8842
8843         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8844         key.offset = block_group->key.objectid;
8845         key.type = 0;
8846
8847         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8848         if (ret < 0)
8849                 goto out;
8850         if (ret > 0)
8851                 btrfs_release_path(path);
8852         if (ret == 0) {
8853                 ret = btrfs_del_item(trans, tree_root, path);
8854                 if (ret)
8855                         goto out;
8856                 btrfs_release_path(path);
8857         }
8858
8859         spin_lock(&root->fs_info->block_group_cache_lock);
8860         rb_erase(&block_group->cache_node,
8861                  &root->fs_info->block_group_cache_tree);
8862
8863         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8864                 root->fs_info->first_logical_byte = (u64)-1;
8865         spin_unlock(&root->fs_info->block_group_cache_lock);
8866
8867         down_write(&block_group->space_info->groups_sem);
8868         /*
8869          * we must use list_del_init so people can check to see if they
8870          * are still on the list after taking the semaphore
8871          */
8872         list_del_init(&block_group->list);
8873         if (list_empty(&block_group->space_info->block_groups[index])) {
8874                 kobject_del(&block_group->space_info->block_group_kobjs[index]);
8875                 kobject_put(&block_group->space_info->block_group_kobjs[index]);
8876                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8877         }
8878         up_write(&block_group->space_info->groups_sem);
8879
8880         if (block_group->cached == BTRFS_CACHE_STARTED)
8881                 wait_block_group_cache_done(block_group);
8882
8883         btrfs_remove_free_space_cache(block_group);
8884
8885         spin_lock(&block_group->space_info->lock);
8886         block_group->space_info->total_bytes -= block_group->key.offset;
8887         block_group->space_info->bytes_readonly -= block_group->key.offset;
8888         block_group->space_info->disk_total -= block_group->key.offset * factor;
8889         spin_unlock(&block_group->space_info->lock);
8890
8891         memcpy(&key, &block_group->key, sizeof(key));
8892
8893         btrfs_clear_space_info_full(root->fs_info);
8894
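        /* one put for our lookup ref, one for the ref the cache tree held */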
8895         btrfs_put_block_group(block_group);
8896         btrfs_put_block_group(block_group);
8897
8898         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8899         if (ret > 0)
8900                 ret = -EIO;
8901         if (ret < 0)
8902                 goto out;
8903
8904         ret = btrfs_del_item(trans, root, path);
8905 out:
8906         btrfs_free_path(path);
8907         return ret;
8908 }
8909
8910 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8911 {
8912         struct btrfs_space_info *space_info;
8913         struct btrfs_super_block *disk_super;
8914         u64 features;
8915         u64 flags;
8916         int mixed = 0;
8917         int ret;
8918
8919         disk_super = fs_info->super_copy;
8920         if (!btrfs_super_root(disk_super))
8921                 return 1;
8922
8923         features = btrfs_super_incompat_flags(disk_super);
8924         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8925                 mixed = 1;
8926
8927         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8928         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8929         if (ret)
8930                 goto out;
8931
8932         if (mixed) {
8933                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8934                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8935         } else {
8936                 flags = BTRFS_BLOCK_GROUP_METADATA;
8937                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8938                 if (ret)
8939                         goto out;
8940
8941                 flags = BTRFS_BLOCK_GROUP_DATA;
8942                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8943         }
8944 out:
8945         return ret;
8946 }
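
/*
 * Worked example (illustrative): on a filesystem without mixed block
 * groups this creates three space_infos (SYSTEM, METADATA, DATA); with
 * BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS it creates two, SYSTEM and a
 * combined METADATA|DATA.
 */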
8947
8948 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8949 {
8950         return unpin_extent_range(root, start, end);
8951 }
8952
8953 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8954                                u64 num_bytes, u64 *actual_bytes)
8955 {
8956         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8957 }
8958
8959 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8960 {
8961         struct btrfs_fs_info *fs_info = root->fs_info;
8962         struct btrfs_block_group_cache *cache = NULL;
8963         u64 group_trimmed;
8964         u64 start;
8965         u64 end;
8966         u64 trimmed = 0;
8967         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8968         int ret = 0;
8969
8970         /*
8971          * try to trim all FS space; the first block group may start at a non-zero offset.
8972          */
8973         if (range->len == total_bytes)
8974                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8975         else
8976                 cache = btrfs_lookup_block_group(fs_info, range->start);
8977
8978         while (cache) {
8979                 if (cache->key.objectid >= (range->start + range->len)) {
8980                         btrfs_put_block_group(cache);
8981                         break;
8982                 }
8983
8984                 start = max(range->start, cache->key.objectid);
8985                 end = min(range->start + range->len,
8986                                 cache->key.objectid + cache->key.offset);
8987
8988                 if (end - start >= range->minlen) {
8989                         if (!block_group_cache_done(cache)) {
8990                                 ret = cache_block_group(cache, 0);
8991                                 if (ret) {
8992                                         btrfs_put_block_group(cache);
8993                                         break;
8994                                 }
8995                                 ret = wait_block_group_cache_done(cache);
8996                                 if (ret) {
8997                                         btrfs_put_block_group(cache);
8998                                         break;
8999                                 }
9000                         }
9001                         ret = btrfs_trim_block_group(cache,
9002                                                      &group_trimmed,
9003                                                      start,
9004                                                      end,
9005                                                      range->minlen);
9006
9007                         trimmed += group_trimmed;
9008                         if (ret) {
9009                                 btrfs_put_block_group(cache);
9010                                 break;
9011                         }
9012                 }
9013
9014                 cache = next_block_group(fs_info->tree_root, cache);
9015         }
9016
9017         range->len = trimmed;
9018         return ret;
9019 }
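
/*
 * Illustrative userspace counterpart (a sketch, not in the original
 * file, assuming this is reached via the FITRIM ioctl, which is how
 * btrfs wires btrfs_trim_fs up): trimming the whole filesystem from a
 * mounted path.  trim_whole_fs is a hypothetical helper name.
 */
#if 0
#include <fcntl.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int trim_whole_fs(const char *mountpoint)
{
        struct fstrim_range range = {
                .start = 0,
                .len = (__u64)-1,       /* the ioctl handler clamps this to the fs size */
                .minlen = 0,
        };
        int fd = open(mountpoint, O_RDONLY);

        if (fd < 0)
                return -1;
        if (ioctl(fd, FITRIM, &range) < 0) {
                close(fd);
                return -1;
        }
        close(fd);
        /* on return, range.len holds the number of bytes trimmed */
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        return 0;
}
#endif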