fs/btrfs/transaction.c (platform/adaptation/renesas_rcar/renesas_kernel.git)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include <linux/uuid.h>
26 #include "ctree.h"
27 #include "disk-io.h"
28 #include "transaction.h"
29 #include "locking.h"
30 #include "tree-log.h"
31 #include "inode-map.h"
32 #include "volumes.h"
33 #include "dev-replace.h"
34
35 #define BTRFS_ROOT_TRANS_TAG 0
36
37 static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
38         [TRANS_STATE_RUNNING]           = 0U,
39         [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
40                                            __TRANS_START),
41         [TRANS_STATE_COMMIT_START]      = (__TRANS_USERSPACE |
42                                            __TRANS_START |
43                                            __TRANS_ATTACH),
44         [TRANS_STATE_COMMIT_DOING]      = (__TRANS_USERSPACE |
45                                            __TRANS_START |
46                                            __TRANS_ATTACH |
47                                            __TRANS_JOIN),
48         [TRANS_STATE_UNBLOCKED]         = (__TRANS_USERSPACE |
49                                            __TRANS_START |
50                                            __TRANS_ATTACH |
51                                            __TRANS_JOIN |
52                                            __TRANS_JOIN_NOLOCK),
53         [TRANS_STATE_COMPLETED]         = (__TRANS_USERSPACE |
54                                            __TRANS_START |
55                                            __TRANS_ATTACH |
56                                            __TRANS_JOIN |
57                                            __TRANS_JOIN_NOLOCK),
58 };
59
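/*
 * Drop a reference on the transaction.  The final put verifies that nothing
 * is left on its lists, frees any pending chunk mappings and then frees the
 * transaction struct itself.
 */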
60 void btrfs_put_transaction(struct btrfs_transaction *transaction)
61 {
62         WARN_ON(atomic_read(&transaction->use_count) == 0);
63         if (atomic_dec_and_test(&transaction->use_count)) {
64                 BUG_ON(!list_empty(&transaction->list));
65                 WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
66                 while (!list_empty(&transaction->pending_chunks)) {
67                         struct extent_map *em;
68
69                         em = list_first_entry(&transaction->pending_chunks,
70                                               struct extent_map, list);
71                         list_del_init(&em->list);
72                         free_extent_map(em);
73                 }
74                 kmem_cache_free(btrfs_transaction_cachep, transaction);
75         }
76 }
77
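/*
 * Make the current root node the new commit root, dropping the reference
 * held on the previous commit_root buffer.
 */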
78 static noinline void switch_commit_root(struct btrfs_root *root)
79 {
80         free_extent_buffer(root->commit_root);
81         root->commit_root = btrfs_root_node(root);
82 }
83
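/*
 * Helpers for the per-transaction count of handles whose type falls in
 * TRANS_EXTWRITERS; the counter is bumped when such a handle joins and
 * dropped when it ends.
 */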
84 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
85                                          unsigned int type)
86 {
87         if (type & TRANS_EXTWRITERS)
88                 atomic_inc(&trans->num_extwriters);
89 }
90
91 static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
92                                          unsigned int type)
93 {
94         if (type & TRANS_EXTWRITERS)
95                 atomic_dec(&trans->num_extwriters);
96 }
97
98 static inline void extwriter_counter_init(struct btrfs_transaction *trans,
99                                           unsigned int type)
100 {
101         atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
102 }
103
104 static inline int extwriter_counter_read(struct btrfs_transaction *trans)
105 {
106         return atomic_read(&trans->num_extwriters);
107 }
108
109 /*
110  * either allocate a new transaction or hop into the existing one
111  */
112 static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
113 {
114         struct btrfs_transaction *cur_trans;
115         struct btrfs_fs_info *fs_info = root->fs_info;
116
117         spin_lock(&fs_info->trans_lock);
118 loop:
119         /* The file system has been taken offline. No new transactions. */
120         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
121                 spin_unlock(&fs_info->trans_lock);
122                 return -EROFS;
123         }
124
125         cur_trans = fs_info->running_transaction;
126         if (cur_trans) {
127                 if (cur_trans->aborted) {
128                         spin_unlock(&fs_info->trans_lock);
129                         return cur_trans->aborted;
130                 }
131                 if (btrfs_blocked_trans_types[cur_trans->state] & type) {
132                         spin_unlock(&fs_info->trans_lock);
133                         return -EBUSY;
134                 }
135                 atomic_inc(&cur_trans->use_count);
136                 atomic_inc(&cur_trans->num_writers);
137                 extwriter_counter_inc(cur_trans, type);
138                 spin_unlock(&fs_info->trans_lock);
139                 return 0;
140         }
141         spin_unlock(&fs_info->trans_lock);
142
143         /*
144          * If we are ATTACH, we just want to catch the current transaction,
145          * and commit it. If there is no transaction, just return ENOENT.
146          */
147         if (type == TRANS_ATTACH)
148                 return -ENOENT;
149
150         /*
151          * JOIN_NOLOCK only happens during the transaction commit, so
152          * it is impossible that ->running_transaction is NULL
153          */
154         BUG_ON(type == TRANS_JOIN_NOLOCK);
155
156         cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
157         if (!cur_trans)
158                 return -ENOMEM;
159
160         spin_lock(&fs_info->trans_lock);
161         if (fs_info->running_transaction) {
162                 /*
163                  * someone started a transaction after we unlocked.  Make sure
164                  * to redo the checks above
165                  */
166                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
167                 goto loop;
168         } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
169                 spin_unlock(&fs_info->trans_lock);
170                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
171                 return -EROFS;
172         }
173
174         atomic_set(&cur_trans->num_writers, 1);
175         extwriter_counter_init(cur_trans, type);
176         init_waitqueue_head(&cur_trans->writer_wait);
177         init_waitqueue_head(&cur_trans->commit_wait);
178         cur_trans->state = TRANS_STATE_RUNNING;
179         /*
180          * One for this trans handle, one so it will live on until we
181          * commit the transaction.
182          */
183         atomic_set(&cur_trans->use_count, 2);
184         cur_trans->start_time = get_seconds();
185
186         cur_trans->delayed_refs.href_root = RB_ROOT;
187         atomic_set(&cur_trans->delayed_refs.num_entries, 0);
188         cur_trans->delayed_refs.num_heads_ready = 0;
189         cur_trans->delayed_refs.num_heads = 0;
190         cur_trans->delayed_refs.flushing = 0;
191         cur_trans->delayed_refs.run_delayed_start = 0;
192
193         /*
194          * although the tree mod log is per file system and not per transaction,
195          * the log must never go across transaction boundaries.
196          */
197         smp_mb();
198         if (!list_empty(&fs_info->tree_mod_seq_list))
199                 WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
200                         "creating a fresh transaction\n");
201         if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
202                 WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
203                         "creating a fresh transaction\n");
204         atomic64_set(&fs_info->tree_mod_seq, 0);
205
206         spin_lock_init(&cur_trans->delayed_refs.lock);
207
208         INIT_LIST_HEAD(&cur_trans->pending_snapshots);
209         INIT_LIST_HEAD(&cur_trans->ordered_operations);
210         INIT_LIST_HEAD(&cur_trans->pending_chunks);
211         list_add_tail(&cur_trans->list, &fs_info->trans_list);
212         extent_io_tree_init(&cur_trans->dirty_pages,
213                              fs_info->btree_inode->i_mapping);
214         fs_info->generation++;
215         cur_trans->transid = fs_info->generation;
216         fs_info->running_transaction = cur_trans;
217         cur_trans->aborted = 0;
218         spin_unlock(&fs_info->trans_lock);
219
220         return 0;
221 }
222
223 /*
224  * this does all the record keeping required to make sure that a reference
225  * counted root is properly recorded in a given transaction.  This is required
226  * to make sure the old root from before we joined the transaction is deleted
227  * when the transaction commits
228  */
229 static int record_root_in_trans(struct btrfs_trans_handle *trans,
230                                struct btrfs_root *root)
231 {
232         if (root->ref_cows && root->last_trans < trans->transid) {
233                 WARN_ON(root == root->fs_info->extent_root);
234                 WARN_ON(root->commit_root != root->node);
235
236                 /*
237                  * see below for in_trans_setup usage rules
238                  * we have the reloc mutex held now, so there
239                  * is only one writer in this function
240                  */
241                 root->in_trans_setup = 1;
242
243                 /* make sure readers find in_trans_setup before
244                  * they find our root->last_trans update
245                  */
246                 smp_wmb();
247
248                 spin_lock(&root->fs_info->fs_roots_radix_lock);
249                 if (root->last_trans == trans->transid) {
250                         spin_unlock(&root->fs_info->fs_roots_radix_lock);
251                         return 0;
252                 }
253                 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
254                            (unsigned long)root->root_key.objectid,
255                            BTRFS_ROOT_TRANS_TAG);
256                 spin_unlock(&root->fs_info->fs_roots_radix_lock);
257                 root->last_trans = trans->transid;
258
259                 /* this is pretty tricky.  We don't want to
260                  * take the relocation lock in btrfs_record_root_in_trans
261                  * unless we're really doing the first setup for this root in
262                  * this transaction.
263                  *
264                  * Normally we'd use root->last_trans as a flag to decide
265                  * if we want to take the expensive mutex.
266                  *
267                  * But, we have to set root->last_trans before we
268                  * init the relocation root, otherwise, we trip over warnings
269                  * in ctree.c.  The solution used here is to flag ourselves
270                  * with root->in_trans_setup.  When this is 1, we're still
271                  * fixing up the reloc trees and everyone must wait.
272                  *
273                  * When this is zero, they can trust root->last_trans and fly
274                  * through btrfs_record_root_in_trans without having to take the
275                  * lock.  smp_wmb() makes sure that all the writes above are
276                  * done before we pop in the zero below
277                  */
278                 btrfs_init_reloc_root(trans, root);
279                 smp_wmb();
280                 root->in_trans_setup = 0;
281         }
282         return 0;
283 }
284
285
286 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
287                                struct btrfs_root *root)
288 {
289         if (!root->ref_cows)
290                 return 0;
291
292         /*
293          * see record_root_in_trans for comments about in_trans_setup usage
294          * and barriers
295          */
296         smp_rmb();
297         if (root->last_trans == trans->transid &&
298             !root->in_trans_setup)
299                 return 0;
300
301         mutex_lock(&root->fs_info->reloc_mutex);
302         record_root_in_trans(trans, root);
303         mutex_unlock(&root->fs_info->reloc_mutex);
304
305         return 0;
306 }
307
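/*
 * A transaction counts as blocked once it has reached TRANS_STATE_BLOCKED
 * but not yet TRANS_STATE_UNBLOCKED; aborted transactions are never
 * reported as blocked.
 */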
308 static inline int is_transaction_blocked(struct btrfs_transaction *trans)
309 {
310         return (trans->state >= TRANS_STATE_BLOCKED &&
311                 trans->state < TRANS_STATE_UNBLOCKED &&
312                 !trans->aborted);
313 }
314
315 /* wait for commit against the current transaction to become unblocked.
316  * When this is done, it is safe to start a new transaction, but the current
317  * transaction might not be fully on disk.
318  */
319 static void wait_current_trans(struct btrfs_root *root)
320 {
321         struct btrfs_transaction *cur_trans;
322
323         spin_lock(&root->fs_info->trans_lock);
324         cur_trans = root->fs_info->running_transaction;
325         if (cur_trans && is_transaction_blocked(cur_trans)) {
326                 atomic_inc(&cur_trans->use_count);
327                 spin_unlock(&root->fs_info->trans_lock);
328
329                 wait_event(root->fs_info->transaction_wait,
330                            cur_trans->state >= TRANS_STATE_UNBLOCKED ||
331                            cur_trans->aborted);
332                 btrfs_put_transaction(cur_trans);
333         } else {
334                 spin_unlock(&root->fs_info->trans_lock);
335         }
336 }
337
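/*
 * Decide whether this join should wait for a blocked transaction commit
 * first: TRANS_USERSPACE waits, and TRANS_START waits unless an
 * ioctl-started transaction is open; nothing waits during log recovery.
 */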
338 static int may_wait_transaction(struct btrfs_root *root, int type)
339 {
340         if (root->fs_info->log_root_recovering)
341                 return 0;
342
343         if (type == TRANS_USERSPACE)
344                 return 1;
345
346         if (type == TRANS_START &&
347             !atomic_read(&root->fs_info->open_ioctl_trans))
348                 return 1;
349
350         return 0;
351 }
352
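/*
 * True when relocation is active and this root may still need a reloc root
 * created for it, in which case start_transaction() reserves one extra
 * tree node worth of metadata space.
 */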
353 static inline bool need_reserve_reloc_root(struct btrfs_root *root)
354 {
355         if (!root->fs_info->reloc_ctl ||
356             !root->ref_cows ||
357             root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
358             root->reloc_root)
359                 return false;
360
361         return true;
362 }
363
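/*
 * Common implementation behind the btrfs_*_transaction() variants below:
 * reserve metadata space for num_items items if needed, then join the
 * running transaction (or start a new one) according to type.  Nested
 * calls reuse the handle stored in current->journal_info.
 */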
364 static struct btrfs_trans_handle *
365 start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
366                   enum btrfs_reserve_flush_enum flush)
367 {
368         struct btrfs_trans_handle *h;
369         struct btrfs_transaction *cur_trans;
370         u64 num_bytes = 0;
371         u64 qgroup_reserved = 0;
372         bool reloc_reserved = false;
373         int ret;
374
375         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
376                 return ERR_PTR(-EROFS);
377
378         if (current->journal_info) {
379                 WARN_ON(type & TRANS_EXTWRITERS);
380                 h = current->journal_info;
381                 h->use_count++;
382                 WARN_ON(h->use_count > 2);
383                 h->orig_rsv = h->block_rsv;
384                 h->block_rsv = NULL;
385                 goto got_it;
386         }
387
388         /*
389          * Do the reservation before we join the transaction so we can do all
390          * the appropriate flushing if need be.
391          */
392         if (num_items > 0 && root != root->fs_info->chunk_root) {
393                 if (root->fs_info->quota_enabled &&
394                     is_fstree(root->root_key.objectid)) {
395                         qgroup_reserved = num_items * root->leafsize;
396                         ret = btrfs_qgroup_reserve(root, qgroup_reserved);
397                         if (ret)
398                                 return ERR_PTR(ret);
399                 }
400
401                 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
402                 /*
403                  * Do the reservation for the relocation root creation
404                  */
405                 if (unlikely(need_reserve_reloc_root(root))) {
406                         num_bytes += root->nodesize;
407                         reloc_reserved = true;
408                 }
409
410                 ret = btrfs_block_rsv_add(root,
411                                           &root->fs_info->trans_block_rsv,
412                                           num_bytes, flush);
413                 if (ret)
414                         goto reserve_fail;
415         }
416 again:
417         h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
418         if (!h) {
419                 ret = -ENOMEM;
420                 goto alloc_fail;
421         }
422
423         /*
424          * If we are JOIN_NOLOCK we're already committing a transaction and
425          * waiting on this guy, so we don't need to do the sb_start_intwrite
426          * because we're already holding a ref.  We need this because we could
427          * have raced in and done an fsync() on a file which can kick a commit
428          * and then we deadlock with somebody doing a freeze.
429          *
430          * If we are ATTACH, it means we just want to catch the current
431          * transaction and commit it, so we needn't do sb_start_intwrite(). 
432          */
433         if (type & __TRANS_FREEZABLE)
434                 sb_start_intwrite(root->fs_info->sb);
435
436         if (may_wait_transaction(root, type))
437                 wait_current_trans(root);
438
439         do {
440                 ret = join_transaction(root, type);
441                 if (ret == -EBUSY) {
442                         wait_current_trans(root);
443                         if (unlikely(type == TRANS_ATTACH))
444                                 ret = -ENOENT;
445                 }
446         } while (ret == -EBUSY);
447
448         if (ret < 0) {
449                 /* We must get the transaction if we are JOIN_NOLOCK. */
450                 BUG_ON(type == TRANS_JOIN_NOLOCK);
451                 goto join_fail;
452         }
453
454         cur_trans = root->fs_info->running_transaction;
455
456         h->transid = cur_trans->transid;
457         h->transaction = cur_trans;
458         h->blocks_used = 0;
459         h->bytes_reserved = 0;
460         h->root = root;
461         h->delayed_ref_updates = 0;
462         h->use_count = 1;
463         h->adding_csums = 0;
464         h->block_rsv = NULL;
465         h->orig_rsv = NULL;
466         h->aborted = 0;
467         h->qgroup_reserved = 0;
468         h->delayed_ref_elem.seq = 0;
469         h->type = type;
470         h->allocating_chunk = false;
471         h->reloc_reserved = false;
472         h->sync = false;
473         INIT_LIST_HEAD(&h->qgroup_ref_list);
474         INIT_LIST_HEAD(&h->new_bgs);
475
476         smp_mb();
477         if (cur_trans->state >= TRANS_STATE_BLOCKED &&
478             may_wait_transaction(root, type)) {
479                 btrfs_commit_transaction(h, root);
480                 goto again;
481         }
482
483         if (num_bytes) {
484                 trace_btrfs_space_reservation(root->fs_info, "transaction",
485                                               h->transid, num_bytes, 1);
486                 h->block_rsv = &root->fs_info->trans_block_rsv;
487                 h->bytes_reserved = num_bytes;
488                 h->reloc_reserved = reloc_reserved;
489         }
490         h->qgroup_reserved = qgroup_reserved;
491
492 got_it:
493         btrfs_record_root_in_trans(h, root);
494
495         if (!current->journal_info && type != TRANS_USERSPACE)
496                 current->journal_info = h;
497         return h;
498
499 join_fail:
500         if (type & __TRANS_FREEZABLE)
501                 sb_end_intwrite(root->fs_info->sb);
502         kmem_cache_free(btrfs_trans_handle_cachep, h);
503 alloc_fail:
504         if (num_bytes)
505                 btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
506                                         num_bytes);
507 reserve_fail:
508         if (qgroup_reserved)
509                 btrfs_qgroup_free(root, qgroup_reserved);
510         return ERR_PTR(ret);
511 }
512
513 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
514                                                    int num_items)
515 {
516         return start_transaction(root, num_items, TRANS_START,
517                                  BTRFS_RESERVE_FLUSH_ALL);
518 }
519
520 struct btrfs_trans_handle *btrfs_start_transaction_lflush(
521                                         struct btrfs_root *root, int num_items)
522 {
523         return start_transaction(root, num_items, TRANS_START,
524                                  BTRFS_RESERVE_FLUSH_LIMIT);
525 }
526
527 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
528 {
529         return start_transaction(root, 0, TRANS_JOIN, 0);
530 }
531
532 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
533 {
534         return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
535 }
536
537 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
538 {
539         return start_transaction(root, 0, TRANS_USERSPACE, 0);
540 }
541
542 /*
543  * btrfs_attach_transaction() - catch the running transaction
544  *
545  * It is used when we want to commit the current transaction, but
546  * don't want to start a new one.
547  *
548  * Note: If this function returns -ENOENT, it just means there is no
549  * running transaction. But it is possible that the inactive transaction
550  * is still in memory, not fully on disk. If you need to be sure there is no
551  * inactive transaction in the fs when -ENOENT is returned, you should
552  * invoke
553  *     btrfs_attach_transaction_barrier()
554  */
555 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
556 {
557         return start_transaction(root, 0, TRANS_ATTACH, 0);
558 }
559
560 /*
561  * btrfs_attach_transaction_barrier() - catch the running transaction
562  *
563  * It is similar to the above function, the difference is that this one
564  * will wait for all the inactive transactions until they fully
565  * complete.
566  */
567 struct btrfs_trans_handle *
568 btrfs_attach_transaction_barrier(struct btrfs_root *root)
569 {
570         struct btrfs_trans_handle *trans;
571
572         trans = start_transaction(root, 0, TRANS_ATTACH, 0);
573         if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
574                 btrfs_wait_for_commit(root, 0);
575
576         return trans;
577 }
578
579 /* wait for a transaction commit to be fully complete */
580 static noinline void wait_for_commit(struct btrfs_root *root,
581                                     struct btrfs_transaction *commit)
582 {
583         wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
584 }
585
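/*
 * Wait for the transaction with id transid to finish committing, or, when
 * transid is zero, for the newest transaction that is already committing.
 */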
586 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
587 {
588         struct btrfs_transaction *cur_trans = NULL, *t;
589         int ret = 0;
590
591         if (transid) {
592                 if (transid <= root->fs_info->last_trans_committed)
593                         goto out;
594
595                 ret = -EINVAL;
596                 /* find specified transaction */
597                 spin_lock(&root->fs_info->trans_lock);
598                 list_for_each_entry(t, &root->fs_info->trans_list, list) {
599                         if (t->transid == transid) {
600                                 cur_trans = t;
601                                 atomic_inc(&cur_trans->use_count);
602                                 ret = 0;
603                                 break;
604                         }
605                         if (t->transid > transid) {
606                                 ret = 0;
607                                 break;
608                         }
609                 }
610                 spin_unlock(&root->fs_info->trans_lock);
611                 /* The specified transaction doesn't exist */
612                 if (!cur_trans)
613                         goto out;
614         } else {
615                 /* find newest transaction that is committing | committed */
616                 spin_lock(&root->fs_info->trans_lock);
617                 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
618                                             list) {
619                         if (t->state >= TRANS_STATE_COMMIT_START) {
620                                 if (t->state == TRANS_STATE_COMPLETED)
621                                         break;
622                                 cur_trans = t;
623                                 atomic_inc(&cur_trans->use_count);
624                                 break;
625                         }
626                 }
627                 spin_unlock(&root->fs_info->trans_lock);
628                 if (!cur_trans)
629                         goto out;  /* nothing committing|committed */
630         }
631
632         wait_for_commit(root, cur_trans);
633         btrfs_put_transaction(cur_trans);
634 out:
635         return ret;
636 }
637
638 void btrfs_throttle(struct btrfs_root *root)
639 {
640         if (!atomic_read(&root->fs_info->open_ioctl_trans))
641                 wait_current_trans(root);
642 }
643
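/*
 * Heuristic for btrfs_should_end_transaction(): ask the caller to end the
 * handle when metadata space is tight (the space_info is full and delayed
 * refs need room) or the global block reserve falls below its threshold.
 */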
644 static int should_end_transaction(struct btrfs_trans_handle *trans,
645                                   struct btrfs_root *root)
646 {
647         if (root->fs_info->global_block_rsv.space_info->full &&
648             btrfs_check_space_for_delayed_refs(trans, root))
649                 return 1;
650
651         return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
652 }
653
654 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
655                                  struct btrfs_root *root)
656 {
657         struct btrfs_transaction *cur_trans = trans->transaction;
658         int updates;
659         int err;
660
661         smp_mb();
662         if (cur_trans->state >= TRANS_STATE_BLOCKED ||
663             cur_trans->delayed_refs.flushing)
664                 return 1;
665
666         updates = trans->delayed_ref_updates;
667         trans->delayed_ref_updates = 0;
668         if (updates) {
669                 err = btrfs_run_delayed_refs(trans, root, updates);
670                 if (err) /* Error code will also eval true */
671                         return err;
672         }
673
674         return should_end_transaction(trans, root);
675 }
676
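/*
 * Drop one use of the handle.  Dropping the last use releases the metadata
 * reservation, runs a batch of delayed refs if needed, marks the transaction
 * blocked (or commits it when throttling), and frees the handle.
 */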
677 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
678                           struct btrfs_root *root, int throttle)
679 {
680         struct btrfs_transaction *cur_trans = trans->transaction;
681         struct btrfs_fs_info *info = root->fs_info;
682         unsigned long cur = trans->delayed_ref_updates;
683         int lock = (trans->type != TRANS_JOIN_NOLOCK);
684         int err = 0;
685
686         if (trans->use_count > 1) {
687                 trans->use_count--;
688                 trans->block_rsv = trans->orig_rsv;
689                 return 0;
690         }
691
692         /*
693          * do the qgroup accounting as early as possible
694          */
695         err = btrfs_delayed_refs_qgroup_accounting(trans, info);
696
697         btrfs_trans_release_metadata(trans, root);
698         trans->block_rsv = NULL;
699
700         if (trans->qgroup_reserved) {
701                 /*
702                  * the same root has to be passed here between start_transaction
703                  * and end_transaction. Subvolume quota depends on this.
704                  */
705                 btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
706                 trans->qgroup_reserved = 0;
707         }
708
709         if (!list_empty(&trans->new_bgs))
710                 btrfs_create_pending_block_groups(trans, root);
711
712         trans->delayed_ref_updates = 0;
713         if (!trans->sync && btrfs_should_throttle_delayed_refs(trans, root)) {
714                 cur = max_t(unsigned long, cur, 32);
715                 trans->delayed_ref_updates = 0;
716                 btrfs_run_delayed_refs(trans, root, cur);
717         }
718
719         btrfs_trans_release_metadata(trans, root);
720         trans->block_rsv = NULL;
721
722         if (!list_empty(&trans->new_bgs))
723                 btrfs_create_pending_block_groups(trans, root);
724
725         if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
726             should_end_transaction(trans, root) &&
727             ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
728                 spin_lock(&info->trans_lock);
729                 if (cur_trans->state == TRANS_STATE_RUNNING)
730                         cur_trans->state = TRANS_STATE_BLOCKED;
731                 spin_unlock(&info->trans_lock);
732         }
733
734         if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
735                 if (throttle)
736                         return btrfs_commit_transaction(trans, root);
737                 else
738                         wake_up_process(info->transaction_kthread);
739         }
740
741         if (trans->type & __TRANS_FREEZABLE)
742                 sb_end_intwrite(root->fs_info->sb);
743
744         WARN_ON(cur_trans != info->running_transaction);
745         WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
746         atomic_dec(&cur_trans->num_writers);
747         extwriter_counter_dec(cur_trans, trans->type);
748
749         smp_mb();
750         if (waitqueue_active(&cur_trans->writer_wait))
751                 wake_up(&cur_trans->writer_wait);
752         btrfs_put_transaction(cur_trans);
753
754         if (current->journal_info == trans)
755                 current->journal_info = NULL;
756
757         if (throttle)
758                 btrfs_run_delayed_iputs(root);
759
760         if (trans->aborted ||
761             test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
762                 wake_up_process(info->transaction_kthread);
763                 err = -EIO;
764         }
765         assert_qgroups_uptodate(trans);
766
767         kmem_cache_free(btrfs_trans_handle_cachep, trans);
768         return err;
769 }
770
771 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
772                           struct btrfs_root *root)
773 {
774         return __btrfs_end_transaction(trans, root, 0);
775 }
776
777 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
778                                    struct btrfs_root *root)
779 {
780         return __btrfs_end_transaction(trans, root, 1);
781 }
782
783 /*
784  * when btree blocks are allocated, they have some corresponding bits set for
785  * them in one of two extent_io trees.  This is used to make sure all of
786  * those extents are sent to disk but does not wait on them
787  */
788 int btrfs_write_marked_extents(struct btrfs_root *root,
789                                struct extent_io_tree *dirty_pages, int mark)
790 {
791         int err = 0;
792         int werr = 0;
793         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
794         struct extent_state *cached_state = NULL;
795         u64 start = 0;
796         u64 end;
797
798         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
799                                       mark, &cached_state)) {
800                 convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
801                                    mark, &cached_state, GFP_NOFS);
802                 cached_state = NULL;
803                 err = filemap_fdatawrite_range(mapping, start, end);
804                 if (err)
805                         werr = err;
806                 cond_resched();
807                 start = end + 1;
808         }
809         if (err)
810                 werr = err;
811         return werr;
812 }
813
814 /*
815  * when btree blocks are allocated, they have some corresponding bits set for
816  * them in one of two extent_io trees.  This is used to make sure all of
817  * those extents are on disk for transaction or log commit.  We wait
818  * on all the pages and clear them from the dirty pages state tree
819  */
820 int btrfs_wait_marked_extents(struct btrfs_root *root,
821                               struct extent_io_tree *dirty_pages, int mark)
822 {
823         int err = 0;
824         int werr = 0;
825         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
826         struct extent_state *cached_state = NULL;
827         u64 start = 0;
828         u64 end;
829
830         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
831                                       EXTENT_NEED_WAIT, &cached_state)) {
832                 clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
833                                  0, 0, &cached_state, GFP_NOFS);
834                 err = filemap_fdatawait_range(mapping, start, end);
835                 if (err)
836                         werr = err;
837                 cond_resched();
838                 start = end + 1;
839         }
840         if (err)
841                 werr = err;
842         return werr;
843 }
844
845 /*
846  * when btree blocks are allocated, they have some corresponding bits set for
847  * them in one of two extent_io trees.  This is used to make sure all of
848  * those extents are on disk for transaction or log commit
849  */
850 static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
851                                 struct extent_io_tree *dirty_pages, int mark)
852 {
853         int ret;
854         int ret2;
855         struct blk_plug plug;
856
857         blk_start_plug(&plug);
858         ret = btrfs_write_marked_extents(root, dirty_pages, mark);
859         blk_finish_plug(&plug);
860         ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
861
862         if (ret)
863                 return ret;
864         if (ret2)
865                 return ret2;
866         return 0;
867 }
868
869 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
870                                      struct btrfs_root *root)
871 {
872         if (!trans || !trans->transaction) {
873                 struct inode *btree_inode;
874                 btree_inode = root->fs_info->btree_inode;
875                 return filemap_write_and_wait(btree_inode->i_mapping);
876         }
877         return btrfs_write_and_wait_marked_extents(root,
878                                            &trans->transaction->dirty_pages,
879                                            EXTENT_DIRTY);
880 }
881
882 /*
883  * this is used to update the root pointer in the tree of tree roots.
884  *
885  * But, in the case of the extent allocation tree, updating the root
886  * pointer may allocate blocks which may change the root of the extent
887  * allocation tree.
888  *
889  * So, this loops and repeats and makes sure the cowonly root didn't
890  * change while the root pointer was being updated in the metadata.
891  */
892 static int update_cowonly_root(struct btrfs_trans_handle *trans,
893                                struct btrfs_root *root)
894 {
895         int ret;
896         u64 old_root_bytenr;
897         u64 old_root_used;
898         struct btrfs_root *tree_root = root->fs_info->tree_root;
899
900         old_root_used = btrfs_root_used(&root->root_item);
901         btrfs_write_dirty_block_groups(trans, root);
902
903         while (1) {
904                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
905                 if (old_root_bytenr == root->node->start &&
906                     old_root_used == btrfs_root_used(&root->root_item))
907                         break;
908
909                 btrfs_set_root_node(&root->root_item, root->node);
910                 ret = btrfs_update_root(trans, tree_root,
911                                         &root->root_key,
912                                         &root->root_item);
913                 if (ret)
914                         return ret;
915
916                 old_root_used = btrfs_root_used(&root->root_item);
917                 ret = btrfs_write_dirty_block_groups(trans, root);
918                 if (ret)
919                         return ret;
920         }
921
922         if (root != root->fs_info->extent_root)
923                 switch_commit_root(root);
924
925         return 0;
926 }
927
928 /*
929  * update all the cowonly tree roots on disk
930  *
931  * The error handling in this function may not be obvious. Any of the
932  * failures will cause the file system to go offline. We still need
933  * to clean up the delayed refs.
934  */
935 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
936                                          struct btrfs_root *root)
937 {
938         struct btrfs_fs_info *fs_info = root->fs_info;
939         struct list_head *next;
940         struct extent_buffer *eb;
941         int ret;
942
943         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
944         if (ret)
945                 return ret;
946
947         eb = btrfs_lock_root_node(fs_info->tree_root);
948         ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
949                               0, &eb);
950         btrfs_tree_unlock(eb);
951         free_extent_buffer(eb);
952
953         if (ret)
954                 return ret;
955
956         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
957         if (ret)
958                 return ret;
959
960         ret = btrfs_run_dev_stats(trans, root->fs_info);
961         if (ret)
962                 return ret;
963         ret = btrfs_run_dev_replace(trans, root->fs_info);
964         if (ret)
965                 return ret;
966         ret = btrfs_run_qgroups(trans, root->fs_info);
967         if (ret)
968                 return ret;
969
970         /* run_qgroups might have added some more refs */
971         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
972         if (ret)
973                 return ret;
974
975         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
976                 next = fs_info->dirty_cowonly_roots.next;
977                 list_del_init(next);
978                 root = list_entry(next, struct btrfs_root, dirty_list);
979
980                 ret = update_cowonly_root(trans, root);
981                 if (ret)
982                         return ret;
983         }
984
985         down_write(&fs_info->extent_commit_sem);
986         switch_commit_root(fs_info->extent_root);
987         up_write(&fs_info->extent_commit_sem);
988
989         btrfs_after_dev_replace_commit(fs_info);
990
991         return 0;
992 }
993
994 /*
995  * dead roots are old snapshots that need to be deleted.  This adds the
996  * given root into the list of dead roots that need to
997  * be deleted
998  */
999 void btrfs_add_dead_root(struct btrfs_root *root)
1000 {
1001         spin_lock(&root->fs_info->trans_lock);
1002         if (list_empty(&root->root_list))
1003                 list_add_tail(&root->root_list, &root->fs_info->dead_roots);
1004         spin_unlock(&root->fs_info->trans_lock);
1005 }
1006
1007 /*
1008  * update the root items of all the fs tree roots changed in this transaction
1009  */
1010 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
1011                                     struct btrfs_root *root)
1012 {
1013         struct btrfs_root *gang[8];
1014         struct btrfs_fs_info *fs_info = root->fs_info;
1015         int i;
1016         int ret;
1017         int err = 0;
1018
1019         spin_lock(&fs_info->fs_roots_radix_lock);
1020         while (1) {
1021                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
1022                                                  (void **)gang, 0,
1023                                                  ARRAY_SIZE(gang),
1024                                                  BTRFS_ROOT_TRANS_TAG);
1025                 if (ret == 0)
1026                         break;
1027                 for (i = 0; i < ret; i++) {
1028                         root = gang[i];
1029                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
1030                                         (unsigned long)root->root_key.objectid,
1031                                         BTRFS_ROOT_TRANS_TAG);
1032                         spin_unlock(&fs_info->fs_roots_radix_lock);
1033
1034                         btrfs_free_log(trans, root);
1035                         btrfs_update_reloc_root(trans, root);
1036                         btrfs_orphan_commit_root(trans, root);
1037
1038                         btrfs_save_ino_cache(root, trans);
1039
1040                         /* see comments in should_cow_block() */
1041                         root->force_cow = 0;
1042                         smp_wmb();
1043
1044                         if (root->commit_root != root->node) {
1045                                 mutex_lock(&root->fs_commit_mutex);
1046                                 switch_commit_root(root);
1047                                 btrfs_unpin_free_ino(root);
1048                                 mutex_unlock(&root->fs_commit_mutex);
1049
1050                                 btrfs_set_root_node(&root->root_item,
1051                                                     root->node);
1052                         }
1053
1054                         err = btrfs_update_root(trans, fs_info->tree_root,
1055                                                 &root->root_key,
1056                                                 &root->root_item);
1057                         spin_lock(&fs_info->fs_roots_radix_lock);
1058                         if (err)
1059                                 break;
1060                 }
1061         }
1062         spin_unlock(&fs_info->fs_roots_radix_lock);
1063         return err;
1064 }
1065
1066 /*
1067  * defrag a given btree.
1068  * Every leaf in the btree is read and defragged.
1069  */
1070 int btrfs_defrag_root(struct btrfs_root *root)
1071 {
1072         struct btrfs_fs_info *info = root->fs_info;
1073         struct btrfs_trans_handle *trans;
1074         int ret;
1075
1076         if (xchg(&root->defrag_running, 1))
1077                 return 0;
1078
1079         while (1) {
1080                 trans = btrfs_start_transaction(root, 0);
1081                 if (IS_ERR(trans))
1082                         return PTR_ERR(trans);
1083
1084                 ret = btrfs_defrag_leaves(trans, root);
1085
1086                 btrfs_end_transaction(trans, root);
1087                 btrfs_btree_balance_dirty(info->tree_root);
1088                 cond_resched();
1089
1090                 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
1091                         break;
1092
1093                 if (btrfs_defrag_cancelled(root->fs_info)) {
1094                         pr_debug("BTRFS: defrag_root cancelled\n");
1095                         ret = -EAGAIN;
1096                         break;
1097                 }
1098         }
1099         root->defrag_running = 0;
1100         return ret;
1101 }
1102
1103 /*
1104  * new snapshots need to be created at a very specific time in the
1105  * transaction commit.  This does the actual creation.
1106  *
1107  * Note:
1108  * If an error occurs that may affect the commit of the current transaction,
1109  * we should return the error number. If the error only affects
1110  * the creation of the pending snapshots, just return 0.
1111  */
1112 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1113                                    struct btrfs_fs_info *fs_info,
1114                                    struct btrfs_pending_snapshot *pending)
1115 {
1116         struct btrfs_key key;
1117         struct btrfs_root_item *new_root_item;
1118         struct btrfs_root *tree_root = fs_info->tree_root;
1119         struct btrfs_root *root = pending->root;
1120         struct btrfs_root *parent_root;
1121         struct btrfs_block_rsv *rsv;
1122         struct inode *parent_inode;
1123         struct btrfs_path *path;
1124         struct btrfs_dir_item *dir_item;
1125         struct dentry *dentry;
1126         struct extent_buffer *tmp;
1127         struct extent_buffer *old;
1128         struct timespec cur_time = CURRENT_TIME;
1129         int ret = 0;
1130         u64 to_reserve = 0;
1131         u64 index = 0;
1132         u64 objectid;
1133         u64 root_flags;
1134         uuid_le new_uuid;
1135
1136         path = btrfs_alloc_path();
1137         if (!path) {
1138                 pending->error = -ENOMEM;
1139                 return 0;
1140         }
1141
1142         new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
1143         if (!new_root_item) {
1144                 pending->error = -ENOMEM;
1145                 goto root_item_alloc_fail;
1146         }
1147
1148         pending->error = btrfs_find_free_objectid(tree_root, &objectid);
1149         if (pending->error)
1150                 goto no_free_objectid;
1151
1152         btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
1153
1154         if (to_reserve > 0) {
1155                 pending->error = btrfs_block_rsv_add(root,
1156                                                      &pending->block_rsv,
1157                                                      to_reserve,
1158                                                      BTRFS_RESERVE_NO_FLUSH);
1159                 if (pending->error)
1160                         goto no_free_objectid;
1161         }
1162
1163         pending->error = btrfs_qgroup_inherit(trans, fs_info,
1164                                               root->root_key.objectid,
1165                                               objectid, pending->inherit);
1166         if (pending->error)
1167                 goto no_free_objectid;
1168
1169         key.objectid = objectid;
1170         key.offset = (u64)-1;
1171         key.type = BTRFS_ROOT_ITEM_KEY;
1172
1173         rsv = trans->block_rsv;
1174         trans->block_rsv = &pending->block_rsv;
1175         trans->bytes_reserved = trans->block_rsv->reserved;
1176
1177         dentry = pending->dentry;
1178         parent_inode = pending->dir;
1179         parent_root = BTRFS_I(parent_inode)->root;
1180         record_root_in_trans(trans, parent_root);
1181
1182         /*
1183          * insert the directory item
1184          */
1185         ret = btrfs_set_inode_index(parent_inode, &index);
1186         BUG_ON(ret); /* -ENOMEM */
1187
1188         /* check if there is a file/dir which has the same name. */
1189         dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1190                                          btrfs_ino(parent_inode),
1191                                          dentry->d_name.name,
1192                                          dentry->d_name.len, 0);
1193         if (dir_item != NULL && !IS_ERR(dir_item)) {
1194                 pending->error = -EEXIST;
1195                 goto dir_item_existed;
1196         } else if (IS_ERR(dir_item)) {
1197                 ret = PTR_ERR(dir_item);
1198                 btrfs_abort_transaction(trans, root, ret);
1199                 goto fail;
1200         }
1201         btrfs_release_path(path);
1202
1203         /*
1204          * pull in the delayed directory update
1205          * and the delayed inode item,
1206          * otherwise we corrupt the FS during
1207          * the snapshot
1208          */
1209         ret = btrfs_run_delayed_items(trans, root);
1210         if (ret) {      /* Transaction aborted */
1211                 btrfs_abort_transaction(trans, root, ret);
1212                 goto fail;
1213         }
1214
1215         record_root_in_trans(trans, root);
1216         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1217         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1218         btrfs_check_and_init_root_item(new_root_item);
1219
1220         root_flags = btrfs_root_flags(new_root_item);
1221         if (pending->readonly)
1222                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1223         else
1224                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1225         btrfs_set_root_flags(new_root_item, root_flags);
1226
1227         btrfs_set_root_generation_v2(new_root_item,
1228                         trans->transid);
1229         uuid_le_gen(&new_uuid);
1230         memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1231         memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1232                         BTRFS_UUID_SIZE);
1233         if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1234                 memset(new_root_item->received_uuid, 0,
1235                        sizeof(new_root_item->received_uuid));
1236                 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1237                 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1238                 btrfs_set_root_stransid(new_root_item, 0);
1239                 btrfs_set_root_rtransid(new_root_item, 0);
1240         }
1241         btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1242         btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1243         btrfs_set_root_otransid(new_root_item, trans->transid);
1244
1245         old = btrfs_lock_root_node(root);
1246         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1247         if (ret) {
1248                 btrfs_tree_unlock(old);
1249                 free_extent_buffer(old);
1250                 btrfs_abort_transaction(trans, root, ret);
1251                 goto fail;
1252         }
1253
1254         btrfs_set_lock_blocking(old);
1255
1256         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1257         /* clean up in any case */
1258         btrfs_tree_unlock(old);
1259         free_extent_buffer(old);
1260         if (ret) {
1261                 btrfs_abort_transaction(trans, root, ret);
1262                 goto fail;
1263         }
1264
1265         /* see comments in should_cow_block() */
1266         root->force_cow = 1;
1267         smp_wmb();
1268
1269         btrfs_set_root_node(new_root_item, tmp);
1270         /* record when the snapshot was created in key.offset */
1271         key.offset = trans->transid;
1272         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1273         btrfs_tree_unlock(tmp);
1274         free_extent_buffer(tmp);
1275         if (ret) {
1276                 btrfs_abort_transaction(trans, root, ret);
1277                 goto fail;
1278         }
1279
1280         /*
1281          * insert root back/forward references
1282          */
1283         ret = btrfs_add_root_ref(trans, tree_root, objectid,
1284                                  parent_root->root_key.objectid,
1285                                  btrfs_ino(parent_inode), index,
1286                                  dentry->d_name.name, dentry->d_name.len);
1287         if (ret) {
1288                 btrfs_abort_transaction(trans, root, ret);
1289                 goto fail;
1290         }
1291
1292         key.offset = (u64)-1;
1293         pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
1294         if (IS_ERR(pending->snap)) {
1295                 ret = PTR_ERR(pending->snap);
1296                 btrfs_abort_transaction(trans, root, ret);
1297                 goto fail;
1298         }
1299
1300         ret = btrfs_reloc_post_snapshot(trans, pending);
1301         if (ret) {
1302                 btrfs_abort_transaction(trans, root, ret);
1303                 goto fail;
1304         }
1305
1306         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1307         if (ret) {
1308                 btrfs_abort_transaction(trans, root, ret);
1309                 goto fail;
1310         }
1311
1312         ret = btrfs_insert_dir_item(trans, parent_root,
1313                                     dentry->d_name.name, dentry->d_name.len,
1314                                     parent_inode, &key,
1315                                     BTRFS_FT_DIR, index);
1316         /* We have checked the name at the beginning, so a duplicate here is impossible. */
1317         BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1318         if (ret) {
1319                 btrfs_abort_transaction(trans, root, ret);
1320                 goto fail;
1321         }
1322
1323         btrfs_i_size_write(parent_inode, parent_inode->i_size +
1324                                          dentry->d_name.len * 2);
1325         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1326         ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1327         if (ret) {
1328                 btrfs_abort_transaction(trans, root, ret);
1329                 goto fail;
1330         }
1331         ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
1332                                   BTRFS_UUID_KEY_SUBVOL, objectid);
1333         if (ret) {
1334                 btrfs_abort_transaction(trans, root, ret);
1335                 goto fail;
1336         }
1337         if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1338                 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
1339                                           new_root_item->received_uuid,
1340                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1341                                           objectid);
1342                 if (ret && ret != -EEXIST) {
1343                         btrfs_abort_transaction(trans, root, ret);
1344                         goto fail;
1345                 }
1346         }
1347 fail:
1348         pending->error = ret;
1349 dir_item_existed:
1350         trans->block_rsv = rsv;
1351         trans->bytes_reserved = 0;
1352 no_free_objectid:
1353         kfree(new_root_item);
1354 root_item_alloc_fail:
1355         btrfs_free_path(path);
1356         return ret;
1357 }
1358
1359 /*
1360  * create all the snapshots we've scheduled for creation
1361  */
1362 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1363                                              struct btrfs_fs_info *fs_info)
1364 {
1365         struct btrfs_pending_snapshot *pending, *next;
1366         struct list_head *head = &trans->transaction->pending_snapshots;
1367         int ret = 0;
1368
1369         list_for_each_entry_safe(pending, next, head, list) {
1370                 list_del(&pending->list);
1371                 ret = create_pending_snapshot(trans, fs_info, pending);
1372                 if (ret)
1373                         break;
1374         }
1375         return ret;
1376 }
1377
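/*
 * Copy the new chunk root and tree root pointers (and their generations)
 * into the in-memory super block copy before it is written out.
 */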
1378 static void update_super_roots(struct btrfs_root *root)
1379 {
1380         struct btrfs_root_item *root_item;
1381         struct btrfs_super_block *super;
1382
1383         super = root->fs_info->super_copy;
1384
1385         root_item = &root->fs_info->chunk_root->root_item;
1386         super->chunk_root = root_item->bytenr;
1387         super->chunk_root_generation = root_item->generation;
1388         super->chunk_root_level = root_item->level;
1389
1390         root_item = &root->fs_info->tree_root->root_item;
1391         super->root = root_item->bytenr;
1392         super->generation = root_item->generation;
1393         super->root_level = root_item->level;
1394         if (btrfs_test_opt(root, SPACE_CACHE))
1395                 super->cache_generation = root_item->generation;
1396         if (root->fs_info->update_uuid_tree_gen)
1397                 super->uuid_tree_generation = root_item->generation;
1398 }
1399
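/*
 * Non-blocking check for whether the running transaction has reached
 * TRANS_STATE_COMMIT_START.
 */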
1400 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1401 {
1402         struct btrfs_transaction *trans;
1403         int ret = 0;
1404
1405         spin_lock(&info->trans_lock);
1406         trans = info->running_transaction;
1407         if (trans)
1408                 ret = (trans->state >= TRANS_STATE_COMMIT_START);
1409         spin_unlock(&info->trans_lock);
1410         return ret;
1411 }
1412
1413 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1414 {
1415         struct btrfs_transaction *trans;
1416         int ret = 0;
1417
1418         spin_lock(&info->trans_lock);
1419         trans = info->running_transaction;
1420         if (trans)
1421                 ret = is_transaction_blocked(trans);
1422         spin_unlock(&info->trans_lock);
1423         return ret;
1424 }
1425
1426 /*
1427  * wait for the current transaction commit to start and block subsequent
1428  * transaction joins
1429  */
1430 static void wait_current_trans_commit_start(struct btrfs_root *root,
1431                                             struct btrfs_transaction *trans)
1432 {
1433         wait_event(root->fs_info->transaction_blocked_wait,
1434                    trans->state >= TRANS_STATE_COMMIT_START ||
1435                    trans->aborted);
1436 }
1437
1438 /*
1439  * wait for the current transaction to start and then become unblocked.
1440  * caller holds ref.
1441  */
1442 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1443                                          struct btrfs_transaction *trans)
1444 {
1445         wait_event(root->fs_info->transaction_wait,
1446                    trans->state >= TRANS_STATE_UNBLOCKED ||
1447                    trans->aborted);
1448 }
1449
1450 /*
1451  * commit transactions asynchronously. once btrfs_commit_transaction_async
1452  * returns, any subsequent transaction will not be allowed to join.
1453  */
1454 struct btrfs_async_commit {
1455         struct btrfs_trans_handle *newtrans;
1456         struct btrfs_root *root;
1457         struct work_struct work;
1458 };
1459
1460 static void do_async_commit(struct work_struct *work)
1461 {
1462         struct btrfs_async_commit *ac =
1463                 container_of(work, struct btrfs_async_commit, work);
1464
1465         /*
1466          * We've got freeze protection passed with the transaction.
1467          * Tell lockdep about it.
1468          */
1469         if (ac->newtrans->type & __TRANS_FREEZABLE)
1470                 rwsem_acquire_read(
1471                      &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1472                      0, 1, _THIS_IP_);
1473
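             /* the worker thread owns the handle now; record it in journal_info */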
1474         current->journal_info = ac->newtrans;
1475
1476         btrfs_commit_transaction(ac->newtrans, ac->root);
1477         kfree(ac);
1478 }
1479
1480 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1481                                    struct btrfs_root *root,
1482                                    int wait_for_unblock)
1483 {
1484         struct btrfs_async_commit *ac;
1485         struct btrfs_transaction *cur_trans;
1486
1487         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1488         if (!ac)
1489                 return -ENOMEM;
1490
1491         INIT_WORK(&ac->work, do_async_commit);
1492         ac->root = root;
1493         ac->newtrans = btrfs_join_transaction(root);
1494         if (IS_ERR(ac->newtrans)) {
1495                 int err = PTR_ERR(ac->newtrans);
1496                 kfree(ac);
1497                 return err;
1498         }
1499
1500         /* take transaction reference */
1501         cur_trans = trans->transaction;
1502         atomic_inc(&cur_trans->use_count);
1503
1504         btrfs_end_transaction(trans, root);
1505
1506         /*
1507          * Tell lockdep we've released the freeze rwsem, since the
1508          * async commit thread will be the one to unlock it.
1509          */
1510         if (ac->newtrans->type & __TRANS_FREEZABLE)
1511                 rwsem_release(
1512                         &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1513                         1, _THIS_IP_);
1514
1515         schedule_work(&ac->work);
1516
1517         /* wait for transaction to start and unblock */
1518         if (wait_for_unblock)
1519                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1520         else
1521                 wait_current_trans_commit_start(root, cur_trans);
1522
1523         if (current->journal_info == trans)
1524                 current->journal_info = NULL;
1525
1526         btrfs_put_transaction(cur_trans);
1527         return 0;
1528 }
1529
1530
1531 static void cleanup_transaction(struct btrfs_trans_handle *trans,
1532                                 struct btrfs_root *root, int err)
1533 {
1534         struct btrfs_transaction *cur_trans = trans->transaction;
1535         DEFINE_WAIT(wait);
1536
1537         WARN_ON(trans->use_count > 1);
1538
1539         btrfs_abort_transaction(trans, root, err);
1540
1541         spin_lock(&root->fs_info->trans_lock);
1542
1543         /*
1544          * If the transaction is removed from the list, it means this
1545          * transaction has been committed successfully, so it is impossible
1546          * to call the cleanup function.
1547          */
1548         BUG_ON(list_empty(&cur_trans->list));
1549
1550         list_del_init(&cur_trans->list);
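             /*
              * If this is still the running transaction, stop new joiners and
              * wait until we are the only writer before tearing it down.
              */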
1551         if (cur_trans == root->fs_info->running_transaction) {
1552                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
1553                 spin_unlock(&root->fs_info->trans_lock);
1554                 wait_event(cur_trans->writer_wait,
1555                            atomic_read(&cur_trans->num_writers) == 1);
1556
1557                 spin_lock(&root->fs_info->trans_lock);
1558         }
1559         spin_unlock(&root->fs_info->trans_lock);
1560
1561         btrfs_cleanup_one_transaction(trans->transaction, root);
1562
1563         spin_lock(&root->fs_info->trans_lock);
1564         if (cur_trans == root->fs_info->running_transaction)
1565                 root->fs_info->running_transaction = NULL;
1566         spin_unlock(&root->fs_info->trans_lock);
1567
1568         if (trans->type & __TRANS_FREEZABLE)
1569                 sb_end_intwrite(root->fs_info->sb);
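             /*
              * Two puts: one for the reference this handle holds and one for
              * the reference that was held by fs_info->trans_list.
              */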
1570         btrfs_put_transaction(cur_trans);
1571         btrfs_put_transaction(cur_trans);
1572
1573         trace_btrfs_transaction_commit(root);
1574
1575         btrfs_scrub_continue(root);
1576
1577         if (current->journal_info == trans)
1578                 current->journal_info = NULL;
1579
1580         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1581 }
1582
1583 static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
1584                                           struct btrfs_root *root)
1585 {
1586         int ret;
1587
1588         ret = btrfs_run_delayed_items(trans, root);
1589         /*
1590          * running the delayed items may have added new refs. account
1591          * them now so that they hinder processing of more delayed refs
1592          * as little as possible.
1593          */
1594         if (ret) {
1595                 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
1596                 return ret;
1597         }
1598
1599         ret = btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
1600         if (ret)
1601                 return ret;
1602
1603         /*
1604          * rename doesn't use btrfs_join_transaction, so once we
1605          * set the transaction to blocked above, we aren't going
1606          * to get any new ordered operations.  We can safely run
1607          * it here and know for sure that nothing new will be added
1608          * to the list
1609          */
1610         ret = btrfs_run_ordered_operations(trans, root, 1);
1611
1612         return ret;
1613 }
1614
1615 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1616 {
1617         if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
1618                 return btrfs_start_delalloc_roots(fs_info, 1);
1619         return 0;
1620 }
1621
1622 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
1623 {
1624         if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
1625                 btrfs_wait_ordered_roots(fs_info, -1);
1626 }
1627
1628 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1629                              struct btrfs_root *root)
1630 {
1631         struct btrfs_transaction *cur_trans = trans->transaction;
1632         struct btrfs_transaction *prev_trans = NULL;
1633         int ret;
1634
1635         ret = btrfs_run_ordered_operations(trans, root, 0);
1636         if (ret) {
1637                 btrfs_abort_transaction(trans, root, ret);
1638                 btrfs_end_transaction(trans, root);
1639                 return ret;
1640         }
1641
1642         /* Stop the commit early if ->aborted is set */
1643         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1644                 ret = cur_trans->aborted;
1645                 btrfs_end_transaction(trans, root);
1646                 return ret;
1647         }
1648
1649         /* make a pass through all the delayed refs we have so far
1650          * any running procs may add more while we are here
1651          */
1652         ret = btrfs_run_delayed_refs(trans, root, 0);
1653         if (ret) {
1654                 btrfs_end_transaction(trans, root);
1655                 return ret;
1656         }
1657
1658         btrfs_trans_release_metadata(trans, root);
1659         trans->block_rsv = NULL;
1660         if (trans->qgroup_reserved) {
1661                 btrfs_qgroup_free(root, trans->qgroup_reserved);
1662                 trans->qgroup_reserved = 0;
1663         }
1664
1665         cur_trans = trans->transaction;
1666
1667         /*
1668          * set the flushing flag so procs in this transaction have to
1669          * start sending their work down.
1670          */
1671         cur_trans->delayed_refs.flushing = 1;
1672         smp_wmb();
1673
1674         if (!list_empty(&trans->new_bgs))
1675                 btrfs_create_pending_block_groups(trans, root);
1676
1677         ret = btrfs_run_delayed_refs(trans, root, 0);
1678         if (ret) {
1679                 btrfs_end_transaction(trans, root);
1680                 return ret;
1681         }
1682
1683         spin_lock(&root->fs_info->trans_lock);
1684         if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1685                 spin_unlock(&root->fs_info->trans_lock);
1686                 atomic_inc(&cur_trans->use_count);
1687                 ret = btrfs_end_transaction(trans, root);
1688
1689                 wait_for_commit(root, cur_trans);
1690
1691                 btrfs_put_transaction(cur_trans);
1692
1693                 return ret;
1694         }
1695
1696         cur_trans->state = TRANS_STATE_COMMIT_START;
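             /* wake anyone blocked in wait_current_trans_commit_start() */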
1697         wake_up(&root->fs_info->transaction_blocked_wait);
1698
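             /*
              * If an older transaction is still on the list and has not
              * completed, wait for it so commits finish in order.
              */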
1699         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1700                 prev_trans = list_entry(cur_trans->list.prev,
1701                                         struct btrfs_transaction, list);
1702                 if (prev_trans->state != TRANS_STATE_COMPLETED) {
1703                         atomic_inc(&prev_trans->use_count);
1704                         spin_unlock(&root->fs_info->trans_lock);
1705
1706                         wait_for_commit(root, prev_trans);
1707
1708                         btrfs_put_transaction(prev_trans);
1709                 } else {
1710                         spin_unlock(&root->fs_info->trans_lock);
1711                 }
1712         } else {
1713                 spin_unlock(&root->fs_info->trans_lock);
1714         }
1715
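             /*
              * This handle no longer counts as an external writer; the wait
              * below for extwriter_counter_read() == 0 only covers the others.
              */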
1716         extwriter_counter_dec(cur_trans, trans->type);
1717
1718         ret = btrfs_start_delalloc_flush(root->fs_info);
1719         if (ret)
1720                 goto cleanup_transaction;
1721
1722         ret = btrfs_flush_all_pending_stuffs(trans, root);
1723         if (ret)
1724                 goto cleanup_transaction;
1725
1726         wait_event(cur_trans->writer_wait,
1727                    extwriter_counter_read(cur_trans) == 0);
1728
1729         /* some pending stuff might be added after the previous flush. */
1730         ret = btrfs_flush_all_pending_stuffs(trans, root);
1731         if (ret)
1732                 goto cleanup_transaction;
1733
1734         btrfs_wait_delalloc_flush(root->fs_info);
1735
1736         btrfs_scrub_pause(root);
1737         /*
1738          * Ok now we need to make sure to block out any other joins while we
1739          * commit the transaction.  We could have started a join before setting
1740          * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
1741          */
1742         spin_lock(&root->fs_info->trans_lock);
1743         cur_trans->state = TRANS_STATE_COMMIT_DOING;
1744         spin_unlock(&root->fs_info->trans_lock);
1745         wait_event(cur_trans->writer_wait,
1746                    atomic_read(&cur_trans->num_writers) == 1);
1747
1748         /* ->aborted might be set after the previous check, so check it */
1749         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1750                 ret = cur_trans->aborted;
1751                 goto cleanup_transaction;
1752         }
1753         /*
1754          * the reloc mutex makes sure that we stop
1755          * the balancing code from coming in and moving
1756          * extents around in the middle of the commit
1757          */
1758         mutex_lock(&root->fs_info->reloc_mutex);
1759
1760         /*
1761          * We needn't worry about the delayed items because we will
1762          * deal with them in create_pending_snapshot(), which is the
1763          * core function of the snapshot creation.
1764          */
1765         ret = create_pending_snapshots(trans, root->fs_info);
1766         if (ret) {
1767                 mutex_unlock(&root->fs_info->reloc_mutex);
1768                 goto cleanup_transaction;
1769         }
1770
1771         /*
1772          * We insert the dir indexes of the snapshots and update the inode
1773          * of the snapshots' parents after the snapshot creation, so there
1774          * are some delayed items which are not dealt with. Now deal with
1775          * them.
1776          *
1777          * We needn't worry that this operation will corrupt the snapshots,
1778          * because all the trees which are snapshotted will be forced to COW
1779          * the nodes and leaves.
1780          */
1781         ret = btrfs_run_delayed_items(trans, root);
1782         if (ret) {
1783                 mutex_unlock(&root->fs_info->reloc_mutex);
1784                 goto cleanup_transaction;
1785         }
1786
1787         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1788         if (ret) {
1789                 mutex_unlock(&root->fs_info->reloc_mutex);
1790                 goto cleanup_transaction;
1791         }
1792
1793         /*
1794          * make sure none of the code above managed to slip in a
1795          * delayed item
1796          */
1797         btrfs_assert_delayed_root_empty(root);
1798
1799         WARN_ON(cur_trans != trans->transaction);
1800
1801         /* commit_cowonly_roots is responsible for getting the
1802          * various roots consistent with each other.  Every pointer
1803          * in the tree of tree roots has to point to the most up to date
1804          * root for every subvolume and other tree.  So, we have to keep
1805          * the tree logging code from jumping in and changing any
1806          * of the trees.
1807          *
1808          * At this point in the commit, there can't be any tree-log
1809          * writers, but a little lower down we drop the trans mutex
1810          * and let new people in.  By holding the tree_log_mutex
1811          * from now until after the super is written, we avoid races
1812          * with the tree-log code.
1813          */
1814         mutex_lock(&root->fs_info->tree_log_mutex);
1815
1816         ret = commit_fs_roots(trans, root);
1817         if (ret) {
1818                 mutex_unlock(&root->fs_info->tree_log_mutex);
1819                 mutex_unlock(&root->fs_info->reloc_mutex);
1820                 goto cleanup_transaction;
1821         }
1822
1823         /*
1824          * Since the transaction is done, we should set the inode map cache flag
1825          * before any other transaction that follows.
1826          */
1827         if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
1828                 btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
1829         else
1830                 btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
1831
1832         /* commit_fs_roots gets rid of all the tree log roots, so it is
1833          * now safe to free the tree of log roots
1834          */
1835         btrfs_free_log_root_tree(trans, root->fs_info);
1836
1837         ret = commit_cowonly_roots(trans, root);
1838         if (ret) {
1839                 mutex_unlock(&root->fs_info->tree_log_mutex);
1840                 mutex_unlock(&root->fs_info->reloc_mutex);
1841                 goto cleanup_transaction;
1842         }
1843
1844         /*
1845          * The tasks which save the space cache and inode cache may also
1846          * update ->aborted, so check it.
1847          */
1848         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1849                 ret = cur_trans->aborted;
1850                 mutex_unlock(&root->fs_info->tree_log_mutex);
1851                 mutex_unlock(&root->fs_info->reloc_mutex);
1852                 goto cleanup_transaction;
1853         }
1854
1855         btrfs_prepare_extent_commit(trans, root);
1856
1857         cur_trans = root->fs_info->running_transaction;
1858
1859         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1860                             root->fs_info->tree_root->node);
1861         switch_commit_root(root->fs_info->tree_root);
1862
1863         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1864                             root->fs_info->chunk_root->node);
1865         switch_commit_root(root->fs_info->chunk_root);
1866
1867         assert_qgroups_uptodate(trans);
1868         update_super_roots(root);
1869
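             /*
              * Clear the log root in the super we are about to write (a clean
              * commit leaves no log tree to replay) and copy super_copy into
              * super_for_commit, the stable copy used for the actual write.
              */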
1870         btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1871         btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1872         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1873                sizeof(*root->fs_info->super_copy));
1874
1875         spin_lock(&root->fs_info->trans_lock);
1876         cur_trans->state = TRANS_STATE_UNBLOCKED;
1877         root->fs_info->running_transaction = NULL;
1878         spin_unlock(&root->fs_info->trans_lock);
1879         mutex_unlock(&root->fs_info->reloc_mutex);
1880
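             /* wake waiters blocked until the transaction becomes unblocked */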
1881         wake_up(&root->fs_info->transaction_wait);
1882
1883         ret = btrfs_write_and_wait_transaction(trans, root);
1884         if (ret) {
1885                 btrfs_error(root->fs_info, ret,
1886                             "Error while writing out transaction");
1887                 mutex_unlock(&root->fs_info->tree_log_mutex);
1888                 goto cleanup_transaction;
1889         }
1890
1891         ret = write_ctree_super(trans, root, 0);
1892         if (ret) {
1893                 mutex_unlock(&root->fs_info->tree_log_mutex);
1894                 goto cleanup_transaction;
1895         }
1896
1897         /*
1898          * the super is written, we can safely allow the tree-loggers
1899          * to go about their business
1900          */
1901         mutex_unlock(&root->fs_info->tree_log_mutex);
1902
1903         btrfs_finish_extent_commit(trans, root);
1904
1905         root->fs_info->last_trans_committed = cur_trans->transid;
1906         /*
1907          * We needn't acquire the lock here because there is no other task
1908          * which can change it.
1909          */
1910         cur_trans->state = TRANS_STATE_COMPLETED;
1911         wake_up(&cur_trans->commit_wait);
1912
1913         spin_lock(&root->fs_info->trans_lock);
1914         list_del_init(&cur_trans->list);
1915         spin_unlock(&root->fs_info->trans_lock);
1916
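             /* drop the list's reference and this handle's reference */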
1917         btrfs_put_transaction(cur_trans);
1918         btrfs_put_transaction(cur_trans);
1919
1920         if (trans->type & __TRANS_FREEZABLE)
1921                 sb_end_intwrite(root->fs_info->sb);
1922
1923         trace_btrfs_transaction_commit(root);
1924
1925         btrfs_scrub_continue(root);
1926
1927         if (current->journal_info == trans)
1928                 current->journal_info = NULL;
1929
1930         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1931
1932         if (current != root->fs_info->transaction_kthread)
1933                 btrfs_run_delayed_iputs(root);
1934
1935         return ret;
1936
1937 cleanup_transaction:
1938         btrfs_trans_release_metadata(trans, root);
1939         trans->block_rsv = NULL;
1940         if (trans->qgroup_reserved) {
1941                 btrfs_qgroup_free(root, trans->qgroup_reserved);
1942                 trans->qgroup_reserved = 0;
1943         }
1944         btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
1945         if (current->journal_info == trans)
1946                 current->journal_info = NULL;
1947         cleanup_transaction(trans, root, ret);
1948
1949         return ret;
1950 }
1951
1952 /*
1953  * return < 0 if error
1954  * 0 if there are no more dead_roots at the time of call
1955  * 1 there are more to be processed, call me again
1956  *
1957  * The return value indicates there are certainly more snapshots to delete, but
1958  * if a new one arrives during processing, it may return 0. We don't mind,
1959  * because btrfs_commit_super will poke the cleaner thread and it will process
1960  * the new snapshot a few seconds later.
1961  */
1962 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
1963 {
1964         int ret;
1965         struct btrfs_fs_info *fs_info = root->fs_info;
1966
1967         spin_lock(&fs_info->trans_lock);
1968         if (list_empty(&fs_info->dead_roots)) {
1969                 spin_unlock(&fs_info->trans_lock);
1970                 return 0;
1971         }
1972         root = list_first_entry(&fs_info->dead_roots,
1973                         struct btrfs_root, root_list);
1974         /*
1975          * Make sure the root is not involved in send;
1976          * if the first root is busy with send, we return
1977          * directly rather than continuing with the next one.
1978          */
1979         spin_lock(&root->root_item_lock);
1980         if (root->send_in_progress) {
1981                 spin_unlock(&fs_info->trans_lock);
1982                 spin_unlock(&root->root_item_lock);
1983                 return 0;
1984         }
1985         spin_unlock(&root->root_item_lock);
1986
1987         list_del_init(&root->root_list);
1988         spin_unlock(&fs_info->trans_lock);
1989
1990         pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);
1991
1992         btrfs_kill_all_delayed_nodes(root);
1993
1994         if (btrfs_header_backref_rev(root->node) <
1995                         BTRFS_MIXED_BACKREF_REV)
1996                 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
1997         else
1998                 ret = btrfs_drop_snapshot(root, NULL, 1, 0);
1999         /*
2000          * If we encounter a transaction abort during snapshot cleaning, we
2001          * don't want to crash here
2002          */
2003         return (ret < 0) ? 0 : 1;
2004 }