/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"

#define BTRFS_ROOT_TRANS_TAG 0

void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(transaction->delayed_refs.root.rb_node);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}
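
/*
 * A minimal sketch of the refcount lifecycle (assuming the two initial
 * references taken in join_transaction() below):
 *
 *    join_transaction()               use_count = 2
 *        one ref for the joining trans handle, one so the transaction
 *        stays on fs_info->trans_list until the commit finishes
 *    __btrfs_end_transaction()        put_transaction() -> use_count = 1
 *    btrfs_commit_transaction()       put_transaction() -> use_count = 0,
 *                                     transaction is freed
 */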

static noinline void switch_commit_root(struct btrfs_root *root)
{
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        if (fs_info->trans_no_join) {
                /*
                 * If we are JOIN_NOLOCK we're already committing the current
                 * transaction and just need a handle to deal with things like
                 * the inode cache and space cache while committing.  It is a
                 * special case.
                 */
                if (type != TRANS_JOIN_NOLOCK) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction,
         * and commit it. If there is no transaction, just return ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                goto loop;
        } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
        }

        atomic_set(&cur_trans->num_writers, 1);
        cur_trans->num_joined = 0;
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->in_commit = 0;
        cur_trans->blocked = 0;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->commit_done = 0;
        cur_trans->start_time = get_seconds();

        cur_trans->delayed_refs.root = RB_ROOT;
        cur_trans->delayed_refs.num_entries = 0;
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
                        "creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
                        "creating a fresh transaction\n");
        atomic_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                             fs_info->btree_inode->i_mapping);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}
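
/*
 * Return values from join_transaction(), for quick reference:
 *    0        joined the running transaction or created a fresh one
 *    -EROFS   the fs is in an error state; no new transactions allowed
 *    -EBUSY   trans_no_join is set and we are not TRANS_JOIN_NOLOCK;
 *             callers normally wait_current_trans() and retry
 *    -ENOENT  TRANS_ATTACH was asked for but nothing is running
 *    -ENOMEM  allocating a new btrfs_transaction failed
 */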

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for in_trans_setup usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                root->in_trans_setup = 1;

                /* make sure readers find in_trans_setup before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&root->fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid) {
                        spin_unlock(&root->fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root->in_trans_setup.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_wmb();
                root->in_trans_setup = 0;
        }
        return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        /*
         * see record_root_in_trans for comments about in_trans_setup usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !root->in_trans_setup)
                return 0;

        mutex_lock(&root->fs_info->reloc_mutex);
        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->reloc_mutex);

        return 0;
}
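
/*
 * Barrier pairing note for the lockless fast path above, roughly:
 *
 *    writer (record_root_in_trans)       reader (above)
 *    root->in_trans_setup = 1;
 *    smp_wmb();
 *    root->last_trans = transid;         smp_rmb();
 *    btrfs_init_reloc_root(...);         sees last_trans == transid with
 *    smp_wmb();                          in_trans_setup == 0 only after
 *    root->in_trans_setup = 0;           the reloc root is fully set up
 */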

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                atomic_inc(&cur_trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                wait_event(root->fs_info->transaction_wait,
                           !cur_trans->blocked);
                put_transaction(cur_trans);
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
        if (root->fs_info->log_root_recovering)
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&root->fs_info->open_ioctl_trans))
                return 1;

        return 0;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        int ret;
        u64 qgroup_reserved = 0;

        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
                h = current->journal_info;
                h->use_count++;
                WARN_ON(h->use_count > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items > 0 && root != root->fs_info->chunk_root) {
                if (root->fs_info->quota_enabled &&
                    is_fstree(root->root_key.objectid)) {
                        qgroup_reserved = num_items * root->leafsize;
                        ret = btrfs_qgroup_reserve(root, qgroup_reserved);
                        if (ret)
                                return ERR_PTR(ret);
                }

                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
                ret = btrfs_block_rsv_add(root,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
                        goto reserve_fail;
        }
again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref.  We need this because we could
         * have raced in and done an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type < TRANS_JOIN_NOLOCK)
                sb_start_intwrite(root->fs_info->sb);

        if (may_wait_transaction(root, type))
                wait_current_trans(root);

        do {
                ret = join_transaction(root, type);
                if (ret == -EBUSY)
                        wait_current_trans(root);
        } while (ret == -EBUSY);

        if (ret < 0) {
                /* We must get the transaction if we are JOIN_NOLOCK. */
                BUG_ON(type == TRANS_JOIN_NOLOCK);
                goto join_fail;
        }

        cur_trans = root->fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->bytes_reserved = 0;
        h->root = root;
        h->delayed_ref_updates = 0;
        h->use_count = 1;
        h->adding_csums = 0;
        h->block_rsv = NULL;
        h->orig_rsv = NULL;
        h->aborted = 0;
        h->qgroup_reserved = qgroup_reserved;
        h->delayed_ref_elem.seq = 0;
        h->type = type;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
        INIT_LIST_HEAD(&h->new_bgs);

        smp_mb();
        if (cur_trans->blocked && may_wait_transaction(root, type)) {
                btrfs_commit_transaction(h, root);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(root->fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;

join_fail:
        if (type < TRANS_JOIN_NOLOCK)
                sb_end_intwrite(root->fs_info->sb);
        kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
        if (num_bytes)
                btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
                                        num_bytes);
reserve_fail:
        if (qgroup_reserved)
                btrfs_qgroup_free(root, qgroup_reserved);
        return ERR_PTR(ret);
}

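/*
 * A minimal caller sketch for the wrappers below (modify_one_item() is
 * a placeholder, shown only for illustration):
 *
 *    trans = btrfs_start_transaction(root, 1);
 *    if (IS_ERR(trans))
 *            return PTR_ERR(trans);
 *    ret = modify_one_item(trans, root);
 *    btrfs_end_transaction(trans, root);
 *    return ret;
 *
 * num_items must cover every item the caller may dirty so the metadata
 * reservation made in start_transaction() is large enough.
 */
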
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                        struct btrfs_root *root, int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;

                ret = -EINVAL;
                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                /* The specified transaction doesn't exist */
                if (!cur_trans)
                        goto out;
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry_reverse(t, &root->fs_info->trans_list,
                                            list) {
                        if (t->in_commit) {
                                if (t->commit_done)
                                        break;
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(root, cur_trans);
        put_transaction(cur_trans);
out:
        return ret;
}
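
/*
 * Usage note: transid == 0 waits for whichever transaction is
 * committing right now (if any); a nonzero transid waits for that
 * specific commit, returns 0 immediately if it already committed, and
 * leaves -EINVAL only when that transaction was never started.
 */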

void btrfs_throttle(struct btrfs_root *root)
{
        if (!atomic_read(&root->fs_info->open_ioctl_trans))
                wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        int ret;

        ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
        return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        int updates;
        int err;

        smp_mb();
        if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
                return 1;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
                err = btrfs_run_delayed_refs(trans, root, updates);
                if (err) /* An error code also evaluates as true */
                        return err;
        }

        return should_end_transaction(trans, root);
}
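
/*
 * A simplified sketch of the intended call pattern in long-running
 * loops (modify_one_item() and num_items are placeholders, for
 * illustration only):
 *
 *    while (more_work) {
 *            ret = modify_one_item(trans, root);
 *            if (ret)
 *                    break;
 *            if (btrfs_should_end_transaction(trans, root)) {
 *                    btrfs_end_transaction(trans, root);
 *                    trans = btrfs_start_transaction(root, num_items);
 *                    if (IS_ERR(trans))
 *                            return PTR_ERR(trans);
 *            }
 *    }
 */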

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;

        if (--trans->use_count) {
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        /*
         * do the qgroup accounting as early as possible
         */
        err = btrfs_delayed_refs_qgroup_accounting(trans, info);

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
        /*
         * the same root has to be passed to start_transaction and
         * end_transaction. Subvolume quota depends on this.
         */
        WARN_ON(trans->root != root);

        if (trans->qgroup_reserved) {
                btrfs_qgroup_free(root, trans->qgroup_reserved);
                trans->qgroup_reserved = 0;
        }

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        while (count < 2) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
                }
                count++;
        }
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
            should_end_transaction(trans, root)) {
                trans->transaction->blocked = 1;
                smp_wmb();
        }

        if (lock && cur_trans->blocked && !cur_trans->in_commit) {
                if (throttle) {
                        /*
                         * We may race with somebody else here so end up having
                         * to call end_transaction on ourselves again, so inc
                         * our use_count.
                         */
                        trans->use_count++;
                        return btrfs_commit_transaction(trans, root);
                } else {
                        wake_up_process(info->transaction_kthread);
                }
        }

        if (trans->type < TRANS_JOIN_NOLOCK)
                sb_end_intwrite(root->fs_info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);

        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(root);

        if (trans->aborted ||
            root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                err = -EIO;
        }
        assert_qgroups_uptodate(trans);

        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 0);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 1);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                   mark, &cached_state, GFP_NOFS);
                cached_state = NULL;
                err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}
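
/*
 * Note the handshake with btrfs_wait_marked_extents() below: instead of
 * clearing the dirty mark, the write pass above converts it to
 * EXTENT_NEED_WAIT, so the wait pass finds exactly the ranges this pass
 * submitted even if pages are redirtied in between.
 */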

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                 0, 0, &cached_state, GFP_NOFS);
                err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;

        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                if (ret)
                        return ret;
        }

        if (root != root->fs_info->extent_root)
                switch_commit_root(root);

        return 0;
}
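
/*
 * Termination sketch for the loop above: each btrfs_update_root() call
 * records the current node location and used bytes in root_item, and a
 * following btrfs_write_dirty_block_groups() pass may COW new blocks
 * and move the root again.  Once a pass allocates nothing, bytenr and
 * used bytes stop changing and the check at the top breaks out.
 */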

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans, root->fs_info);
        WARN_ON(ret);
        ret = btrfs_run_dev_replace(trans, root->fs_info);
        WARN_ON(ret);

        ret = btrfs_run_qgroups(trans, root->fs_info);
        BUG_ON(ret);

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
        }

        down_write(&fs_info->extent_commit_sem);
        switch_commit_root(fs_info->extent_root);
        up_write(&fs_info->extent_commit_sem);

        btrfs_after_dev_replace_commit(fs_info);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
        spin_lock(&root->fs_info->trans_lock);
        list_add(&root->root_list, &root->fs_info->dead_roots);
        spin_unlock(&root->fs_info->trans_lock);
        return 0;
}

/*
 * update all the fs roots (subvolume roots) on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        root->force_cow = 0;
                        smp_wmb();

                        if (root->commit_root != root->node) {
                                mutex_lock(&root->fs_commit_mutex);
                                switch_commit_root(root);
                                btrfs_unpin_free_ino(root);
                                mutex_unlock(&root->fs_commit_mutex);

                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}
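
/*
 * A sketch of the tag-driven iteration above: roots touched in this
 * transaction were tagged with BTRFS_ROOT_TRANS_TAG by
 * record_root_in_trans(); radix_tree_gang_lookup_tag() returns up to
 * ARRAY_SIZE(gang) tagged roots per pass, each one is untagged and
 * committed, and the outer loop stops once a lookup finds no more
 * tagged roots.
 */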

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

        if (xchg(&root->defrag_running, 1))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root, cacheonly);

                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root);
                cond_resched();

                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct btrfs_path *path;
        struct btrfs_dir_item *dir_item;
        struct dentry *parent;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        struct timespec cur_time = CURRENT_TIME;
        int ret;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;
        uuid_le new_uuid;

        path = btrfs_alloc_path();
        if (!path) {
                ret = pending->error = -ENOMEM;
                goto path_alloc_fail;
        }

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = pending->error = -ENOMEM;
                goto root_item_alloc_fail;
        }

        ret = btrfs_find_free_objectid(tree_root, &objectid);
        if (ret) {
                pending->error = ret;
                goto no_free_objectid;
        }

        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

        if (to_reserve > 0) {
                ret = btrfs_block_rsv_add(root, &pending->block_rsv,
                                          to_reserve,
                                          BTRFS_RESERVE_NO_FLUSH);
                if (ret) {
                        pending->error = ret;
                        goto no_free_objectid;
                }
        }

        ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
                                   objectid, pending->inherit);
        if (ret) {
                pending->error = ret;
                goto no_free_objectid;
        }

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        rsv = trans->block_rsv;
        trans->block_rsv = &pending->block_rsv;

        dentry = pending->dentry;
        parent = dget_parent(dentry);
        parent_inode = parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(parent_inode, &index);
        BUG_ON(ret); /* -ENOMEM */

        /* check if there is a file/dir which has the same name. */
        dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
                                         btrfs_ino(parent_inode),
                                         dentry->d_name.name,
                                         dentry->d_name.len, 0);
        if (dir_item != NULL && !IS_ERR(dir_item)) {
                pending->error = -EEXIST;
                goto fail;
        } else if (IS_ERR(dir_item)) {
                ret = PTR_ERR(dir_item);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }
        btrfs_release_path(path);

        /*
         * pull in the delayed directory update
         * and the delayed inode item
         * otherwise we corrupt the FS during
         * snapshot
         */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret) {      /* Transaction aborted */
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);

        root_flags = btrfs_root_flags(new_root_item);
        if (pending->readonly)
                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
        else
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);

        btrfs_set_root_generation_v2(new_root_item,
                        trans->transid);
        uuid_le_gen(&new_uuid);
        memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
        memcpy(new_root_item->parent_uuid, root->root_item.uuid,
                        BTRFS_UUID_SIZE);
        new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
        new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
        btrfs_set_root_otransid(new_root_item, trans->transid);
        memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
        memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
        btrfs_set_root_stransid(new_root_item, 0);
        btrfs_set_root_rtransid(new_root_item, 0);

        old = btrfs_lock_root_node(root);
        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
        if (ret) {
                btrfs_tree_unlock(old);
                free_extent_buffer(old);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_set_lock_blocking(old);

        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
        /* clean up in any case */
        btrfs_tree_unlock(old);
        free_extent_buffer(old);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /* see comments in should_cow_block() */
        root->force_cow = 1;
        smp_wmb();

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
                                 btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
        if (IS_ERR(pending->snap)) {
                ret = PTR_ERR(pending->snap);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_reloc_post_snapshot(trans, pending);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_insert_dir_item(trans, parent_root,
                                    dentry->d_name.name, dentry->d_name.len,
                                    parent_inode, &key,
                                    BTRFS_FT_DIR, index);
        /* We checked the name at the beginning, so -EEXIST here is impossible. */
        BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
fail:
        dput(parent);
        trans->block_rsv = rsv;
no_free_objectid:
        kfree(new_root_item);
root_item_alloc_fail:
        btrfs_free_path(path);
path_alloc_fail:
        btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;

        list_for_each_entry(pending, head, list)
                create_pending_snapshot(trans, fs_info, pending);
        return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
        if (btrfs_test_opt(root, SPACE_CACHE))
                super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        int ret = 0;
        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
        spin_unlock(&info->trans_lock);
        return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
        int ret = 0;
        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->blocked;
        spin_unlock(&info->trans_lock);
        return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
                                            struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
                                         struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_wait,
                   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
        struct btrfs_trans_handle *newtrans;
        struct btrfs_root *root;
        struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
        struct btrfs_async_commit *ac =
                container_of(work, struct btrfs_async_commit, work.work);

        /*
         * We've got freeze protection passed with the transaction.
         * Tell lockdep about it.
         */
        if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
                rwsem_acquire_read(
                     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
                     0, 1, _THIS_IP_);

        current->journal_info = ac->newtrans;

        btrfs_commit_transaction(ac->newtrans, ac->root);
        kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   int wait_for_unblock)
{
        struct btrfs_async_commit *ac;
        struct btrfs_transaction *cur_trans;

        ac = kmalloc(sizeof(*ac), GFP_NOFS);
        if (!ac)
                return -ENOMEM;

        INIT_DELAYED_WORK(&ac->work, do_async_commit);
        ac->root = root;
        ac->newtrans = btrfs_join_transaction(root);
        if (IS_ERR(ac->newtrans)) {
                int err = PTR_ERR(ac->newtrans);
                kfree(ac);
                return err;
        }

        /* take transaction reference */
        cur_trans = trans->transaction;
        atomic_inc(&cur_trans->use_count);

        btrfs_end_transaction(trans, root);

        /*
         * Tell lockdep we've released the freeze rwsem, since the
         * async commit thread will be the one to unlock it.
         */
        if (trans->type < TRANS_JOIN_NOLOCK)
                rwsem_release(
                        &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
                        1, _THIS_IP_);

        schedule_delayed_work(&ac->work, 0);

        /* wait for transaction to start and unblock */
        if (wait_for_unblock)
                wait_current_trans_commit_start_and_unblock(root, cur_trans);
        else
                wait_current_trans_commit_start(root, cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        put_transaction(cur_trans);
        return 0;
}
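
/*
 * A minimal caller sketch (for illustration only; error handling
 * abbreviated):
 *
 *    trans = btrfs_start_transaction(root, 0);
 *    if (IS_ERR(trans))
 *            return PTR_ERR(trans);
 *    ret = btrfs_commit_transaction_async(trans, root, 1);
 *
 * With wait_for_unblock == 1 this returns once the commit has started
 * and new writers may join again; the heavy tail of the commit runs in
 * the async worker.
 */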


static void cleanup_transaction(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, int err)
{
        struct btrfs_transaction *cur_trans = trans->transaction;

        WARN_ON(trans->use_count > 1);

        btrfs_abort_transaction(trans, root, err);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        if (cur_trans == root->fs_info->running_transaction) {
                root->fs_info->running_transaction = NULL;
                root->fs_info->trans_no_join = 0;
        }
        spin_unlock(&root->fs_info->trans_lock);

        btrfs_cleanup_one_transaction(trans->transaction, root);

        put_transaction(cur_trans);
        put_transaction(cur_trans);

        trace_btrfs_transaction_commit(root);

        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
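
/*
 * Note on the double put_transaction() above: one put drops the
 * reference held by this trans handle, the other drops the reference
 * that kept the transaction alive on fs_info->trans_list (see the
 * use_count = 2 set in join_transaction()).
 */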

static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root)
{
        int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
        int snap_pending = 0;
        int ret;

        if (!flush_on_commit) {
                spin_lock(&root->fs_info->trans_lock);
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;
                spin_unlock(&root->fs_info->trans_lock);
        }

        if (flush_on_commit || snap_pending) {
                btrfs_start_delalloc_inodes(root, 1);
                btrfs_wait_ordered_extents(root, 1);
        }

        ret = btrfs_run_delayed_items(trans, root);
        if (ret)
                return ret;

        /*
         * running the delayed items may have added new refs. account
         * them now so that they hinder processing of more delayed refs
         * as little as possible.
         */
        btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

        /*
         * rename doesn't use btrfs_join_transaction, so once we set the
         * transaction to blocked above, we aren't going to get any new
         * ordered operations.  We can safely run it here and know for
         * sure that nothing new will be added to the list
         */
1452         btrfs_run_ordered_operations(root, 1);
1453
1454         return 0;
1455 }
1456
1457 /*
1458  * btrfs_transaction state sequence:
1459  *    in_commit = 0, blocked = 0  (initial)
1460  *    in_commit = 1, blocked = 1
1461  *    blocked = 0
1462  *    commit_done = 1
1463  */
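     /*
      * These flags are read locklessly elsewhere in this file:
      * wait_current_trans() sleeps while ->blocked is set, and
      * wait_for_commit() sleeps until ->commit_done goes to 1.
      */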
1464 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1465                              struct btrfs_root *root)
1466 {
1467         unsigned long joined = 0;
1468         struct btrfs_transaction *cur_trans = trans->transaction;
1469         struct btrfs_transaction *prev_trans = NULL;
1470         DEFINE_WAIT(wait);
1471         int ret;
1472         int should_grow = 0;
1473         unsigned long now = get_seconds();
1474
1475         ret = btrfs_run_ordered_operations(root, 0);
1476         if (ret) {
1477                 btrfs_abort_transaction(trans, root, ret);
1478                 goto cleanup_transaction;
1479         }
1480
1481         /* Stop the commit early if ->aborted is set */
1482         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1483                 ret = cur_trans->aborted;
1484                 goto cleanup_transaction;
1485         }
1486
1487         /* make a pass through all the delayed refs we have so far;
1488          * any running procs may add more while we are here.
1489          */
1490         ret = btrfs_run_delayed_refs(trans, root, 0);
1491         if (ret)
1492                 goto cleanup_transaction;
1493
1494         btrfs_trans_release_metadata(trans, root);
1495         trans->block_rsv = NULL;
1496
1497         cur_trans = trans->transaction;
1498
1499         /*
1500          * set the flushing flag so procs in this transaction have to
1501          * start sending their work down.
1502          */
1503         cur_trans->delayed_refs.flushing = 1;
1504
1505         if (!list_empty(&trans->new_bgs))
1506                 btrfs_create_pending_block_groups(trans, root);
1507
1508         ret = btrfs_run_delayed_refs(trans, root, 0);
1509         if (ret)
1510                 goto cleanup_transaction;
1511
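             /*
              * If another task already started committing this transaction,
              * piggyback on its commit: drop our handle and just wait for
              * that commit to finish.
              */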
1512         spin_lock(&cur_trans->commit_lock);
1513         if (cur_trans->in_commit) {
1514                 spin_unlock(&cur_trans->commit_lock);
1515                 atomic_inc(&cur_trans->use_count);
1516                 ret = btrfs_end_transaction(trans, root);
1517
1518                 wait_for_commit(root, cur_trans);
1519
1520                 put_transaction(cur_trans);
1521
1522                 return ret;
1523         }
1524
1525         trans->transaction->in_commit = 1;
1526         trans->transaction->blocked = 1;
1527         spin_unlock(&cur_trans->commit_lock);
1528         wake_up(&root->fs_info->transaction_blocked_wait);
1529
1530         spin_lock(&root->fs_info->trans_lock);
1531         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1532                 prev_trans = list_entry(cur_trans->list.prev,
1533                                         struct btrfs_transaction, list);
1534                 if (!prev_trans->commit_done) {
1535                         atomic_inc(&prev_trans->use_count);
1536                         spin_unlock(&root->fs_info->trans_lock);
1537
1538                         wait_for_commit(root, prev_trans);
1539
1540                         put_transaction(prev_trans);
1541                 } else {
1542                         spin_unlock(&root->fs_info->trans_lock);
1543                 }
1544         } else {
1545                 spin_unlock(&root->fs_info->trans_lock);
1546         }
1547
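             /*
              * On rotating media it pays to hold a young transaction (less
              * than a second old) open a bit longer so more writers can
              * batch into this commit; on SSD we skip the extra wait.
              */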
1548         if (!btrfs_test_opt(root, SSD) &&
1549             (now < cur_trans->start_time || now - cur_trans->start_time < 1))
1550                 should_grow = 1;
1551
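             /*
              * Flush pending work and wait until we are the only writer
              * left; while growing, keep looping as long as new writers
              * join the transaction.
              */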
1552         do {
1553                 joined = cur_trans->num_joined;
1554
1555                 WARN_ON(cur_trans != trans->transaction);
1556
1557                 ret = btrfs_flush_all_pending_stuffs(trans, root);
1558                 if (ret)
1559                         goto cleanup_transaction;
1560
1561                 prepare_to_wait(&cur_trans->writer_wait, &wait,
1562                                 TASK_UNINTERRUPTIBLE);
1563
1564                 if (atomic_read(&cur_trans->num_writers) > 1)
1565                         schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1566                 else if (should_grow)
1567                         schedule_timeout(1);
1568
1569                 finish_wait(&cur_trans->writer_wait, &wait);
1570         } while (atomic_read(&cur_trans->num_writers) > 1 ||
1571                  (should_grow && cur_trans->num_joined != joined));
1572
1573         ret = btrfs_flush_all_pending_stuffs(trans, root);
1574         if (ret)
1575                 goto cleanup_transaction;
1576
1577         /*
1578          * Ok now we need to make sure to block out any other joins while we
1579          * commit the transaction.  We could have started a join before setting
1580          * no_join so make sure to wait for num_writers to drop to 1 again.
1581          */
1582         spin_lock(&root->fs_info->trans_lock);
1583         root->fs_info->trans_no_join = 1;
1584         spin_unlock(&root->fs_info->trans_lock);
1585         wait_event(cur_trans->writer_wait,
1586                    atomic_read(&cur_trans->num_writers) == 1);
1587
1588         /* ->aborted might be set after the previous check, so check it */
1589         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1590                 ret = cur_trans->aborted;
1591                 goto cleanup_transaction;
1592         }
1593         /*
1594          * the reloc mutex makes sure that we stop
1595          * the balancing code from coming in and moving
1596          * extents around in the middle of the commit
1597          */
1598         mutex_lock(&root->fs_info->reloc_mutex);
1599
1600         /*
1601          * We needn't worry about the delayed items because we will
1602          * deal with them in create_pending_snapshot(), which is the
1603          * core function of the snapshot creation.
1604          */
1605         ret = create_pending_snapshots(trans, root->fs_info);
1606         if (ret) {
1607                 mutex_unlock(&root->fs_info->reloc_mutex);
1608                 goto cleanup_transaction;
1609         }
1610
1611         /*
1612          * We insert the dir indexes of the snapshots and update the inodes
1613          * of the snapshots' parents after the snapshot creation, so there
1614          * are some delayed items which are not dealt with. Now deal with
1615          * them.
1616          *
1617          * We needn't worry that this operation will corrupt the snapshots,
1618          * because all the trees which are snapshotted will be forced to COW
1619          * their nodes and leaves.
1620          */
1621         ret = btrfs_run_delayed_items(trans, root);
1622         if (ret) {
1623                 mutex_unlock(&root->fs_info->reloc_mutex);
1624                 goto cleanup_transaction;
1625         }
1626
1627         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1628         if (ret) {
1629                 mutex_unlock(&root->fs_info->reloc_mutex);
1630                 goto cleanup_transaction;
1631         }
1632
1633         /*
1634          * make sure none of the code above managed to slip in a
1635          * delayed item
1636          */
1637         btrfs_assert_delayed_root_empty(root);
1638
1639         WARN_ON(cur_trans != trans->transaction);
1640
1641         btrfs_scrub_pause(root);
1642         /* commit_cowonly_roots (called below) is responsible for getting
1643          * the various roots consistent with each other.  Every pointer
1644          * in the tree of tree roots has to point to the most up-to-date
1645          * root for every subvolume and other tree.  So, we have to keep
1646          * the tree logging code from jumping in and changing any
1647          * of the trees.
1648          *
1649          * At this point in the commit, there can't be any tree-log
1650          * writers, but a little lower down we drop the trans mutex
1651          * and let new people in.  By holding the tree_log_mutex
1652          * from now until after the super is written, we avoid races
1653          * with the tree-log code.
1654          */
1655         mutex_lock(&root->fs_info->tree_log_mutex);
1656
1657         ret = commit_fs_roots(trans, root);
1658         if (ret) {
1659                 mutex_unlock(&root->fs_info->tree_log_mutex);
1660                 mutex_unlock(&root->fs_info->reloc_mutex);
1661                 goto cleanup_transaction;
1662         }
1663
1664         /* commit_fs_roots gets rid of all the tree log roots; it is now
1665          * safe to free the root of tree log roots
1666          */
1667         btrfs_free_log_root_tree(trans, root->fs_info);
1668
1669         ret = commit_cowonly_roots(trans, root);
1670         if (ret) {
1671                 mutex_unlock(&root->fs_info->tree_log_mutex);
1672                 mutex_unlock(&root->fs_info->reloc_mutex);
1673                 goto cleanup_transaction;
1674         }
1675
1676         /*
1677          * The tasks which save the space cache and inode cache may also
1678          * update ->aborted, check it.
1679          */
1680         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1681                 ret = cur_trans->aborted;
1682                 mutex_unlock(&root->fs_info->tree_log_mutex);
1683                 mutex_unlock(&root->fs_info->reloc_mutex);
1684                 goto cleanup_transaction;
1685         }
1686
1687         btrfs_prepare_extent_commit(trans, root);
1688
1689         cur_trans = root->fs_info->running_transaction;
1690
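             /*
              * Record the new tree root and chunk root in their root items
              * and switch the commit roots; update_super_roots() below
              * copies these into the in-memory superblock.
              */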
1691         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1692                             root->fs_info->tree_root->node);
1693         switch_commit_root(root->fs_info->tree_root);
1694
1695         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1696                             root->fs_info->chunk_root->node);
1697         switch_commit_root(root->fs_info->chunk_root);
1698
1699         assert_qgroups_uptodate(trans);
1700         update_super_roots(root);
1701
1702         if (!root->fs_info->log_root_recovering) {
1703                 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1704                 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1705         }
1706
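             /*
              * super_for_commit is the stable copy that will be written to
              * the devices, while super_copy may keep changing on behalf of
              * the next transaction.
              */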
1707         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1708                sizeof(*root->fs_info->super_copy));
1709
1710         trans->transaction->blocked = 0;
1711         spin_lock(&root->fs_info->trans_lock);
1712         root->fs_info->running_transaction = NULL;
1713         root->fs_info->trans_no_join = 0;
1714         spin_unlock(&root->fs_info->trans_lock);
1715         mutex_unlock(&root->fs_info->reloc_mutex);
1716
1717         wake_up(&root->fs_info->transaction_wait);
1718
1719         ret = btrfs_write_and_wait_transaction(trans, root);
1720         if (ret) {
1721                 btrfs_error(root->fs_info, ret,
1722                             "Error while writing out transaction.");
1723                 mutex_unlock(&root->fs_info->tree_log_mutex);
1724                 goto cleanup_transaction;
1725         }
1726
1727         ret = write_ctree_super(trans, root, 0);
1728         if (ret) {
1729                 mutex_unlock(&root->fs_info->tree_log_mutex);
1730                 goto cleanup_transaction;
1731         }
1732
1733         /*
1734          * the super is written, we can safely allow the tree-loggers
1735          * to go about their business
1736          */
1737         mutex_unlock(&root->fs_info->tree_log_mutex);
1738
1739         btrfs_finish_extent_commit(trans, root);
1740
1741         cur_trans->commit_done = 1;
1742
1743         root->fs_info->last_trans_committed = cur_trans->transid;
1744
1745         wake_up(&cur_trans->commit_wait);
1746
1747         spin_lock(&root->fs_info->trans_lock);
1748         list_del_init(&cur_trans->list);
1749         spin_unlock(&root->fs_info->trans_lock);
1750
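             /*
              * Two puts, as in cleanup_transaction(): one for this handle's
              * reference and one for the creation reference tied to the
              * fs_info list entry removed above.
              */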
1751         put_transaction(cur_trans);
1752         put_transaction(cur_trans);
1753
1754         if (trans->type < TRANS_JOIN_NOLOCK)
1755                 sb_end_intwrite(root->fs_info->sb);
1756
1757         trace_btrfs_transaction_commit(root);
1758
1759         btrfs_scrub_continue(root);
1760
1761         if (current->journal_info == trans)
1762                 current->journal_info = NULL;
1763
1764         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1765
1766         if (current != root->fs_info->transaction_kthread)
1767                 btrfs_run_delayed_iputs(root);
1768
1769         return ret;
1770
1771 cleanup_transaction:
1772         btrfs_trans_release_metadata(trans, root);
1773         trans->block_rsv = NULL;
1774         btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
1776         if (current->journal_info == trans)
1777                 current->journal_info = NULL;
1778         cleanup_transaction(trans, root, ret);
1779
1780         return ret;
1781 }
1782
1783 /*
1784  * interface function to delete all the snapshots we have scheduled for deletion
1785  */
1786 int btrfs_clean_old_snapshots(struct btrfs_root *root)
1787 {
1788         LIST_HEAD(list);
1789         struct btrfs_fs_info *fs_info = root->fs_info;
1790
1791         spin_lock(&fs_info->trans_lock);
1792         list_splice_init(&fs_info->dead_roots, &list);
1793         spin_unlock(&fs_info->trans_lock);
1794
1795         while (!list_empty(&list)) {
1796                 int ret;
1797
1798                 root = list_entry(list.next, struct btrfs_root, root_list);
1799                 list_del(&root->root_list);
1800
1801                 btrfs_kill_all_delayed_nodes(root);
1802
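                     /*
                      * Roots that predate mixed backrefs are dropped without
                      * asking btrfs_drop_snapshot() to update refs; newer
                      * roots pass update_ref == 1.
                      */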
1803                 if (btrfs_header_backref_rev(root->node) <
1804                     BTRFS_MIXED_BACKREF_REV)
1805                         ret = btrfs_drop_snapshot(root, NULL, 0, 0);
1806                 else
1807                 ret = btrfs_drop_snapshot(root, NULL, 1, 0);
1808                 BUG_ON(ret < 0);
1809         }
1810         return 0;
1811 }
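
     /*
      * Usage sketch (illustrative, assuming the caller is the cleaner
      * kthread in disk-io.c, which runs this under the cleaner_mutex),
      * roughly:
      *
      *         if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
      *                 btrfs_run_delayed_iputs(root);
      *                 btrfs_clean_old_snapshots(root);
      *                 mutex_unlock(&root->fs_info->cleaner_mutex);
      *         }
      */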