Btrfs: reduce CPU contention while waiting for delayed extent operations
[platform/adaptation/renesas_rcar/renesas_kernel.git] / fs/btrfs/transaction.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"

#define BTRFS_ROOT_TRANS_TAG 0

void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(transaction->delayed_refs.root.rb_node);
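                /*
                 * Zeroing before the free looks like a debugging aid: a
                 * stale user of this transaction trips over zeroed fields
                 * instead of silently reading old data.
                 */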
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        if (fs_info->trans_no_join) {
                /*
                 * If we are JOIN_NOLOCK we're already committing the current
                 * transaction and just need a handle to deal with things like
                 * the inode cache and space cache during the commit. It is a
                 * special case.
                 */
                if (type != TRANS_JOIN_NOLOCK) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction
         * and commit it. If there is no transaction, just return ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                cur_trans = fs_info->running_transaction;
                goto loop;
        } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
        }

        atomic_set(&cur_trans->num_writers, 1);
        cur_trans->num_joined = 0;
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->in_commit = 0;
        cur_trans->blocked = 0;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->commit_done = 0;
        cur_trans->start_time = get_seconds();

        cur_trans->delayed_refs.root = RB_ROOT;
        cur_trans->delayed_refs.num_entries = 0;
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
                        "creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
                        "creating a fresh transaction\n");
        atomic_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
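        /*
         * procs_running_refs plus the wait queue below let tasks sleep while
         * somebody else is running delayed refs for this transaction instead
         * of spinning; this appears to be the mechanism behind the "reduce
         * CPU contention while waiting for delayed extent operations" change
         * this revision carries.
         */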
        atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
        atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
        init_waitqueue_head(&cur_trans->delayed_refs.wait);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                             fs_info->btree_inode->i_mapping);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for in_trans_setup usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                root->in_trans_setup = 1;

                /* make sure readers find in_trans_setup before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&root->fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid) {
                        spin_unlock(&root->fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root->in_trans_setup.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_wmb();
                root->in_trans_setup = 0;
        }
        return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        /*
         * see record_root_in_trans for comments about in_trans_setup usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !root->in_trans_setup)
                return 0;

        mutex_lock(&root->fs_info->reloc_mutex);
        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->reloc_mutex);

        return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                atomic_inc(&cur_trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                wait_event(root->fs_info->transaction_wait,
                           !cur_trans->blocked);
                put_transaction(cur_trans);
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
        if (root->fs_info->log_root_recovering)
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&root->fs_info->open_ioctl_trans))
                return 1;

        return 0;
}

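/*
 * Note on the "type < TRANS_JOIN_NOLOCK" checks below: they assume the
 * TRANS_* values are ordered so that TRANS_JOIN_NOLOCK and TRANS_ATTACH sort
 * after the normal handle types, i.e. only the normal types take (and later
 * drop) the sb_start_intwrite() freeze reference.
 */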
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        int ret;
        u64 qgroup_reserved = 0;

        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
                h = current->journal_info;
                h->use_count++;
                WARN_ON(h->use_count > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items > 0 && root != root->fs_info->chunk_root) {
                if (root->fs_info->quota_enabled &&
                    is_fstree(root->root_key.objectid)) {
                        qgroup_reserved = num_items * root->leafsize;
                        ret = btrfs_qgroup_reserve(root, qgroup_reserved);
                        if (ret)
                                return ERR_PTR(ret);
                }

                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
                ret = btrfs_block_rsv_add(root,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
                        return ERR_PTR(ret);
        }
again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h)
                return ERR_PTR(-ENOMEM);

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref.  We need this because we could
         * have raced in and done an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type < TRANS_JOIN_NOLOCK)
                sb_start_intwrite(root->fs_info->sb);

        if (may_wait_transaction(root, type))
                wait_current_trans(root);

        do {
                ret = join_transaction(root, type);
                if (ret == -EBUSY)
                        wait_current_trans(root);
        } while (ret == -EBUSY);

        if (ret < 0) {
                /* We must get the transaction if we are JOIN_NOLOCK. */
                BUG_ON(type == TRANS_JOIN_NOLOCK);

                if (type < TRANS_JOIN_NOLOCK)
                        sb_end_intwrite(root->fs_info->sb);
                kmem_cache_free(btrfs_trans_handle_cachep, h);
                return ERR_PTR(ret);
        }

        cur_trans = root->fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->bytes_reserved = 0;
        h->root = root;
        h->delayed_ref_updates = 0;
        h->use_count = 1;
        h->adding_csums = 0;
        h->block_rsv = NULL;
        h->orig_rsv = NULL;
        h->aborted = 0;
        h->qgroup_reserved = qgroup_reserved;
        h->delayed_ref_elem.seq = 0;
        h->type = type;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
        INIT_LIST_HEAD(&h->new_bgs);

        smp_mb();
        if (cur_trans->blocked && may_wait_transaction(root, type)) {
                btrfs_commit_transaction(h, root);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(root->fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                        struct btrfs_root *root, int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;

                ret = -EINVAL;
                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                /* The specified transaction doesn't exist */
                if (!cur_trans)
                        goto out;
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry_reverse(t, &root->fs_info->trans_list,
                                            list) {
                        if (t->in_commit) {
                                if (t->commit_done)
                                        break;
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(root, cur_trans);
        put_transaction(cur_trans);
out:
        return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
        if (!atomic_read(&root->fs_info->open_ioctl_trans))
                wait_current_trans(root);
}

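/*
 * The factor 5 below asks btrfs_block_rsv_check() whether the global reserve
 * is still at least half full (assuming its min_factor scales the reserve
 * size by n/10, as div_factor does); once it drops below that, we report
 * that the transaction should end.
 */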
static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        int ret;

        ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
        return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        int updates;
        int err;

        smp_mb();
        if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
                return 1;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
                err = btrfs_run_delayed_refs(trans, root, updates);
                if (err) /* a negative error code also reads as "should end" */
                        return err;
        }

        return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;

        if (--trans->use_count) {
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        /*
         * do the qgroup accounting as early as possible
         */
        err = btrfs_delayed_refs_qgroup_accounting(trans, info);

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
        /*
         * the same root has to be passed to start_transaction and
         * end_transaction. Subvolume quota depends on this.
         */
        WARN_ON(trans->root != root);

        if (trans->qgroup_reserved) {
                btrfs_qgroup_free(root, trans->qgroup_reserved);
                trans->qgroup_reserved = 0;
        }

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

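        /*
         * Make at most one pass flushing delayed refs on the way out, and
         * only when a meaningful batch (more than 64 ready heads) has piled
         * up; the odd "while (count < 1)" is just a one-shot loop.
         */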
        while (count < 1) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
                }
                count++;
        }

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
            should_end_transaction(trans, root)) {
                trans->transaction->blocked = 1;
                smp_wmb();
        }

        if (lock && cur_trans->blocked && !cur_trans->in_commit) {
                if (throttle) {
                        /*
                         * We may race with somebody else here so end up having
                         * to call end_transaction on ourselves again, so inc
                         * our use_count.
                         */
                        trans->use_count++;
                        return btrfs_commit_transaction(trans, root);
                } else {
                        wake_up_process(info->transaction_kthread);
                }
        }

        if (trans->type < TRANS_JOIN_NOLOCK)
                sb_end_intwrite(root->fs_info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);

        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(root);

        if (trans->aborted ||
            root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                err = -EIO;
        }
        assert_qgroups_uptodate(trans);

        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 0);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 1);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;
        struct blk_plug plug;

        blk_start_plug(&plug);
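        /*
         * As each dirty range is queued for writeback, convert its mark to
         * EXTENT_NEED_WAIT so btrfs_wait_marked_extents() below can find
         * exactly the ranges submitted here.
         */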
        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                   mark, &cached_state, GFP_NOFS);
                cached_state = NULL;
                err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        blk_finish_plug(&plug);
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                 0, 0, &cached_state, GFP_NOFS);
                err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;

        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                if (ret)
                        return ret;
        }

        if (root != root->fs_info->extent_root)
                switch_commit_root(root);

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

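        /*
         * COW the root node of the tree of tree roots up front, dirtying it
         * in this transaction; that way the update loop below likely starts
         * from a node that already belongs to this transaction.
         */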
        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans, root->fs_info);
        WARN_ON(ret);
        ret = btrfs_run_dev_replace(trans, root->fs_info);
        WARN_ON(ret);

        ret = btrfs_run_qgroups(trans, root->fs_info);
        BUG_ON(ret);

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
        }

        down_write(&fs_info->extent_commit_sem);
        switch_commit_root(fs_info->extent_root);
        up_write(&fs_info->extent_commit_sem);

        btrfs_after_dev_replace_commit(fs_info);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
        spin_lock(&root->fs_info->trans_lock);
        list_add(&root->root_list, &root->fs_info->dead_roots);
        spin_unlock(&root->fs_info->trans_lock);
        return 0;
}

/*
 * write all the dirty fs-tree roots that were modified in this transaction
 * into the tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
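                        /*
                         * the tag is cleared, so it is safe to drop the radix
                         * lock for the per-root work below; the next gang
                         * lookup will not return this root again
                         */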
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        root->force_cow = 0;
                        smp_wmb();

                        if (root->commit_root != root->node) {
                                mutex_lock(&root->fs_commit_mutex);
                                switch_commit_root(root);
                                btrfs_unpin_free_ino(root);
                                mutex_unlock(&root->fs_commit_mutex);

                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

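        /* xchg() is an atomic test-and-set: only one defrag runs per root */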
        if (xchg(&root->defrag_running, 1))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root, cacheonly);

                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root);
                cond_resched();

                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct btrfs_path *path;
        struct btrfs_dir_item *dir_item;
        struct dentry *parent;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        struct timespec cur_time = CURRENT_TIME;
        int ret;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;
        uuid_le new_uuid;

        path = btrfs_alloc_path();
        if (!path) {
                ret = pending->error = -ENOMEM;
                goto path_alloc_fail;
        }

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = pending->error = -ENOMEM;
                goto root_item_alloc_fail;
        }

        ret = btrfs_find_free_objectid(tree_root, &objectid);
        if (ret) {
                pending->error = ret;
                goto no_free_objectid;
        }

        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

        if (to_reserve > 0) {
                ret = btrfs_block_rsv_add(root, &pending->block_rsv,
                                          to_reserve,
                                          BTRFS_RESERVE_NO_FLUSH);
                if (ret) {
                        pending->error = ret;
                        goto no_free_objectid;
                }
        }

        ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
                                   objectid, pending->inherit);
        if (ret) {
                pending->error = ret;
                goto no_free_objectid;
        }

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        rsv = trans->block_rsv;
        trans->block_rsv = &pending->block_rsv;

        dentry = pending->dentry;
        parent = dget_parent(dentry);
        parent_inode = parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(parent_inode, &index);
        BUG_ON(ret); /* -ENOMEM */

        /* check if there is a file/dir which has the same name. */
        dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
                                         btrfs_ino(parent_inode),
                                         dentry->d_name.name,
                                         dentry->d_name.len, 0);
        if (dir_item != NULL && !IS_ERR(dir_item)) {
                pending->error = -EEXIST;
                goto fail;
        } else if (IS_ERR(dir_item)) {
                ret = PTR_ERR(dir_item);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }
        btrfs_release_path(path);

        /*
         * pull in the delayed directory update
         * and the delayed inode item
         * otherwise we corrupt the FS during
         * snapshot
         */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret) {      /* Transaction aborted */
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);

        root_flags = btrfs_root_flags(new_root_item);
        if (pending->readonly)
                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
        else
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);

        btrfs_set_root_generation_v2(new_root_item,
                        trans->transid);
        uuid_le_gen(&new_uuid);
        memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
        memcpy(new_root_item->parent_uuid, root->root_item.uuid,
                        BTRFS_UUID_SIZE);
        new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
        new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
        btrfs_set_root_otransid(new_root_item, trans->transid);
        memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
        memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
        btrfs_set_root_stransid(new_root_item, 0);
        btrfs_set_root_rtransid(new_root_item, 0);

        old = btrfs_lock_root_node(root);
        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
        if (ret) {
                btrfs_tree_unlock(old);
                free_extent_buffer(old);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_set_lock_blocking(old);

        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
        /* clean up in any case */
        btrfs_tree_unlock(old);
        free_extent_buffer(old);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /* see comments in should_cow_block() */
        root->force_cow = 1;
        smp_wmb();

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
                                 btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
        if (IS_ERR(pending->snap)) {
                ret = PTR_ERR(pending->snap);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_reloc_post_snapshot(trans, pending);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_insert_dir_item(trans, parent_root,
                                    dentry->d_name.name, dentry->d_name.len,
                                    parent_inode, &key,
                                    BTRFS_FT_DIR, index);
        /* We checked the name at the beginning, so a collision is impossible. */
        BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
fail:
        dput(parent);
        trans->block_rsv = rsv;
no_free_objectid:
        kfree(new_root_item);
root_item_alloc_fail:
        btrfs_free_path(path);
path_alloc_fail:
        btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;

        list_for_each_entry(pending, head, list)
                create_pending_snapshot(trans, fs_info, pending);
        return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
        if (btrfs_test_opt(root, SPACE_CACHE))
                super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        int ret = 0;
        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
        spin_unlock(&info->trans_lock);
        return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
        int ret = 0;
        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->blocked;
        spin_unlock(&info->trans_lock);
        return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
                                            struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
                                         struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_wait,
                   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
        struct btrfs_trans_handle *newtrans;
        struct btrfs_root *root;
        struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
        struct btrfs_async_commit *ac =
                container_of(work, struct btrfs_async_commit, work.work);

        /*
         * We've got freeze protection passed with the transaction.
         * Tell lockdep about it.
         */
        if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
                rwsem_acquire_read(
                     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
                     0, 1, _THIS_IP_);

        current->journal_info = ac->newtrans;

        btrfs_commit_transaction(ac->newtrans, ac->root);
        kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   int wait_for_unblock)
{
        struct btrfs_async_commit *ac;
        struct btrfs_transaction *cur_trans;

        ac = kmalloc(sizeof(*ac), GFP_NOFS);
        if (!ac)
                return -ENOMEM;

        INIT_DELAYED_WORK(&ac->work, do_async_commit);
        ac->root = root;
        ac->newtrans = btrfs_join_transaction(root);
        if (IS_ERR(ac->newtrans)) {
                int err = PTR_ERR(ac->newtrans);
                kfree(ac);
                return err;
        }

        /* take transaction reference */
        cur_trans = trans->transaction;
        atomic_inc(&cur_trans->use_count);

        btrfs_end_transaction(trans, root);

        /*
         * Tell lockdep we've released the freeze rwsem, since the
         * async commit thread will be the one to unlock it.
         */
        if (trans->type < TRANS_JOIN_NOLOCK)
                rwsem_release(
                        &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
                        1, _THIS_IP_);

        schedule_delayed_work(&ac->work, 0);

        /* wait for transaction to start and unblock */
        if (wait_for_unblock)
                wait_current_trans_commit_start_and_unblock(root, cur_trans);
        else
                wait_current_trans_commit_start(root, cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        put_transaction(cur_trans);
        return 0;
}


static void cleanup_transaction(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, int err)
{
        struct btrfs_transaction *cur_trans = trans->transaction;

        WARN_ON(trans->use_count > 1);

        btrfs_abort_transaction(trans, root, err);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        if (cur_trans == root->fs_info->running_transaction) {
                root->fs_info->running_transaction = NULL;
                root->fs_info->trans_no_join = 0;
        }
        spin_unlock(&root->fs_info->trans_lock);

        btrfs_cleanup_one_transaction(trans->transaction, root);

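        /*
         * drop both references taken in join_transaction(): the one for this
         * handle and the one that would normally persist until commit
         */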
        put_transaction(cur_trans);
        put_transaction(cur_trans);

        trace_btrfs_transaction_commit(root);

        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root)
{
        int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
        int snap_pending = 0;
        int ret;

        if (!flush_on_commit) {
                spin_lock(&root->fs_info->trans_lock);
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;
                spin_unlock(&root->fs_info->trans_lock);
        }

        if (flush_on_commit || snap_pending) {
                btrfs_start_delalloc_inodes(root, 1);
                btrfs_wait_ordered_extents(root, 1);
        }

        ret = btrfs_run_delayed_items(trans, root);
        if (ret)
                return ret;

        /*
         * running the delayed items may have added new refs. account
         * them now so that they hinder processing of more delayed refs
         * as little as possible.
         */
        btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

        /*
         * rename doesn't use btrfs_join_transaction, so, once we
         * set the transaction to blocked above, we aren't going
         * to get any new ordered operations.  We can safely run
         * it here and know for sure that nothing new will be added
         * to the list
         */
1449         btrfs_run_ordered_operations(root, 1);
1450
1451         return 0;
1452 }
1453
1454 /*
1455  * btrfs_transaction state sequence:
1456  *    in_commit = 0, blocked = 0  (initial)
1457  *    in_commit = 1, blocked = 1
1458  *    blocked = 0
1459  *    commit_done = 1
1460  */
1461 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1462                              struct btrfs_root *root)
1463 {
1464         unsigned long joined = 0;
1465         struct btrfs_transaction *cur_trans = trans->transaction;
1466         struct btrfs_transaction *prev_trans = NULL;
1467         DEFINE_WAIT(wait);
1468         int ret;
1469         int should_grow = 0;
1470         unsigned long now = get_seconds();
1471
1472         ret = btrfs_run_ordered_operations(root, 0);
1473         if (ret) {
1474                 btrfs_abort_transaction(trans, root, ret);
1475                 goto cleanup_transaction;
1476         }
1477
1478         if (cur_trans->aborted) {
1479                 ret = cur_trans->aborted;
1480                 goto cleanup_transaction;
1481         }
1482
1483         /* make a pass through all the delayed refs we have so far;
1484          * any running procs may add more while we are here
1485          */
1486         ret = btrfs_run_delayed_refs(trans, root, 0);
1487         if (ret)
1488                 goto cleanup_transaction;
1489
1490         btrfs_trans_release_metadata(trans, root);
1491         trans->block_rsv = NULL;
1492
1493         cur_trans = trans->transaction;
1494
1495         /*
1496          * set the flushing flag so procs in this transaction have to
1497          * start sending their work down.
1498          */
1499         cur_trans->delayed_refs.flushing = 1;
1500
1501         if (!list_empty(&trans->new_bgs))
1502                 btrfs_create_pending_block_groups(trans, root);
1503
1504         ret = btrfs_run_delayed_refs(trans, root, 0);
1505         if (ret)
1506                 goto cleanup_transaction;
1507
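             /*
              * If another task is already committing this transaction, take
              * an extra reference so cur_trans can't go away, drop our
              * handle, and simply wait for that commit to finish.
              */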
1508         spin_lock(&cur_trans->commit_lock);
1509         if (cur_trans->in_commit) {
1510                 spin_unlock(&cur_trans->commit_lock);
1511                 atomic_inc(&cur_trans->use_count);
1512                 ret = btrfs_end_transaction(trans, root);
1513
1514                 wait_for_commit(root, cur_trans);
1515
1516                 put_transaction(cur_trans);
1517
1518                 return ret;
1519         }
1520
1521         trans->transaction->in_commit = 1;
1522         trans->transaction->blocked = 1;
1523         spin_unlock(&cur_trans->commit_lock);
1524         wake_up(&root->fs_info->transaction_blocked_wait);
1525
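             /*
              * Commits must complete in order: if an earlier transaction is
              * still committing, wait for it before going on.
              */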
1526         spin_lock(&root->fs_info->trans_lock);
1527         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1528                 prev_trans = list_entry(cur_trans->list.prev,
1529                                         struct btrfs_transaction, list);
1530                 if (!prev_trans->commit_done) {
1531                         atomic_inc(&prev_trans->use_count);
1532                         spin_unlock(&root->fs_info->trans_lock);
1533
1534                         wait_for_commit(root, prev_trans);
1535
1536                         put_transaction(prev_trans);
1537                 } else {
1538                         spin_unlock(&root->fs_info->trans_lock);
1539                 }
1540         } else {
1541                 spin_unlock(&root->fs_info->trans_lock);
1542         }
1543
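             /*
              * On rotational storage it pays to hold a young transaction
              * (under a second old) open a little longer so more writers can
              * join and the commit cost is amortized.
              */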
1544         if (!btrfs_test_opt(root, SSD) &&
1545             (now < cur_trans->start_time || now - cur_trans->start_time < 1))
1546                 should_grow = 1;
1547
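             /*
              * Flush pending work and wait until this handle is the only
              * writer left; with should_grow set, sleep one jiffy per pass
              * so late joiners can still slip in.
              */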
1548         do {
1549                 joined = cur_trans->num_joined;
1550
1551                 WARN_ON(cur_trans != trans->transaction);
1552
1553                 ret = btrfs_flush_all_pending_stuffs(trans, root);
1554                 if (ret)
1555                         goto cleanup_transaction;
1556
1557                 prepare_to_wait(&cur_trans->writer_wait, &wait,
1558                                 TASK_UNINTERRUPTIBLE);
1559
1560                 if (atomic_read(&cur_trans->num_writers) > 1)
1561                         schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1562                 else if (should_grow)
1563                         schedule_timeout(1);
1564
1565                 finish_wait(&cur_trans->writer_wait, &wait);
1566         } while (atomic_read(&cur_trans->num_writers) > 1 ||
1567                  (should_grow && cur_trans->num_joined != joined));
1568
1569         ret = btrfs_flush_all_pending_stuffs(trans, root);
1570         if (ret)
1571                 goto cleanup_transaction;
1572
1573         /*
1574          * Ok now we need to make sure to block out any other joins while we
1575          * commit the transaction.  We could have started a join before setting
1576          * trans_no_join, so make sure to wait until num_writers == 1 again.
1577          */
1578         spin_lock(&root->fs_info->trans_lock);
1579         root->fs_info->trans_no_join = 1;
1580         spin_unlock(&root->fs_info->trans_lock);
1581         wait_event(cur_trans->writer_wait,
1582                    atomic_read(&cur_trans->num_writers) == 1);
1583
1584         /*
1585          * the reloc mutex makes sure that we stop
1586          * the balancing code from coming in and moving
1587          * extents around in the middle of the commit
1588          */
1589         mutex_lock(&root->fs_info->reloc_mutex);
1590
1591         /*
1592          * We needn't worry about the delayed items because we will
1593          * deal with them in create_pending_snapshot(), which is the
1594          * core function of the snapshot creation.
1595          */
1596         ret = create_pending_snapshots(trans, root->fs_info);
1597         if (ret) {
1598                 mutex_unlock(&root->fs_info->reloc_mutex);
1599                 goto cleanup_transaction;
1600         }
1601
1602         /*
1603          * We insert the dir indexes of the snapshots and update the inode
1604          * of the snapshots' parents after the snapshot creation, so there
1605          * are some delayed items which are not dealt with. Now deal with
1606          * them.
1607          *
1608          * We needn't worry that this operation will corrupt the snapshots,
1609          * because all the tree which are snapshoted will be forced to COW
1610          * the nodes and leaves.
1611          */
1612         ret = btrfs_run_delayed_items(trans, root);
1613         if (ret) {
1614                 mutex_unlock(&root->fs_info->reloc_mutex);
1615                 goto cleanup_transaction;
1616         }
1617
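             /* a count of (unsigned long)-1 means run every delayed ref we have */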
1618         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1619         if (ret) {
1620                 mutex_unlock(&root->fs_info->reloc_mutex);
1621                 goto cleanup_transaction;
1622         }
1623
1624         /*
1625          * make sure none of the code above managed to slip in a
1626          * delayed item
1627          */
1628         btrfs_assert_delayed_root_empty(root);
1629
1630         WARN_ON(cur_trans != trans->transaction);
1631
1632         btrfs_scrub_pause(root);
1633         /* commit_cowonly_roots below is responsible for getting the
1634          * various roots consistent with each other.  Every pointer
1635          * in the tree of tree roots has to point to the most up-to-date
1636          * root for every subvolume and other tree.  So, we have to keep
1637          * the tree logging code from jumping in and changing any
1638          * of the trees.
1639          *
1640          * At this point in the commit, there can't be any tree-log
1641          * writers, but a little lower down we drop the trans_lock
1642          * and let new people in.  By holding the tree_log_mutex
1643          * from now until after the super is written, we avoid races
1644          * with the tree-log code.
1645          */
1646         mutex_lock(&root->fs_info->tree_log_mutex);
1647
1648         ret = commit_fs_roots(trans, root);
1649         if (ret) {
1650                 mutex_unlock(&root->fs_info->tree_log_mutex);
1651                 mutex_unlock(&root->fs_info->reloc_mutex);
1652                 goto cleanup_transaction;
1653         }
1654
1655         /* commit_fs_roots gets rid of all the tree log roots, so it is
1656          * now safe to free the root of the tree log roots
1657          */
1658         btrfs_free_log_root_tree(trans, root->fs_info);
1659
1660         ret = commit_cowonly_roots(trans, root);
1661         if (ret) {
1662                 mutex_unlock(&root->fs_info->tree_log_mutex);
1663                 mutex_unlock(&root->fs_info->reloc_mutex);
1664                 goto cleanup_transaction;
1665         }
1666
1667         btrfs_prepare_extent_commit(trans, root);
1668
1669         cur_trans = root->fs_info->running_transaction;
1670
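             /*
              * Record the new tree and chunk root nodes in their root items
              * and switch the commit roots, giving readers outside the
              * transaction a consistent view once the super is written.
              */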
1671         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1672                             root->fs_info->tree_root->node);
1673         switch_commit_root(root->fs_info->tree_root);
1674
1675         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1676                             root->fs_info->chunk_root->node);
1677         switch_commit_root(root->fs_info->chunk_root);
1678
1679         assert_qgroups_uptodate(trans);
1680         update_super_roots(root);
1681
1682         if (!root->fs_info->log_root_recovering) {
1683                 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1684                 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1685         }
1686
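             /*
              * super_for_commit is the copy that actually hits the disk, so
              * later in-memory updates to super_copy can't race with the
              * write.
              */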
1687         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1688                sizeof(*root->fs_info->super_copy));
1689
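             /*
              * The new roots are in place: unblock the transaction and let
              * new joins start the next transaction while this one is
              * written out.
              */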
1690         trans->transaction->blocked = 0;
1691         spin_lock(&root->fs_info->trans_lock);
1692         root->fs_info->running_transaction = NULL;
1693         root->fs_info->trans_no_join = 0;
1694         spin_unlock(&root->fs_info->trans_lock);
1695         mutex_unlock(&root->fs_info->reloc_mutex);
1696
1697         wake_up(&root->fs_info->transaction_wait);
1698
1699         ret = btrfs_write_and_wait_transaction(trans, root);
1700         if (ret) {
1701                 btrfs_error(root->fs_info, ret,
1702                             "Error while writing out transaction.");
1703                 mutex_unlock(&root->fs_info->tree_log_mutex);
1704                 goto cleanup_transaction;
1705         }
1706
1707         ret = write_ctree_super(trans, root, 0);
1708         if (ret) {
1709                 mutex_unlock(&root->fs_info->tree_log_mutex);
1710                 goto cleanup_transaction;
1711         }
1712
1713         /*
1714          * the super is written, we can safely allow the tree-loggers
1715          * to go about their business
1716          */
1717         mutex_unlock(&root->fs_info->tree_log_mutex);
1718
1719         btrfs_finish_extent_commit(trans, root);
1720
1721         cur_trans->commit_done = 1;
1722
1723         root->fs_info->last_trans_committed = cur_trans->transid;
1724
1725         wake_up(&cur_trans->commit_wait);
1726
1727         spin_lock(&root->fs_info->trans_lock);
1728         list_del_init(&cur_trans->list);
1729         spin_unlock(&root->fs_info->trans_lock);
1730
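             /*
              * Two puts again: one for the reference the transaction list
              * held, one for the reference taken when this handle joined.
              */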
1731         put_transaction(cur_trans);
1732         put_transaction(cur_trans);
1733
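             /*
              * This matches the sb_start_intwrite() taken when the handle
              * was started; JOIN_NOLOCK handles never took it.
              */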
1734         if (trans->type < TRANS_JOIN_NOLOCK)
1735                 sb_end_intwrite(root->fs_info->sb);
1736
1737         trace_btrfs_transaction_commit(root);
1738
1739         btrfs_scrub_continue(root);
1740
1741         if (current->journal_info == trans)
1742                 current->journal_info = NULL;
1743
1744         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1745
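             /*
              * Run delayed iputs here, but never from the transaction
              * kthread itself: an iput can start a new transaction, which
              * the kthread must stay free to commit.
              */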
1746         if (current != root->fs_info->transaction_kthread)
1747                 btrfs_run_delayed_iputs(root);
1748
1749         return ret;
1750
1751 cleanup_transaction:
1752         btrfs_trans_release_metadata(trans, root);
1753         trans->block_rsv = NULL;
1754         btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
1755         /* WARN_ON(1); */
1756         if (current->journal_info == trans)
1757                 current->journal_info = NULL;
1758         cleanup_transaction(trans, root, ret);
1759
1760         return ret;
1761 }
1762
1763 /*
1764  * interface function to delete all the snapshots we have scheduled for deletion
1765  */
1766 int btrfs_clean_old_snapshots(struct btrfs_root *root)
1767 {
1768         LIST_HEAD(list);
1769         struct btrfs_fs_info *fs_info = root->fs_info;
1770
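             /*
              * Splice the dead roots onto a private list under trans_lock,
              * then drop each snapshot without holding the lock.
              */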
1771         spin_lock(&fs_info->trans_lock);
1772         list_splice_init(&fs_info->dead_roots, &list);
1773         spin_unlock(&fs_info->trans_lock);
1774
1775         while (!list_empty(&list)) {
1776                 int ret;
1777
1778                 root = list_entry(list.next, struct btrfs_root, root_list);
1779                 list_del(&root->root_list);
1780
1781                 btrfs_kill_all_delayed_nodes(root);
1782
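                     /*
                      * Pre-mixed-backref roots are dropped without updating
                      * backrefs; newer roots pass update_ref == 1 to
                      * btrfs_drop_snapshot().
                      */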
1783                 if (btrfs_header_backref_rev(root->node) <
1784                     BTRFS_MIXED_BACKREF_REV)
1785                         ret = btrfs_drop_snapshot(root, NULL, 0, 0);
1786                 else
1787                         ret = btrfs_drop_snapshot(root, NULL, 1, 0);
1788                 BUG_ON(ret < 0);
1789         }
1790         return 0;
1791 }