/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                                    int read_only);
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
                                             struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_root *root);
static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                                       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
        extent_submit_bio_hook_t *submit_bio_done;
        int rw;
        int mirror_num;
        unsigned long bio_flags;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
        int error;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        const char              *name_stem;     /* lock name stem */
        char                    names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
        { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = 0,                              .name_stem = "tree"     },
};

void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif
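
/*
 * Illustration (hedged, not part of the original code): once
 * btrfs_init_lockdep() has run, the extent tree keyset carries the
 * names "btrfs-extent-00" through "btrfs-extent-08", one lock class
 * per tree level.  A call such as
 *
 *	btrfs_set_buffer_lockdep_class(BTRFS_EXTENT_TREE_OBJECTID, eb, 2);
 *
 * walks btrfs_lockdep_keysets[] until the objectid matches (falling
 * back to the id == 0 "tree" entry) and tags eb->lock with the class
 * and name "btrfs-extent-02".
 */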

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em, 0);
        if (ret == -EEXIST) {
                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em)
                        em = ERR_PTR(-EIO);
        } else if (ret) {
                free_extent_map(em);
                em = ERR_PTR(ret);
        }
        write_unlock(&em_tree->lock);

out:
        return em;
}

u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
        return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        put_unaligned_le32(~crc, result);
}
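
/*
 * Usage sketch for the two helpers above (illustrative only): the crc
 * is seeded with all ones, folded over the data, then inverted and
 * stored as a little-endian u32 by btrfs_csum_final():
 *
 *	u32 crc = ~(u32)0;
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, result);    (stores ~crc as le32)
 */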

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;
        unsigned long inline_result;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
                        return 1;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
        }
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
                if (!result)
                        return 1;
        } else {
                result = (char *)&inline_result;
        }

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
                                       "failed on %llu wanted %X found %X "
                                       "level %d\n",
                                       root->fs_info->sb->s_id,
                                       (unsigned long long)buf->start, val, found,
                                       btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }
        if (result != (char *)&inline_result)
                kfree(result);
        return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid,
                                 int atomic)
{
        struct extent_state *cached_state = NULL;
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        if (atomic)
                return -EAGAIN;

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         0, &cached_state);
        if (extent_buffer_uptodate(eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk_ratelimited("parent transid verify failed on %llu wanted %llu "
                       "found %llu\n",
                       (unsigned long long)eb->start,
                       (unsigned long long)parent_transid,
                       (unsigned long long)btrfs_header_generation(eb));
        ret = 1;
        clear_extent_buffer_uptodate(eb);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
        return ret;
}
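
/*
 * Example of what the check above catches (illustrative): if the
 * parent node points at this block with transid 100 but the block's
 * own header says generation 97, then the write for transid 100 never
 * reached disk (or landed in the wrong place), so the block cannot be
 * trusted even though its checksum is valid.
 */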

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(char *raw_disk_sb)
{
        struct btrfs_super_block *disk_sb =
                (struct btrfs_super_block *)raw_disk_sb;
        u16 csum_type = btrfs_super_csum_type(disk_sb);
        int ret = 0;

        if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
                u32 crc = ~(u32)0;
                const int csum_size = sizeof(crc);
                char result[csum_size];

                /*
                 * The super_block structure does not span the whole
                 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
                 * is filled with zeros and is included in the checksum.
                 */
                crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
                                crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
                btrfs_csum_final(crc, result);

                if (memcmp(raw_disk_sb, result, csum_size))
                        ret = 1;

                if (ret && btrfs_super_generation(disk_sb) < 10) {
                        printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
                        ret = 0;
                }
        }

        if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
                printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
                                csum_type);
                ret = 1;
        }

        return ret;
}
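
/*
 * On-disk layout relied on above (illustrative): the first
 * BTRFS_CSUM_SIZE bytes of the superblock hold the stored checksum,
 * and everything from byte BTRFS_CSUM_SIZE up to BTRFS_SUPER_INFO_SIZE,
 * including the zero padding past struct btrfs_super_block, is what
 * the crc is computed over.
 */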

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int failed = 0;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;
        int failed_mirror = 0;

        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start,
                                               WAIT_COMPLETE,
                                               btree_get_extent, mirror_num);
                if (!ret) {
                        if (!verify_parent_transid(io_tree, eb,
                                                   parent_transid, 0))
                                break;
                        else
                                ret = -EIO;
                }

                /*
                 * This buffer's crc is fine, but its contents are corrupted, so
                 * there is no reason to read the other copies, they won't be
                 * any less wrong.
                 */
                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
                        break;

                num_copies = btrfs_num_copies(root->fs_info,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        break;

                if (!failed_mirror) {
                        failed = 1;
                        failed_mirror = eb->read_mirror;
                }

                mirror_num++;
                if (mirror_num == failed_mirror)
                        mirror_num++;

                if (mirror_num > num_copies)
                        break;
        }

        if (failed && !ret && failed_mirror)
                repair_eb_io_failure(root, eb, failed_mirror);

        return ret;
}
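
/*
 * Worked example of the retry loop above (illustrative): with RAID1
 * metadata num_copies is 2.  The first read uses mirror_num == 0,
 * meaning "pick any copy"; if that read used mirror 1 and failed
 * verification, failed_mirror becomes 1 and mirror_num advances to 2,
 * skipping the known-bad copy.  Once mirror_num exceeds num_copies the
 * loop gives up; if a later mirror succeeded, repair_eb_io_failure()
 * rewrites the bad copy from the good one.
 */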

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = page_offset(page);
        u64 found_start;
        struct extent_buffer *eb;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                WARN_ON(1);
                return 0;
        }
        if (!PageUptodate(page)) {
                WARN_ON(1);
                return 0;
        }
        csum_tree_block(root, eb, 0);
        return 0;
}

static int check_tree_block_fsid(struct btrfs_root *root,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        u8 fsid[BTRFS_UUID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
                           BTRFS_FSID_SIZE);
        while (fs_devices) {
                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}

#define CORRUPT(reason, eb, root, slot)                         \
        printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu, " \
               "root=%llu, slot=%d\n", reason,                  \
               (unsigned long long)btrfs_header_bytenr(eb),     \
               (unsigned long long)root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
                               struct extent_buffer *leaf)
{
        struct btrfs_key key;
        struct btrfs_key leaf_key;
        u32 nritems = btrfs_header_nritems(leaf);
        int slot;

        if (nritems == 0)
                return 0;

        /* Check the 0 item */
        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
            BTRFS_LEAF_DATA_SIZE(root)) {
                CORRUPT("invalid item offset size pair", leaf, root, 0);
                return -EIO;
        }

        /*
         * Check to make sure each item's keys are in the correct order and
         * their offsets make sense.  We only have to loop through nritems - 1
         * entries because we check the current slot against the next slot,
         * which verifies that the next slot's offset + size makes sense and
         * that the current slot's offset is correct.
         */
        for (slot = 0; slot < nritems - 1; slot++) {
                btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
                btrfs_item_key_to_cpu(leaf, &key, slot + 1);

                /* Make sure the keys are in the right order */
                if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
                        CORRUPT("bad key order", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Make sure the offsets and ends are right; remember that the
                 * item data starts at the end of the leaf and grows towards
                 * the front.
                 */
                if (btrfs_item_offset_nr(leaf, slot) !=
                        btrfs_item_end_nr(leaf, slot + 1)) {
                        CORRUPT("slot offset bad", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Check to make sure that we don't point outside of the leaf,
                 * just in case all the items are consistent with each other
                 * but all point outside of the leaf.
                 */
                if (btrfs_item_end_nr(leaf, slot) >
                    BTRFS_LEAF_DATA_SIZE(root)) {
                        CORRUPT("slot end outside of leaf", leaf, root, slot);
                        return -EIO;
                }
        }

        return 0;
}
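
/*
 * Leaf layout that check_leaf() relies on (illustrative, with a
 * hypothetical BTRFS_LEAF_DATA_SIZE of 4000 bytes): item headers grow
 * from the front of the leaf while item data grows backwards from the
 * end, so for two 100 byte items:
 *
 *	item 0: offset 3900, size 100 -> data occupies [3900, 4000)
 *	item 1: offset 3800, size 100 -> data occupies [3800, 3900)
 *
 * Thus item_offset(0) + item_size(0) == BTRFS_LEAF_DATA_SIZE and
 * item_offset(slot) == item_end(slot + 1), which is exactly what the
 * checks above verify.
 */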

static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state, int mirror)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;
        int reads_done;

        if (!page->private)
                goto out;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        eb = (struct extent_buffer *)page->private;

        /* the pending IO might have been the only thing that kept this buffer
         * in memory.  Make sure we have a ref for all these other checks
         */
        extent_buffer_get(eb);

        reads_done = atomic_dec_and_test(&eb->io_pages);
        if (!reads_done)
                goto err;

        eb->read_mirror = mirror;
        if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
                ret = -EIO;
                goto err;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
                printk_ratelimited(KERN_INFO "btrfs bad tree block start "
                               "%llu %llu\n",
                               (unsigned long long)found_start,
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
                printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
                btrfs_info(root->fs_info, "bad tree block level %d\n",
                           (int)btrfs_header_level(eb));
                ret = -EIO;
                goto err;
        }

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        ret = csum_tree_block(root, eb, 1);
        if (ret) {
                ret = -EIO;
                goto err;
        }

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && check_leaf(root, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        if (!ret)
                set_extent_buffer_uptodate(eb);
err:
        if (reads_done &&
            test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(root, eb, eb->start, ret);

        if (ret) {
                /*
                 * our io error hook is going to dec the io pages
                 * again, we have to make sure it has something
                 * to decrement
                 */
                atomic_inc(&eb->io_pages);
                clear_extent_buffer_uptodate(eb);
        }
        free_extent_buffer(eb);
out:
        return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

        eb = (struct extent_buffer *)page->private;
        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
        eb->read_mirror = failed_mirror;
        atomic_dec(&eb->io_pages);
        if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(root, eb, eb->start, -EIO);
        return -EIO;    /* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio, int err)
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;

        if (bio->bi_rw & REQ_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
                        btrfs_queue_worker(&fs_info->endio_freespace_worker,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        btrfs_queue_worker(&fs_info->endio_raid56_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_write_workers,
                                           &end_io_wq->work);
        } else {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        btrfs_queue_worker(&fs_info->endio_raid56_workers,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_workers,
                                           &end_io_wq->work);
        }
}

/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 * 3 - raid parity work
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;
        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}
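
/*
 * Typical usage (illustrative): a caller that wants its bio completion
 * to run in task context hooks the bio before submitting it:
 *
 *	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_METADATA);
 *
 * end_workqueue_bio() then fires at interrupt time, queues
 * end_workqueue_fn() on the matching endio worker pool, and that
 * worker finally invokes the original bi_end_io/bi_private pair saved
 * in the end_io_wq.
 */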

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
        unsigned long limit = min_t(unsigned long,
                                    info->workers.max_workers,
                                    info->fs_devices->open_devices);
        return 256 * limit;
}
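
/*
 * Worked example (illustrative): with workers.max_workers == 8 and 4
 * open devices the limit is 256 * min(8, 4) == 1024 in-flight async
 * submits; run_one_async_done() below wakes waiters once the count
 * drops under 2/3 of that, i.e. under 682.
 */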

static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        int ret;

        async = container_of(work, struct async_submit_bio, work);
        ret = async->submit_bio_start(async->inode, async->rw, async->bio,
                                      async->mirror_num, async->bio_flags,
                                      async->bio_offset);
        if (ret)
                async->error = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
        int limit;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;

        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        /* If an error occurred we just want to clean up the bio and move on */
        if (async->error) {
                bio_endio(async->bio, async->error);
                return;
        }

        async->submit_bio_done(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags,
                               async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        unsigned long bio_flags,
                        u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;

        async->work.func = run_one_async_start;
        async->work.ordered_func = run_one_async_done;
        async->work.ordered_free = run_one_async_free;

        async->work.flags = 0;
        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;

        async->error = 0;

        atomic_inc(&fs_info->nr_async_submits);

        if (rw & REQ_SYNC)
                btrfs_set_work_high_prio(&async->work);

        btrfs_queue_worker(&fs_info->workers, &async->work);

        while (atomic_read(&fs_info->async_submit_draining) &&
              atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) == 0));
        }

        return 0;
}
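
/*
 * Note on the three callbacks wired up above (as the async-thread code
 * is understood here): work.func runs the checksumming step in
 * parallel across worker threads, while ordered_func and ordered_free
 * run in queueing order, so bios still reach btrfs_map_bio() in the
 * order they were submitted even though their csums were computed
 * concurrently.
 */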

static int btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        struct btrfs_root *root;
        int ret = 0;

        WARN_ON(bio->bi_vcnt <= 0);
        while (bio_index < bio->bi_vcnt) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root, bvec->bv_page);
                if (ret)
                        break;
                bio_index++;
                bvec++;
        }
        return ret;
}
static int __btree_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags,
                                    u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just do the checksumming here; the bio
         * is handed to btrfs_map_bio by __btree_submit_bio_done.
         */
        return btree_csum_one_bio(bio);
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int ret;

        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
        if (ret)
                bio_endio(bio, ret);
        return ret;
}

static int check_async_write(struct inode *inode, unsigned long bio_flags)
{
        if (bio_flags & EXTENT_BIO_TREE_LOG)
                return 0;
#ifdef CONFIG_X86
        if (cpu_has_xmm4_2)
                return 0;
#endif
        return 1;
}
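
/*
 * Rationale (illustrative): with SSE4.2 present (cpu_has_xmm4_2) the
 * hardware crc32c instruction makes checksumming cheap enough to do
 * inline in the submitting task, so the async offload is skipped;
 * tree-log bios are likewise kept synchronous to keep fsync latency
 * down.
 */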

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int async = check_async_write(inode, bio_flags);
        int ret;

        if (!(rw & REQ_WRITE)) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
                                          bio, 1);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                    mirror_num, 0);
        } else if (!async) {
                ret = btree_csum_one_bio(bio);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                    mirror_num, 0);
        } else {
                /*
                 * kthread helpers are used to submit writes so that
                 * checksumming can happen in parallel across all CPUs
                 */
                ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                          inode, rw, bio, mirror_num, 0,
                                          bio_offset,
                                          __btree_submit_bio_start,
                                          __btree_submit_bio_done);
        }

        if (ret) {
out_w_error:
                bio_endio(bio, ret);
        }
        return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                        struct page *newpage, struct page *page,
                        enum migrate_mode mode)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        struct btrfs_fs_info *fs_info;
        int ret;

        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {

                if (wbc->for_kupdate)
                        return 0;

                fs_info = BTRFS_I(mapping->host)->root->fs_info;
                /* this is a bit racy, but that's ok */
                ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
                                             BTRFS_DIRTY_METADATA_THRESH);
                if (ret < 0)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        if (PageWriteback(page) || PageDirty(page))
                return 0;

        return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                printk(KERN_WARNING "btrfs warning page private not zero "
                       "on page %llu\n", (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
        struct extent_buffer *eb;

        BUG_ON(!PagePrivate(page));
        eb = (struct extent_buffer *)page->private;
        BUG_ON(!eb);
        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
        BUG_ON(!atomic_read(&eb->refs));
        btrfs_assert_tree_locked(eb);
#endif
        return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
        .set_page_dirty = btree_set_page_dirty,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         int mirror_num, struct extent_buffer **eb)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;

        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

        ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
                                       btree_get_extent, mirror_num);
        if (ret) {
                free_extent_buffer(buf);
                return ret;
        }

        if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
                free_extent_buffer(buf);
                return -EIO;
        } else if (extent_buffer_uptodate(buf)) {
                *eb = buf;
        } else {
                free_extent_buffer(buf);
        }
        return 0;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize);
        return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize);
        return eb;
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
                                        buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        return filemap_fdatawait_range(buf->pages[0]->mapping,
                                       buf->start, buf->start + buf->len - 1);
}
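
/*
 * Usage sketch (illustrative): the two helpers above pair up to push a
 * single tree block to disk synchronously:
 *
 *	btrfs_write_tree_block(buf);
 *	btrfs_wait_tree_block_writeback(buf);
 *
 * which kicks writeback for exactly buf's byte range and then waits
 * for it to finish.
 */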

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        return buf;

}

void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      struct extent_buffer *buf)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (btrfs_header_generation(buf) ==
            fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        __percpu_counter_add(&fs_info->dirty_metadata_bytes,
                                             -buf->len,
                                             fs_info->dirty_metadata_batch);
                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
                        btrfs_set_lock_blocking(buf);
                        clear_extent_buffer_dirty(buf);
                }
        }
}

static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                         u32 stripesize, struct btrfs_root *root,
                         struct btrfs_fs_info *fs_info,
                         u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;
        root->in_radix = 0;
        root->orphan_item_inserted = 0;
        root->orphan_cleanup_state = 0;

        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->name = NULL;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
        INIT_LIST_HEAD(&root->logged_list[0]);
        INIT_LIST_HEAD(&root->logged_list[1]);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->accounting_lock);
        spin_lock_init(&root->log_extents_lock[0]);
        spin_lock_init(&root->log_extents_lock[1]);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        atomic_set(&root->log_batch, 0);
        atomic_set(&root->orphan_inodes, 0);
        atomic_set(&root->refs, 1);
        root->log_transid = 0;
        root->last_log_commit = 0;
        extent_io_tree_init(&root->dirty_log_pages,
                             fs_info->btree_inode->i_mapping);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->root_key.objectid = objectid;
        root->anon_dev = 0;

        spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
        if (root)
                root->fs_info = fs_info;
        return root;
}

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     u64 objectid)
{
        struct extent_buffer *leaf;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root;
        struct btrfs_key key;
        int ret = 0;
        u64 bytenr;
        uuid_le uuid;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        root->root_key.objectid = objectid;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
                                      0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                leaf = NULL;
                goto fail;
        }

        bytenr = leaf->start;
        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, objectid);
        root->node = leaf;

        write_extent_buffer(leaf, fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(leaf),
                            BTRFS_FSID_SIZE);
        write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
                            (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
                            BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        root->commit_root = btrfs_root_node(root);
        root->track_dirty = 1;


        root->root_item.flags = 0;
        root->root_item.byte_limit = 0;
        btrfs_set_root_bytenr(&root->root_item, leaf->start);
        btrfs_set_root_generation(&root->root_item, trans->transid);
        btrfs_set_root_level(&root->root_item, 0);
        btrfs_set_root_refs(&root->root_item, 1);
        btrfs_set_root_used(&root->root_item, leaf->len);
        btrfs_set_root_last_snapshot(&root->root_item, 0);
        btrfs_set_root_dirid(&root->root_item, 0);
        uuid_le_gen(&uuid);
        memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
        root->root_item.drop_level = 0;

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
        if (ret)
                goto fail;

        btrfs_tree_unlock(leaf);

        return root;

fail:
        if (leaf) {
                btrfs_tree_unlock(leaf);
                free_extent_buffer(leaf);
        }
        kfree(root);

        return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
        /*
         * log trees do not get reference counted because they go away
         * before a real commit is actually done.  They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */
        root->ref_cows = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                      BTRFS_TREE_LOG_OBJECTID, NULL,
                                      0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
        root->node = leaf;

        write_extent_buffer(root->node, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(root->node),
                            BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, root->fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        inode_item->generation = cpu_to_le64(1);
        inode_item->size = cpu_to_le64(3);
        inode_item->nlink = cpu_to_le32(1);
        inode_item->nbytes = cpu_to_le64(root->leafsize);
        inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->last_log_commit = 0;
        return 0;
}
1421
1422 struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1423                                         struct btrfs_key *key)
1424 {
1425         struct btrfs_root *root;
1426         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1427         struct btrfs_path *path;
1428         u64 generation;
1429         u32 blocksize;
1430         int ret;
1431
1432         path = btrfs_alloc_path();
1433         if (!path)
1434                 return ERR_PTR(-ENOMEM);
1435
1436         root = btrfs_alloc_root(fs_info);
1437         if (!root) {
1438                 ret = -ENOMEM;
1439                 goto alloc_fail;
1440         }
1441
1442         __setup_root(tree_root->nodesize, tree_root->leafsize,
1443                      tree_root->sectorsize, tree_root->stripesize,
1444                      root, fs_info, key->objectid);
1445
1446         ret = btrfs_find_root(tree_root, key, path,
1447                               &root->root_item, &root->root_key);
1448         if (ret) {
1449                 if (ret > 0)
1450                         ret = -ENOENT;
1451                 goto find_fail;
1452         }
1453
1454         generation = btrfs_root_generation(&root->root_item);
1455         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1456         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1457                                      blocksize, generation);
1458         if (!root->node) {
1459                 ret = -ENOMEM;
1460                 goto find_fail;
1461         } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1462                 ret = -EIO;
1463                 goto read_fail;
1464         }
1465         root->commit_root = btrfs_root_node(root);
1466 out:
1467         btrfs_free_path(path);
1468         return root;
1469
1470 read_fail:
1471         free_extent_buffer(root->node);
1472 find_fail:
1473         kfree(root);
1474 alloc_fail:
1475         root = ERR_PTR(ret);
1476         goto out;
1477 }
1478
1479 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1480                                       struct btrfs_key *location)
1481 {
1482         struct btrfs_root *root;
1483
1484         root = btrfs_read_tree_root(tree_root, location);
1485         if (IS_ERR(root))
1486                 return root;
1487
1488         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1489                 root->ref_cows = 1;
1490                 btrfs_check_and_init_root_item(&root->root_item);
1491         }
1492
1493         return root;
1494 }
1495
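/*
 * Set up the runtime state a subvolume root needs before it can be
 * used: the free inode caches and an anonymous dev_t so each subvolume
 * can report its own st_dev.
 */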
1496 int btrfs_init_fs_root(struct btrfs_root *root)
1497 {
1498         int ret;
1499
1500         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1501         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1502                                         GFP_NOFS);
1503         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1504                 ret = -ENOMEM;
1505                 goto fail;
1506         }
1507
1508         btrfs_init_free_ino_ctl(root);
1509         mutex_init(&root->fs_commit_mutex);
1510         spin_lock_init(&root->cache_lock);
1511         init_waitqueue_head(&root->cache_wait);
1512
1513         ret = get_anon_bdev(&root->anon_dev);
1514         if (ret)
1515                 goto fail;
1516         return 0;
1517 fail:
1518         kfree(root->free_ino_ctl);
1519         kfree(root->free_ino_pinned);
1520         return ret;
1521 }
1522
1523 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1524                                         u64 root_id)
1525 {
1526         struct btrfs_root *root;
1527
1528         spin_lock(&fs_info->fs_roots_radix_lock);
1529         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1530                                  (unsigned long)root_id);
1531         spin_unlock(&fs_info->fs_roots_radix_lock);
1532         return root;
1533 }
1534
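/*
 * Insert a root into the radix tree of live roots.  The radix tree
 * nodes are preallocated with radix_tree_preload() because the insert
 * itself happens under a spinlock, where we cannot sleep to allocate.
 */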
1535 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1536                          struct btrfs_root *root)
1537 {
1538         int ret;
1539
1540         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1541         if (ret)
1542                 return ret;
1543
1544         spin_lock(&fs_info->fs_roots_radix_lock);
1545         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1546                                 (unsigned long)root->root_key.objectid,
1547                                 root);
1548         if (ret == 0)
1549                 root->in_radix = 1;
1550         spin_unlock(&fs_info->fs_roots_radix_lock);
1551         radix_tree_preload_end();
1552
1553         return ret;
1554 }
1555
1556 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1557                                               struct btrfs_key *location)
1558 {
1559         struct btrfs_root *root;
1560         int ret;
1561
1562         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1563                 return fs_info->tree_root;
1564         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1565                 return fs_info->extent_root;
1566         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1567                 return fs_info->chunk_root;
1568         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1569                 return fs_info->dev_root;
1570         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1571                 return fs_info->csum_root;
1572         if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1573                 return fs_info->quota_root ? fs_info->quota_root :
1574                                              ERR_PTR(-ENOENT);
1575 again:
1576         root = btrfs_lookup_fs_root(fs_info, location->objectid);
1577         if (root)
1578                 return root;
1579
1580         root = btrfs_read_fs_root(fs_info->tree_root, location);
1581         if (IS_ERR(root))
1582                 return root;
1583
1584         if (btrfs_root_refs(&root->root_item) == 0) {
1585                 ret = -ENOENT;
1586                 goto fail;
1587         }
1588
1589         ret = btrfs_init_fs_root(root);
1590         if (ret)
1591                 goto fail;
1592
1593         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1594         if (ret < 0)
1595                 goto fail;
1596         if (ret == 0)
1597                 root->orphan_item_inserted = 1;
1598
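        /*
         * Another task may have raced us here and inserted the same
         * root; if so, free our copy and retry the lookup to pick up
         * the one that won.
         */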
1599         ret = btrfs_insert_fs_root(fs_info, root);
1600         if (ret) {
1601                 if (ret == -EEXIST) {
1602                         free_fs_root(root);
1603                         goto again;
1604                 }
1605                 goto fail;
1606         }
1607         return root;
1608 fail:
1609         free_fs_root(root);
1610         return ERR_PTR(ret);
1611 }
1612
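/*
 * Congestion callback for the btrfs bdi: report the filesystem as
 * congested if any of the underlying devices is congested.
 */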
1613 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1614 {
1615         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1616         int ret = 0;
1617         struct btrfs_device *device;
1618         struct backing_dev_info *bdi;
1619
1620         rcu_read_lock();
1621         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1622                 if (!device->bdev)
1623                         continue;
1624                 bdi = blk_get_backing_dev_info(device->bdev);
1625                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1626                         ret = 1;
1627                         break;
1628                 }
1629         }
1630         rcu_read_unlock();
1631         return ret;
1632 }
1633
1634 /*
1635  * If this fails, caller must call bdi_destroy() to get rid of the
1636  * bdi again.
1637  */
1638 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1639 {
1640         int err;
1641
1642         bdi->capabilities = BDI_CAP_MAP_COPY;
1643         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1644         if (err)
1645                 return err;
1646
1647         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1648         bdi->congested_fn       = btrfs_congested_fn;
1649         bdi->congested_data     = info;
1650         return 0;
1651 }
1652
1653 /*
1654  * Called by the kthread helper functions to finally call the bio end_io
1655  * functions.  This is where read checksum verification actually happens.
1656  */
1657 static void end_workqueue_fn(struct btrfs_work *work)
1658 {
1659         struct bio *bio;
1660         struct end_io_wq *end_io_wq;
1661         struct btrfs_fs_info *fs_info;
1662         int error;
1663
1664         end_io_wq = container_of(work, struct end_io_wq, work);
1665         bio = end_io_wq->bio;
1666         fs_info = end_io_wq->info;
1667
1668         error = end_io_wq->error;
1669         bio->bi_private = end_io_wq->private;
1670         bio->bi_end_io = end_io_wq->end_io;
1671         kfree(end_io_wq);
1672         bio_endio(bio, error);
1673 }
1674
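/*
 * The cleaner kthread does the background cleanup work: it runs the
 * delayed iputs, drops one deleted snapshot per pass and kicks off
 * inode defrag, backing off whenever btrfs_need_cleaner_sleep() says
 * the fs is busy.
 */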
1675 static int cleaner_kthread(void *arg)
1676 {
1677         struct btrfs_root *root = arg;
1678         int again;
1679
1680         do {
1681                 again = 0;
1682
1683                 /* Make the cleaner go to sleep early. */
1684                 if (btrfs_need_cleaner_sleep(root))
1685                         goto sleep;
1686
1687                 if (!mutex_trylock(&root->fs_info->cleaner_mutex))
1688                         goto sleep;
1689
1690                 /*
1691                  * Re-check in case the status of the fs changed while
1692                  * we were doing the check and trylock above.
1693                  */
1694                 if (btrfs_need_cleaner_sleep(root)) {
1695                         mutex_unlock(&root->fs_info->cleaner_mutex);
1696                         goto sleep;
1697                 }
1698
1699                 btrfs_run_delayed_iputs(root);
1700                 again = btrfs_clean_one_deleted_snapshot(root);
1701                 mutex_unlock(&root->fs_info->cleaner_mutex);
1702
1703                 /*
1704                  * The defragger has dealt with the R/O remount and umount,
1705                  * so we needn't do anything special here.
1706                  */
1707                 btrfs_run_defrag_inodes(root->fs_info);
1708 sleep:
1709                 if (!try_to_freeze() && !again) {
1710                         set_current_state(TASK_INTERRUPTIBLE);
1711                         if (!kthread_should_stop())
1712                                 schedule();
1713                         __set_current_state(TASK_RUNNING);
1714                 }
1715         } while (!kthread_should_stop());
1716         return 0;
1717 }
1718
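/*
 * The transaction kthread periodically forces a commit: it checks the
 * running transaction every 30 seconds (every 5 while the transaction
 * is still young) and commits it once it is at least 30 seconds old or
 * has been marked blocked.
 */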
1719 static int transaction_kthread(void *arg)
1720 {
1721         struct btrfs_root *root = arg;
1722         struct btrfs_trans_handle *trans;
1723         struct btrfs_transaction *cur;
1724         u64 transid;
1725         unsigned long now;
1726         unsigned long delay;
1727         bool cannot_commit;
1728
1729         do {
1730                 cannot_commit = false;
1731                 delay = HZ * 30;
1732                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1733
1734                 spin_lock(&root->fs_info->trans_lock);
1735                 cur = root->fs_info->running_transaction;
1736                 if (!cur) {
1737                         spin_unlock(&root->fs_info->trans_lock);
1738                         goto sleep;
1739                 }
1740
1741                 now = get_seconds();
1742                 if (!cur->blocked &&
1743                     (now < cur->start_time || now - cur->start_time < 30)) {
1744                         spin_unlock(&root->fs_info->trans_lock);
1745                         delay = HZ * 5;
1746                         goto sleep;
1747                 }
1748                 transid = cur->transid;
1749                 spin_unlock(&root->fs_info->trans_lock);
1750
1751                 /* If the file system is aborted, this will always fail. */
1752                 trans = btrfs_attach_transaction(root);
1753                 if (IS_ERR(trans)) {
1754                         if (PTR_ERR(trans) != -ENOENT)
1755                                 cannot_commit = true;
1756                         goto sleep;
1757                 }
1758                 if (transid == trans->transid) {
1759                         btrfs_commit_transaction(trans, root);
1760                 } else {
1761                         btrfs_end_transaction(trans, root);
1762                 }
1763 sleep:
1764                 wake_up_process(root->fs_info->cleaner_kthread);
1765                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1766
1767                 if (!try_to_freeze()) {
1768                         set_current_state(TASK_INTERRUPTIBLE);
1769                         if (!kthread_should_stop() &&
1770                             (!btrfs_transaction_blocked(root->fs_info) ||
1771                              cannot_commit))
1772                                 schedule_timeout(delay);
1773                         __set_current_state(TASK_RUNNING);
1774                 }
1775         } while (!kthread_should_stop());
1776         return 0;
1777 }
1778
1779 /*
1780  * This will find the highest generation in the array of
1781  * root backups.  The index of the newest array entry is returned,
1782  * or -1 if we can't find anything.
1783  *
1784  * We check to make sure the array is valid by comparing the
1785  * generation of the latest root in the array with the generation
1786  * in the super block.  If they don't match we pitch it.
1787  */
1788 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1789 {
1790         u64 cur;
1791         int newest_index = -1;
1792         struct btrfs_root_backup *root_backup;
1793         int i;
1794
1795         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1796                 root_backup = info->super_copy->super_roots + i;
1797                 cur = btrfs_backup_tree_root_gen(root_backup);
1798                 if (cur == newest_gen)
1799                         newest_index = i;
1800         }
1801
1802         /* check to see if we actually wrapped around */
1803         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1804                 root_backup = info->super_copy->super_roots;
1805                 cur = btrfs_backup_tree_root_gen(root_backup);
1806                 if (cur == newest_gen)
1807                         newest_index = 0;
1808         }
1809         return newest_index;
1810 }
1811
1812
1813 /*
1814  * find the oldest backup so we know where to store new entries
1815  * in the backup array.  This will set the backup_root_index
1816  * field in the fs_info struct
1817  */
1818 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1819                                      u64 newest_gen)
1820 {
1821         int newest_index = -1;
1822
1823         newest_index = find_newest_super_backup(info, newest_gen);
1824         /* if there was garbage in there, just move along */
1825         if (newest_index == -1) {
1826                 info->backup_root_index = 0;
1827         } else {
1828                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1829         }
1830 }
1831
1832 /*
1833  * Copy all the root pointers into the super backup array.
1834  * This will bump the backup pointer by one when it is
1835  * done.
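 *
 * For example, with BTRFS_NUM_BACKUP_ROOTS == 4 and backup_root_index
 * == 2, a normal commit fills slot 2 and advances the index to 3; if
 * the generation hasn't changed since the last backup (umount), slot 1
 * is overwritten in place instead.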
1836  */
1837 static void backup_super_roots(struct btrfs_fs_info *info)
1838 {
1839         int next_backup;
1840         struct btrfs_root_backup *root_backup;
1841         int last_backup;
1842
1843         next_backup = info->backup_root_index;
1844         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1845                 BTRFS_NUM_BACKUP_ROOTS;
1846
1847         /*
1848          * Just overwrite the last backup if we're at the same generation;
1849          * this happens only at umount.
1850          */
1851         root_backup = info->super_for_commit->super_roots + last_backup;
1852         if (btrfs_backup_tree_root_gen(root_backup) ==
1853             btrfs_header_generation(info->tree_root->node))
1854                 next_backup = last_backup;
1855
1856         root_backup = info->super_for_commit->super_roots + next_backup;
1857
1858         /*
1859          * make sure all of our padding and empty slots get zero filled
1860          * regardless of which ones we use today
1861          */
1862         memset(root_backup, 0, sizeof(*root_backup));
1863
1864         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1865
1866         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1867         btrfs_set_backup_tree_root_gen(root_backup,
1868                                btrfs_header_generation(info->tree_root->node));
1869
1870         btrfs_set_backup_tree_root_level(root_backup,
1871                                btrfs_header_level(info->tree_root->node));
1872
1873         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1874         btrfs_set_backup_chunk_root_gen(root_backup,
1875                                btrfs_header_generation(info->chunk_root->node));
1876         btrfs_set_backup_chunk_root_level(root_backup,
1877                                btrfs_header_level(info->chunk_root->node));
1878
1879         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1880         btrfs_set_backup_extent_root_gen(root_backup,
1881                                btrfs_header_generation(info->extent_root->node));
1882         btrfs_set_backup_extent_root_level(root_backup,
1883                                btrfs_header_level(info->extent_root->node));
1884
1885         /*
1886          * we might commit during log recovery, which happens before we set
1887          * the fs_root.  Make sure it is valid before we fill it in.
1888          */
1889         if (info->fs_root && info->fs_root->node) {
1890                 btrfs_set_backup_fs_root(root_backup,
1891                                          info->fs_root->node->start);
1892                 btrfs_set_backup_fs_root_gen(root_backup,
1893                                btrfs_header_generation(info->fs_root->node));
1894                 btrfs_set_backup_fs_root_level(root_backup,
1895                                btrfs_header_level(info->fs_root->node));
1896         }
1897
1898         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1899         btrfs_set_backup_dev_root_gen(root_backup,
1900                                btrfs_header_generation(info->dev_root->node));
1901         btrfs_set_backup_dev_root_level(root_backup,
1902                                        btrfs_header_level(info->dev_root->node));
1903
1904         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1905         btrfs_set_backup_csum_root_gen(root_backup,
1906                                btrfs_header_generation(info->csum_root->node));
1907         btrfs_set_backup_csum_root_level(root_backup,
1908                                btrfs_header_level(info->csum_root->node));
1909
1910         btrfs_set_backup_total_bytes(root_backup,
1911                              btrfs_super_total_bytes(info->super_copy));
1912         btrfs_set_backup_bytes_used(root_backup,
1913                              btrfs_super_bytes_used(info->super_copy));
1914         btrfs_set_backup_num_devices(root_backup,
1915                              btrfs_super_num_devices(info->super_copy));
1916
1917         /*
1918          * if we don't copy this out to the super_copy, it won't get remembered
1919          * for the next commit
1920          */
1921         memcpy(&info->super_copy->super_roots,
1922                &info->super_for_commit->super_roots,
1923                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1924 }
1925
1926 /*
1927  * this copies info out of the root backup array and back into
1928  * the in-memory super block.  It is meant to help iterate through
1929  * the array, so you send it the number of backups you've already
1930  * tried and the last backup index you used.
1931  *
1932  * this returns -1 when it has tried all the backups
1933  */
1934 static noinline int next_root_backup(struct btrfs_fs_info *info,
1935                                      struct btrfs_super_block *super,
1936                                      int *num_backups_tried, int *backup_index)
1937 {
1938         struct btrfs_root_backup *root_backup;
1939         int newest = *backup_index;
1940
1941         if (*num_backups_tried == 0) {
1942                 u64 gen = btrfs_super_generation(super);
1943
1944                 newest = find_newest_super_backup(info, gen);
1945                 if (newest == -1)
1946                         return -1;
1947
1948                 *backup_index = newest;
1949                 *num_backups_tried = 1;
1950         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1951                 /* we've tried all the backups, all done */
1952                 return -1;
1953         } else {
1954                 /* jump to the next oldest backup */
1955                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1956                         BTRFS_NUM_BACKUP_ROOTS;
1957                 *backup_index = newest;
1958                 *num_backups_tried += 1;
1959         }
1960         root_backup = super->super_roots + newest;
1961
1962         btrfs_set_super_generation(super,
1963                                    btrfs_backup_tree_root_gen(root_backup));
1964         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1965         btrfs_set_super_root_level(super,
1966                                    btrfs_backup_tree_root_level(root_backup));
1967         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1968
1969         /*
1970          * FIXME: the total bytes and num_devices need to match or we should
1971          * require a fsck
1972          */
1973         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1974         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1975         return 0;
1976 }
1977
1978 /* helper to cleanup workers */
1979 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1980 {
1981         btrfs_stop_workers(&fs_info->generic_worker);
1982         btrfs_stop_workers(&fs_info->fixup_workers);
1983         btrfs_stop_workers(&fs_info->delalloc_workers);
1984         btrfs_stop_workers(&fs_info->workers);
1985         btrfs_stop_workers(&fs_info->endio_workers);
1986         btrfs_stop_workers(&fs_info->endio_meta_workers);
1987         btrfs_stop_workers(&fs_info->endio_raid56_workers);
1988         btrfs_stop_workers(&fs_info->rmw_workers);
1989         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
1990         btrfs_stop_workers(&fs_info->endio_write_workers);
1991         btrfs_stop_workers(&fs_info->endio_freespace_worker);
1992         btrfs_stop_workers(&fs_info->submit_workers);
1993         btrfs_stop_workers(&fs_info->delayed_workers);
1994         btrfs_stop_workers(&fs_info->caching_workers);
1995         btrfs_stop_workers(&fs_info->readahead_workers);
1996         btrfs_stop_workers(&fs_info->flush_workers);
1997         btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
1998 }
1999
2000 /* helper to cleanup tree roots */
2001 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2002 {
2003         free_extent_buffer(info->tree_root->node);
2004         free_extent_buffer(info->tree_root->commit_root);
2005         info->tree_root->node = NULL;
2006         info->tree_root->commit_root = NULL;
2007
2008         if (info->dev_root) {
2009                 free_extent_buffer(info->dev_root->node);
2010                 free_extent_buffer(info->dev_root->commit_root);
2011                 info->dev_root->node = NULL;
2012                 info->dev_root->commit_root = NULL;
2013         }
2014         if (info->extent_root) {
2015                 free_extent_buffer(info->extent_root->node);
2016                 free_extent_buffer(info->extent_root->commit_root);
2017                 info->extent_root->node = NULL;
2018                 info->extent_root->commit_root = NULL;
2019         }
2020         if (info->csum_root) {
2021                 free_extent_buffer(info->csum_root->node);
2022                 free_extent_buffer(info->csum_root->commit_root);
2023                 info->csum_root->node = NULL;
2024                 info->csum_root->commit_root = NULL;
2025         }
2026         if (info->quota_root) {
2027                 free_extent_buffer(info->quota_root->node);
2028                 free_extent_buffer(info->quota_root->commit_root);
2029                 info->quota_root->node = NULL;
2030                 info->quota_root->commit_root = NULL;
2031         }
2032         if (chunk_root) {
2033                 free_extent_buffer(info->chunk_root->node);
2034                 free_extent_buffer(info->chunk_root->commit_root);
2035                 info->chunk_root->node = NULL;
2036                 info->chunk_root->commit_root = NULL;
2037         }
2038 }
2039
2040 static void del_fs_roots(struct btrfs_fs_info *fs_info)
2041 {
2042         int ret;
2043         struct btrfs_root *gang[8];
2044         int i;
2045
2046         while (!list_empty(&fs_info->dead_roots)) {
2047                 gang[0] = list_entry(fs_info->dead_roots.next,
2048                                      struct btrfs_root, root_list);
2049                 list_del(&gang[0]->root_list);
2050
2051                 if (gang[0]->in_radix) {
2052                         btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2053                 } else {
2054                         free_extent_buffer(gang[0]->node);
2055                         free_extent_buffer(gang[0]->commit_root);
2056                         btrfs_put_fs_root(gang[0]);
2057                 }
2058         }
2059
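        /*
         * Drop whatever is left in the radix tree, up to eight roots
         * at a time, until the gang lookup comes back empty.
         */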
2060         while (1) {
2061                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2062                                              (void **)gang, 0,
2063                                              ARRAY_SIZE(gang));
2064                 if (!ret)
2065                         break;
2066                 for (i = 0; i < ret; i++)
2067                         btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2068         }
2069 }
2070
2071 int open_ctree(struct super_block *sb,
2072                struct btrfs_fs_devices *fs_devices,
2073                char *options)
2074 {
2075         u32 sectorsize;
2076         u32 nodesize;
2077         u32 leafsize;
2078         u32 blocksize;
2079         u32 stripesize;
2080         u64 generation;
2081         u64 features;
2082         struct btrfs_key location;
2083         struct buffer_head *bh;
2084         struct btrfs_super_block *disk_super;
2085         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2086         struct btrfs_root *tree_root;
2087         struct btrfs_root *extent_root;
2088         struct btrfs_root *csum_root;
2089         struct btrfs_root *chunk_root;
2090         struct btrfs_root *dev_root;
2091         struct btrfs_root *quota_root;
2092         struct btrfs_root *log_tree_root;
2093         int ret;
2094         int err = -EINVAL;
2095         int num_backups_tried = 0;
2096         int backup_index = 0;
2097
2098         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
2099         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
2100         if (!tree_root || !chunk_root) {
2101                 err = -ENOMEM;
2102                 goto fail;
2103         }
2104
2105         ret = init_srcu_struct(&fs_info->subvol_srcu);
2106         if (ret) {
2107                 err = ret;
2108                 goto fail;
2109         }
2110
2111         ret = setup_bdi(fs_info, &fs_info->bdi);
2112         if (ret) {
2113                 err = ret;
2114                 goto fail_srcu;
2115         }
2116
2117         ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2118         if (ret) {
2119                 err = ret;
2120                 goto fail_bdi;
2121         }
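        /*
         * Scale the percpu counter batch with the CPU count so that
         * per-CPU deltas are folded into the global counter less often
         * on large machines, keeping contention low.
         */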
2122         fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2123                                         (1 + ilog2(nr_cpu_ids));
2124
2125         ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2126         if (ret) {
2127                 err = ret;
2128                 goto fail_dirty_metadata_bytes;
2129         }
2130
2131         fs_info->btree_inode = new_inode(sb);
2132         if (!fs_info->btree_inode) {
2133                 err = -ENOMEM;
2134                 goto fail_delalloc_bytes;
2135         }
2136
2137         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2138
2139         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2140         INIT_LIST_HEAD(&fs_info->trans_list);
2141         INIT_LIST_HEAD(&fs_info->dead_roots);
2142         INIT_LIST_HEAD(&fs_info->delayed_iputs);
2143         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
2144         INIT_LIST_HEAD(&fs_info->caching_block_groups);
2145         spin_lock_init(&fs_info->delalloc_lock);
2146         spin_lock_init(&fs_info->trans_lock);
2147         spin_lock_init(&fs_info->fs_roots_radix_lock);
2148         spin_lock_init(&fs_info->delayed_iput_lock);
2149         spin_lock_init(&fs_info->defrag_inodes_lock);
2150         spin_lock_init(&fs_info->free_chunk_lock);
2151         spin_lock_init(&fs_info->tree_mod_seq_lock);
2152         spin_lock_init(&fs_info->super_lock);
2153         rwlock_init(&fs_info->tree_mod_log_lock);
2154         mutex_init(&fs_info->reloc_mutex);
2155         seqlock_init(&fs_info->profiles_lock);
2156
2157         init_completion(&fs_info->kobj_unregister);
2158         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2159         INIT_LIST_HEAD(&fs_info->space_info);
2160         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2161         btrfs_mapping_init(&fs_info->mapping_tree);
2162         btrfs_init_block_rsv(&fs_info->global_block_rsv,
2163                              BTRFS_BLOCK_RSV_GLOBAL);
2164         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2165                              BTRFS_BLOCK_RSV_DELALLOC);
2166         btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2167         btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2168         btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2169         btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2170                              BTRFS_BLOCK_RSV_DELOPS);
2171         atomic_set(&fs_info->nr_async_submits, 0);
2172         atomic_set(&fs_info->async_delalloc_pages, 0);
2173         atomic_set(&fs_info->async_submit_draining, 0);
2174         atomic_set(&fs_info->nr_async_bios, 0);
2175         atomic_set(&fs_info->defrag_running, 0);
2176         atomic64_set(&fs_info->tree_mod_seq, 0);
2177         fs_info->sb = sb;
2178         fs_info->max_inline = 8192 * 1024;
2179         fs_info->metadata_ratio = 0;
2180         fs_info->defrag_inodes = RB_ROOT;
2181         fs_info->trans_no_join = 0;
2182         fs_info->free_chunk_space = 0;
2183         fs_info->tree_mod_log = RB_ROOT;
2184
2185         /* readahead state */
2186         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2187         spin_lock_init(&fs_info->reada_lock);
2188
2189         fs_info->thread_pool_size = min_t(unsigned long,
2190                                           num_online_cpus() + 2, 8);
2191
2192         INIT_LIST_HEAD(&fs_info->ordered_extents);
2193         spin_lock_init(&fs_info->ordered_extent_lock);
2194         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2195                                         GFP_NOFS);
2196         if (!fs_info->delayed_root) {
2197                 err = -ENOMEM;
2198                 goto fail_iput;
2199         }
2200         btrfs_init_delayed_root(fs_info->delayed_root);
2201
2202         mutex_init(&fs_info->scrub_lock);
2203         atomic_set(&fs_info->scrubs_running, 0);
2204         atomic_set(&fs_info->scrub_pause_req, 0);
2205         atomic_set(&fs_info->scrubs_paused, 0);
2206         atomic_set(&fs_info->scrub_cancel_req, 0);
2207         init_waitqueue_head(&fs_info->scrub_pause_wait);
2208         init_rwsem(&fs_info->scrub_super_lock);
2209         fs_info->scrub_workers_refcnt = 0;
2210 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2211         fs_info->check_integrity_print_mask = 0;
2212 #endif
2213
2214         spin_lock_init(&fs_info->balance_lock);
2215         mutex_init(&fs_info->balance_mutex);
2216         atomic_set(&fs_info->balance_running, 0);
2217         atomic_set(&fs_info->balance_pause_req, 0);
2218         atomic_set(&fs_info->balance_cancel_req, 0);
2219         fs_info->balance_ctl = NULL;
2220         init_waitqueue_head(&fs_info->balance_wait_q);
2221
2222         sb->s_blocksize = 4096;
2223         sb->s_blocksize_bits = blksize_bits(4096);
2224         sb->s_bdi = &fs_info->bdi;
2225
2226         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2227         set_nlink(fs_info->btree_inode, 1);
2228         /*
2229          * we set the i_size on the btree inode to the max possible offset.
2230          * the real end of the address space is determined by all of
2231          * the devices in the system
2232          */
2233         fs_info->btree_inode->i_size = OFFSET_MAX;
2234         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2235         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2236
2237         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2238         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2239                              fs_info->btree_inode->i_mapping);
2240         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2241         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2242
2243         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2244
2245         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2246         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2247                sizeof(struct btrfs_key));
2248         set_bit(BTRFS_INODE_DUMMY,
2249                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2250         insert_inode_hash(fs_info->btree_inode);
2251
2252         spin_lock_init(&fs_info->block_group_cache_lock);
2253         fs_info->block_group_cache_tree = RB_ROOT;
2254         fs_info->first_logical_byte = (u64)-1;
2255
2256         extent_io_tree_init(&fs_info->freed_extents[0],
2257                              fs_info->btree_inode->i_mapping);
2258         extent_io_tree_init(&fs_info->freed_extents[1],
2259                              fs_info->btree_inode->i_mapping);
2260         fs_info->pinned_extents = &fs_info->freed_extents[0];
2261         fs_info->do_barriers = 1;
2262
2263
2264         mutex_init(&fs_info->ordered_operations_mutex);
2265         mutex_init(&fs_info->tree_log_mutex);
2266         mutex_init(&fs_info->chunk_mutex);
2267         mutex_init(&fs_info->transaction_kthread_mutex);
2268         mutex_init(&fs_info->cleaner_mutex);
2269         mutex_init(&fs_info->volume_mutex);
2270         init_rwsem(&fs_info->extent_commit_sem);
2271         init_rwsem(&fs_info->cleanup_work_sem);
2272         init_rwsem(&fs_info->subvol_sem);
2273         fs_info->dev_replace.lock_owner = 0;
2274         atomic_set(&fs_info->dev_replace.nesting_level, 0);
2275         mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2276         mutex_init(&fs_info->dev_replace.lock_management_lock);
2277         mutex_init(&fs_info->dev_replace.lock);
2278
2279         spin_lock_init(&fs_info->qgroup_lock);
2280         mutex_init(&fs_info->qgroup_ioctl_lock);
2281         fs_info->qgroup_tree = RB_ROOT;
2282         INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2283         fs_info->qgroup_seq = 1;
2284         fs_info->quota_enabled = 0;
2285         fs_info->pending_quota_state = 0;
2286         fs_info->qgroup_ulist = NULL;
2287         mutex_init(&fs_info->qgroup_rescan_lock);
2288
2289         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2290         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2291
2292         init_waitqueue_head(&fs_info->transaction_throttle);
2293         init_waitqueue_head(&fs_info->transaction_wait);
2294         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2295         init_waitqueue_head(&fs_info->async_submit_wait);
2296
2297         ret = btrfs_alloc_stripe_hash_table(fs_info);
2298         if (ret) {
2299                 err = ret;
2300                 goto fail_alloc;
2301         }
2302
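        /*
         * Set up the tree root with dummy 4K sizes for now; the real
         * node, leaf and sector sizes are filled in once the super
         * block has been read and validated.
         */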
2303         __setup_root(4096, 4096, 4096, 4096, tree_root,
2304                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2305
2306         invalidate_bdev(fs_devices->latest_bdev);
2307
2308         /*
2309          * Read super block and check the signature bytes only
2310          */
2311         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2312         if (!bh) {
2313                 err = -EINVAL;
2314                 goto fail_alloc;
2315         }
2316
2317         /*
2318          * We want to check the superblock checksum; the csum type is stored inside.
2319          * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2320          */
2321         if (btrfs_check_super_csum(bh->b_data)) {
2322                 printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
2323                 err = -EINVAL;
2324                 goto fail_alloc;
2325         }
2326
2327         /*
2328          * super_copy is zeroed at allocation time and we never touch the
2329          * following bytes up to INFO_SIZE; the checksum is calculated from
2330          * the whole block of INFO_SIZE.
2331          */
2332         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2333         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2334                sizeof(*fs_info->super_for_commit));
2335         brelse(bh);
2336
2337         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2338
2339         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2340         if (ret) {
2341                 printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2342                 err = -EINVAL;
2343                 goto fail_alloc;
2344         }
2345
2346         disk_super = fs_info->super_copy;
2347         if (!btrfs_super_root(disk_super))
2348                 goto fail_alloc;
2349
2350         /* check FS state, whether FS is broken. */
2351         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2352                 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2353
2354         /*
2355          * run through our array of backup supers and set up
2356          * our ring pointer to the oldest one
2357          */
2358         generation = btrfs_super_generation(disk_super);
2359         find_oldest_super_backup(fs_info, generation);
2360
2361         /*
2362          * In the long term, we'll store the compression type in the super
2363          * block, and it'll be used for per file compression control.
2364          */
2365         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2366
2367         ret = btrfs_parse_options(tree_root, options);
2368         if (ret) {
2369                 err = ret;
2370                 goto fail_alloc;
2371         }
2372
2373         features = btrfs_super_incompat_flags(disk_super) &
2374                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2375         if (features) {
2376                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2377                        "unsupported optional features (%Lx).\n",
2378                        (unsigned long long)features);
2379                 err = -EINVAL;
2380                 goto fail_alloc;
2381         }
2382
2383         if (btrfs_super_leafsize(disk_super) !=
2384             btrfs_super_nodesize(disk_super)) {
2385                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2386                        "blocksizes don't match.  node %d leaf %d\n",
2387                        btrfs_super_nodesize(disk_super),
2388                        btrfs_super_leafsize(disk_super));
2389                 err = -EINVAL;
2390                 goto fail_alloc;
2391         }
2392         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2393                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2394                        "blocksize (%d) was too large\n",
2395                        btrfs_super_leafsize(disk_super));
2396                 err = -EINVAL;
2397                 goto fail_alloc;
2398         }
2399
2400         features = btrfs_super_incompat_flags(disk_super);
2401         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2402         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2403                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2404
2405         if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2406                 printk(KERN_INFO "btrfs: has skinny extents\n");
2407
2408         /*
2409          * flag our filesystem as having big metadata blocks if
2410          * they are bigger than the page size
2411          */
2412         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2413                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2414                         printk(KERN_INFO "btrfs: flagging fs with big metadata feature\n");
2415                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2416         }
2417
2418         nodesize = btrfs_super_nodesize(disk_super);
2419         leafsize = btrfs_super_leafsize(disk_super);
2420         sectorsize = btrfs_super_sectorsize(disk_super);
2421         stripesize = btrfs_super_stripesize(disk_super);
2422         fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2423         fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2424
2425         /*
2426          * mixed block groups end up with duplicate but slightly offset
2427          * extent buffers for the same range.  It leads to corruption.
2428          */
2429         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2430             (sectorsize != leafsize)) {
2431                 printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2432                                 "are not allowed for mixed block groups on %s\n",
2433                                 sb->s_id);
2434                 goto fail_alloc;
2435         }
2436
2437         /*
2438          * We needn't take the lock here because no other task can
2439          * update the flag at this point.
2440          */
2441         btrfs_set_super_incompat_flags(disk_super, features);
2442
2443         features = btrfs_super_compat_ro_flags(disk_super) &
2444                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2445         if (!(sb->s_flags & MS_RDONLY) && features) {
2446                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2447                        "unsupported option features (%Lx).\n",
2448                        (unsigned long long)features);
2449                 err = -EINVAL;
2450                 goto fail_alloc;
2451         }
2452
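        /*
         * Spin up the worker pools.  generic_worker doubles as the
         * async helper the other pools use to start their threads
         * without blocking.
         */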
2453         btrfs_init_workers(&fs_info->generic_worker,
2454                            "genwork", 1, NULL);
2455
2456         btrfs_init_workers(&fs_info->workers, "worker",
2457                            fs_info->thread_pool_size,
2458                            &fs_info->generic_worker);
2459
2460         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2461                            fs_info->thread_pool_size,
2462                            &fs_info->generic_worker);
2463
2464         btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
2465                            fs_info->thread_pool_size,
2466                            &fs_info->generic_worker);
2467
2468         btrfs_init_workers(&fs_info->submit_workers, "submit",
2469                            min_t(u64, fs_devices->num_devices,
2470                            fs_info->thread_pool_size),
2471                            &fs_info->generic_worker);
2472
2473         btrfs_init_workers(&fs_info->caching_workers, "cache",
2474                            2, &fs_info->generic_worker);
2475
2476         /* a higher idle thresh on the submit workers makes it much more
2477          * likely that bios will be sent down in a sane order to the
2478          * devices
2479          */
2480         fs_info->submit_workers.idle_thresh = 64;
2481
2482         fs_info->workers.idle_thresh = 16;
2483         fs_info->workers.ordered = 1;
2484
2485         fs_info->delalloc_workers.idle_thresh = 2;
2486         fs_info->delalloc_workers.ordered = 1;
2487
2488         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2489                            &fs_info->generic_worker);
2490         btrfs_init_workers(&fs_info->endio_workers, "endio",
2491                            fs_info->thread_pool_size,
2492                            &fs_info->generic_worker);
2493         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2494                            fs_info->thread_pool_size,
2495                            &fs_info->generic_worker);
2496         btrfs_init_workers(&fs_info->endio_meta_write_workers,
2497                            "endio-meta-write", fs_info->thread_pool_size,
2498                            &fs_info->generic_worker);
2499         btrfs_init_workers(&fs_info->endio_raid56_workers,
2500                            "endio-raid56", fs_info->thread_pool_size,
2501                            &fs_info->generic_worker);
2502         btrfs_init_workers(&fs_info->rmw_workers,
2503                            "rmw", fs_info->thread_pool_size,
2504                            &fs_info->generic_worker);
2505         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2506                            fs_info->thread_pool_size,
2507                            &fs_info->generic_worker);
2508         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2509                            1, &fs_info->generic_worker);
2510         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2511                            fs_info->thread_pool_size,
2512                            &fs_info->generic_worker);
2513         btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2514                            fs_info->thread_pool_size,
2515                            &fs_info->generic_worker);
2516         btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
2517                            &fs_info->generic_worker);
2518
2519         /*
2520          * endios are largely parallel and should have a very
2521          * low idle thresh
2522          */
2523         fs_info->endio_workers.idle_thresh = 4;
2524         fs_info->endio_meta_workers.idle_thresh = 4;
2525         fs_info->endio_raid56_workers.idle_thresh = 4;
2526         fs_info->rmw_workers.idle_thresh = 2;
2527
2528         fs_info->endio_write_workers.idle_thresh = 2;
2529         fs_info->endio_meta_write_workers.idle_thresh = 2;
2530         fs_info->readahead_workers.idle_thresh = 2;
2531
2532         /*
2533          * btrfs_start_workers can really only fail because of ENOMEM so just
2534          * return -ENOMEM if any of these fail.
2535          */
2536         ret = btrfs_start_workers(&fs_info->workers);
2537         ret |= btrfs_start_workers(&fs_info->generic_worker);
2538         ret |= btrfs_start_workers(&fs_info->submit_workers);
2539         ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2540         ret |= btrfs_start_workers(&fs_info->fixup_workers);
2541         ret |= btrfs_start_workers(&fs_info->endio_workers);
2542         ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2543         ret |= btrfs_start_workers(&fs_info->rmw_workers);
2544         ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
2545         ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2546         ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2547         ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2548         ret |= btrfs_start_workers(&fs_info->delayed_workers);
2549         ret |= btrfs_start_workers(&fs_info->caching_workers);
2550         ret |= btrfs_start_workers(&fs_info->readahead_workers);
2551         ret |= btrfs_start_workers(&fs_info->flush_workers);
2552         ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
2553         if (ret) {
2554                 err = -ENOMEM;
2555                 goto fail_sb_buffer;
2556         }
2557
2558         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2559         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2560                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2561
2562         tree_root->nodesize = nodesize;
2563         tree_root->leafsize = leafsize;
2564         tree_root->sectorsize = sectorsize;
2565         tree_root->stripesize = stripesize;
2566
2567         sb->s_blocksize = sectorsize;
2568         sb->s_blocksize_bits = blksize_bits(sectorsize);
2569
2570         if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2571                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2572                 goto fail_sb_buffer;
2573         }
2574
2575         if (sectorsize != PAGE_SIZE) {
2576                 printk(KERN_WARNING "btrfs: Incompatible sector size (%lu) "
2577                        "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2578                 goto fail_sb_buffer;
2579         }
2580
2581         mutex_lock(&fs_info->chunk_mutex);
2582         ret = btrfs_read_sys_array(tree_root);
2583         mutex_unlock(&fs_info->chunk_mutex);
2584         if (ret) {
2585                 printk(KERN_WARNING "btrfs: failed to read the system "
2586                        "array on %s\n", sb->s_id);
2587                 goto fail_sb_buffer;
2588         }
2589
2590         blocksize = btrfs_level_size(tree_root,
2591                                      btrfs_super_chunk_root_level(disk_super));
2592         generation = btrfs_super_chunk_root_generation(disk_super);
2593
2594         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2595                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2596
2597         chunk_root->node = read_tree_block(chunk_root,
2598                                            btrfs_super_chunk_root(disk_super),
2599                                            blocksize, generation);
2600         if (!chunk_root->node ||
2601             !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2602                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2603                        sb->s_id);
2604                 goto fail_tree_roots;
2605         }
2606         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2607         chunk_root->commit_root = btrfs_root_node(chunk_root);
2608
2609         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2610            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2611            BTRFS_UUID_SIZE);
2612
2613         ret = btrfs_read_chunk_tree(chunk_root);
2614         if (ret) {
2615                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2616                        sb->s_id);
2617                 goto fail_tree_roots;
2618         }
2619
2620         /*
2621          * keep the device that is marked to be the target device for the
2622          * dev_replace procedure
2623          */
2624         btrfs_close_extra_devices(fs_info, fs_devices, 0);
2625
2626         if (!fs_devices->latest_bdev) {
2627                 printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2628                        sb->s_id);
2629                 goto fail_tree_roots;
2630         }
2631
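        /*
         * If reading the tree root or any of the trees hanging off it
         * fails and the fs was mounted with the recovery option, the
         * error path loads the next-oldest backup root from the super
         * block and retries from here.
         */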
2632 retry_root_backup:
2633         blocksize = btrfs_level_size(tree_root,
2634                                      btrfs_super_root_level(disk_super));
2635         generation = btrfs_super_generation(disk_super);
2636
2637         tree_root->node = read_tree_block(tree_root,
2638                                           btrfs_super_root(disk_super),
2639                                           blocksize, generation);
2640         if (!tree_root->node ||
2641             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2642                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2643                        sb->s_id);
2644
2645                 goto recovery_tree_root;
2646         }
2647
2648         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2649         tree_root->commit_root = btrfs_root_node(tree_root);
2650
2651         location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2652         location.type = BTRFS_ROOT_ITEM_KEY;
2653         location.offset = 0;
2654
2655         extent_root = btrfs_read_tree_root(tree_root, &location);
2656         if (IS_ERR(extent_root)) {
2657                 ret = PTR_ERR(extent_root);
2658                 goto recovery_tree_root;
2659         }
2660         extent_root->track_dirty = 1;
2661         fs_info->extent_root = extent_root;
2662
2663         location.objectid = BTRFS_DEV_TREE_OBJECTID;
2664         dev_root = btrfs_read_tree_root(tree_root, &location);
2665         if (IS_ERR(dev_root)) {
2666                 ret = PTR_ERR(dev_root);
2667                 goto recovery_tree_root;
2668         }
2669         dev_root->track_dirty = 1;
2670         fs_info->dev_root = dev_root;
2671         btrfs_init_devices_late(fs_info);
2672
2673         location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2674         csum_root = btrfs_read_tree_root(tree_root, &location);
2675         if (IS_ERR(csum_root)) {
2676                 ret = PTR_ERR(csum_root);
2677                 goto recovery_tree_root;
2678         }
2679         csum_root->track_dirty = 1;
2680         fs_info->csum_root = csum_root;
2681
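        /*
         * The quota tree is optional: if it can't be read, quotas just
         * stay disabled and the error is ignored.
         */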
2682         location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2683         quota_root = btrfs_read_tree_root(tree_root, &location);
2684         if (!IS_ERR(quota_root)) {
2685                 quota_root->track_dirty = 1;
2686                 fs_info->quota_enabled = 1;
2687                 fs_info->pending_quota_state = 1;
2688                 fs_info->quota_root = quota_root;
2689         }
2690
2691         fs_info->generation = generation;
2692         fs_info->last_trans_committed = generation;
2693
2694         ret = btrfs_recover_balance(fs_info);
2695         if (ret) {
2696                 printk(KERN_WARNING "btrfs: failed to recover balance\n");
2697                 goto fail_block_groups;
2698         }
2699
2700         ret = btrfs_init_dev_stats(fs_info);
2701         if (ret) {
2702                 printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2703                        ret);
2704                 goto fail_block_groups;
2705         }
2706
2707         ret = btrfs_init_dev_replace(fs_info);
2708         if (ret) {
2709                 pr_err("btrfs: failed to init dev_replace: %d\n", ret);
2710                 goto fail_block_groups;
2711         }
2712
2713         btrfs_close_extra_devices(fs_info, fs_devices, 1);
2714
2715         ret = btrfs_init_space_info(fs_info);
2716         if (ret) {
2717                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2718                 goto fail_block_groups;
2719         }
2720
2721         ret = btrfs_read_block_groups(extent_root);
2722         if (ret) {
2723                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2724                 goto fail_block_groups;
2725         }
2726         fs_info->num_tolerated_disk_barrier_failures =
2727                 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2728         if (fs_info->fs_devices->missing_devices >
2729              fs_info->num_tolerated_disk_barrier_failures &&
2730             !(sb->s_flags & MS_RDONLY)) {
2731                 printk(KERN_WARNING
2732                        "Btrfs: too many missing devices, writeable mount is not allowed\n");
2733                 goto fail_block_groups;
2734         }
2735
2736         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2737                                                "btrfs-cleaner");
2738         if (IS_ERR(fs_info->cleaner_kthread))
2739                 goto fail_block_groups;
2740
2741         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2742                                                    tree_root,
2743                                                    "btrfs-transaction");
2744         if (IS_ERR(fs_info->transaction_kthread))
2745                 goto fail_cleaner;
2746
2747         if (!btrfs_test_opt(tree_root, SSD) &&
2748             !btrfs_test_opt(tree_root, NOSSD) &&
2749             !fs_info->fs_devices->rotating) {
2750                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2751                        "mode\n");
2752                 btrfs_set_opt(fs_info->mount_opt, SSD);
2753         }
2754
2755 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2756         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2757                 ret = btrfsic_mount(tree_root, fs_devices,
2758                                     btrfs_test_opt(tree_root,
2759                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2760                                     1 : 0,
2761                                     fs_info->check_integrity_print_mask);
2762                 if (ret)
2763                         printk(KERN_WARNING "btrfs: failed to initialize"
2764                                " integrity check module %s\n", sb->s_id);
2765         }
2766 #endif
2767         ret = btrfs_read_qgroup_config(fs_info);
2768         if (ret)
2769                 goto fail_trans_kthread;
2770
2771         /* do not make disk changes in a broken FS */
2772         if (btrfs_super_log_root(disk_super) != 0) {
2773                 u64 bytenr = btrfs_super_log_root(disk_super);
2774
2775                 if (fs_devices->rw_devices == 0) {
2776                         printk(KERN_WARNING "Btrfs log replay required "
2777                                "on RO media\n");
2778                         err = -EIO;
2779                         goto fail_qgroup;
2780                 }
2781                 blocksize =
2782                      btrfs_level_size(tree_root,
2783                                       btrfs_super_log_root_level(disk_super));
2784
2785                 log_tree_root = btrfs_alloc_root(fs_info);
2786                 if (!log_tree_root) {
2787                         err = -ENOMEM;
2788                         goto fail_qgroup;
2789                 }
2790
2791                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2792                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2793
2794                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2795                                                       blocksize,
2796                                                       generation + 1);
2797                 if (!log_tree_root->node ||
2798                     !extent_buffer_uptodate(log_tree_root->node)) {
2799                         printk(KERN_ERR "btrfs: failed to read log tree\n");
2800                         free_extent_buffer(log_tree_root->node);
2801                         kfree(log_tree_root);
2802                         goto fail_trans_kthread;
2803                 }
2804                 /* returns with log_tree_root freed on success */
2805                 ret = btrfs_recover_log_trees(log_tree_root);
2806                 if (ret) {
2807                         btrfs_error(tree_root->fs_info, ret,
2808                                     "Failed to recover log tree");
2809                         free_extent_buffer(log_tree_root->node);
2810                         kfree(log_tree_root);
2811                         goto fail_trans_kthread;
2812                 }
2813
2814                 if (sb->s_flags & MS_RDONLY) {
2815                         ret = btrfs_commit_super(tree_root);
2816                         if (ret)
2817                                 goto fail_trans_kthread;
2818                 }
2819         }
2820
2821         ret = btrfs_find_orphan_roots(tree_root);
2822         if (ret)
2823                 goto fail_trans_kthread;
2824
2825         if (!(sb->s_flags & MS_RDONLY)) {
2826                 ret = btrfs_cleanup_fs_roots(fs_info);
2827                 if (ret)
2828                         goto fail_trans_kthread;
2829
2830                 ret = btrfs_recover_relocation(tree_root);
2831                 if (ret < 0) {
2832                         printk(KERN_WARNING
2833                                "btrfs: failed to recover relocation\n");
2834                         err = -EINVAL;
2835                         goto fail_qgroup;
2836                 }
2837         }
2838
2839         location.objectid = BTRFS_FS_TREE_OBJECTID;
2840         location.type = BTRFS_ROOT_ITEM_KEY;
2841         location.offset = 0;
2842
2843         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2844         if (IS_ERR(fs_info->fs_root)) {
2845                 err = PTR_ERR(fs_info->fs_root);
2846                 goto fail_qgroup;
2847         }
2848
2849         if (sb->s_flags & MS_RDONLY)
2850                 return 0;
2851
2852         down_read(&fs_info->cleanup_work_sem);
2853         if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2854             (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2855                 up_read(&fs_info->cleanup_work_sem);
2856                 close_ctree(tree_root);
2857                 return ret;
2858         }
2859         up_read(&fs_info->cleanup_work_sem);
2860
2861         ret = btrfs_resume_balance_async(fs_info);
2862         if (ret) {
2863                 printk(KERN_WARNING "btrfs: failed to resume balance\n");
2864                 close_ctree(tree_root);
2865                 return ret;
2866         }
2867
2868         ret = btrfs_resume_dev_replace_async(fs_info);
2869         if (ret) {
2870                 pr_warn("btrfs: failed to resume dev_replace\n");
2871                 close_ctree(tree_root);
2872                 return ret;
2873         }
2874
2875         return 0;
2876
2877 fail_qgroup:
2878         btrfs_free_qgroup_config(fs_info);
2879 fail_trans_kthread:
2880         kthread_stop(fs_info->transaction_kthread);
2881         btrfs_cleanup_transaction(fs_info->tree_root);
2882         del_fs_roots(fs_info);
2883 fail_cleaner:
2884         kthread_stop(fs_info->cleaner_kthread);
2885
2886         /*
2887          * make sure we're done with the btree inode before we stop our
2888          * kthreads
2889          */
2890         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2891
2892 fail_block_groups:
2893         btrfs_put_block_group_cache(fs_info);
2894         btrfs_free_block_groups(fs_info);
2895
2896 fail_tree_roots:
2897         free_root_pointers(fs_info, 1);
2898         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2899
2900 fail_sb_buffer:
2901         btrfs_stop_all_workers(fs_info);
2902 fail_alloc:
2903 fail_iput:
2904         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2905
2906         iput(fs_info->btree_inode);
2907 fail_delalloc_bytes:
2908         percpu_counter_destroy(&fs_info->delalloc_bytes);
2909 fail_dirty_metadata_bytes:
2910         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
2911 fail_bdi:
2912         bdi_destroy(&fs_info->bdi);
2913 fail_srcu:
2914         cleanup_srcu_struct(&fs_info->subvol_srcu);
2915 fail:
2916         btrfs_free_stripe_hash_table(fs_info);
2917         btrfs_close_devices(fs_info->fs_devices);
2918         return err;
2919
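/*
 * reading one of the tree roots failed.  With the RECOVERY mount
 * option set, fall back to the next superblock backup root and retry
 * the mount; without it, give up and unwind.
 */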
2920 recovery_tree_root:
2921         if (!btrfs_test_opt(tree_root, RECOVERY))
2922                 goto fail_tree_roots;
2923
2924         free_root_pointers(fs_info, 0);
2925
2926         /* don't use the log in recovery mode; it won't be valid */
2927         btrfs_set_super_log_root(disk_super, 0);
2928
2929         /* we can't trust the free space cache either */
2930         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2931
2932         ret = next_root_backup(fs_info, fs_info->super_copy,
2933                                &num_backups_tried, &backup_index);
2934         if (ret == -1)
2935                 goto fail_block_groups;
2936         goto retry_root_backup;
2937 }
2938
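/*
 * completion callback for superblock buffer_head writes.  On error we
 * log the lost write and bump the device's write error counter instead
 * of using the generic buffer_head error bits.
 */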
2939 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2940 {
2941         if (uptodate) {
2942                 set_buffer_uptodate(bh);
2943         } else {
2944                 struct btrfs_device *device = (struct btrfs_device *)
2945                         bh->b_private;
2946
2947                 printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2948                                           "I/O error on %s\n",
2949                                           rcu_str_deref(device->name));
2950                 /* note, we don't set_buffer_write_io_error because we have
2951                  * our own ways of dealing with the IO errors
2952                  */
2953                 clear_buffer_uptodate(bh);
2954                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2955         }
2956         unlock_buffer(bh);
2957         put_bh(bh);
2958 }
2959
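/*
 * scan the superblock copies on this device and return the
 * buffer_head of the copy with the highest generation.  Only the
 * primary copy is examined for now (see below).
 */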
2960 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2961 {
2962         struct buffer_head *bh;
2963         struct buffer_head *latest = NULL;
2964         struct btrfs_super_block *super;
2965         int i;
2966         u64 transid = 0;
2967         u64 bytenr;
2968
2969         /* we would like to check all the supers, but that would make
2970          * a btrfs mount succeed after a mkfs from a different FS.
2971          * So, we need to add a special mount option to scan for
2972          * later supers, using BTRFS_SUPER_MIRROR_MAX as the bound instead
2973          */
2974         for (i = 0; i < 1; i++) {
2975                 bytenr = btrfs_sb_offset(i);
2976                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2977                         break;
2978                 bh = __bread(bdev, bytenr / 4096, 4096);
2979                 if (!bh)
2980                         continue;
2981
2982                 super = (struct btrfs_super_block *)bh->b_data;
2983                 if (btrfs_super_bytenr(super) != bytenr ||
2984                     super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2985                         brelse(bh);
2986                         continue;
2987                 }
2988
2989                 if (!latest || btrfs_super_generation(super) > transid) {
2990                         brelse(latest);
2991                         latest = bh;
2992                         transid = btrfs_super_generation(super);
2993                 } else {
2994                         brelse(bh);
2995                 }
2996         }
2997         return latest;
2998 }
2999
3000 /*
3001  * this should be called twice, once with wait == 0 and
3002  * once with wait == 1.  When wait == 0 is done, all the buffer heads
3003  * we write are pinned.
3004  *
3005  * They are released when wait == 1 is done.
3006  * max_mirrors must be the same for both runs, and it indicates how
3007  * many supers on this one device should be written.
3008  *
3009  * max_mirrors == 0 means to write them all.
3010  */
3011 static int write_dev_supers(struct btrfs_device *device,
3012                             struct btrfs_super_block *sb,
3013                             int do_barriers, int wait, int max_mirrors)
3014 {
3015         struct buffer_head *bh;
3016         int i;
3017         int ret;
3018         int errors = 0;
3019         u32 crc;
3020         u64 bytenr;
3021
3022         if (max_mirrors == 0)
3023                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3024
3025         for (i = 0; i < max_mirrors; i++) {
3026                 bytenr = btrfs_sb_offset(i);
3027                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
3028                         break;
3029
3030                 if (wait) {
3031                         bh = __find_get_block(device->bdev, bytenr / 4096,
3032                                               BTRFS_SUPER_INFO_SIZE);
3033                         if (!bh) {
3034                                 errors++;
3035                                 continue;
3036                         }
3037                         wait_on_buffer(bh);
3038                         if (!buffer_uptodate(bh))
3039                                 errors++;
3040
3041                         /* drop our reference */
3042                         brelse(bh);
3043
3044                         /* drop the reference from the wait == 0 run */
3045                         brelse(bh);
3046                         continue;
3047                 } else {
3048                         btrfs_set_super_bytenr(sb, bytenr);
3049
3050                         crc = ~(u32)0;
3051                         crc = btrfs_csum_data((char *)sb +
3052                                               BTRFS_CSUM_SIZE, crc,
3053                                               BTRFS_SUPER_INFO_SIZE -
3054                                               BTRFS_CSUM_SIZE);
3055                         btrfs_csum_final(crc, sb->csum);
3056
3057                         /*
3058                          * one reference from __getblk; the wait == 1 run
3059                          * drops it
3060                          */
3061                         bh = __getblk(device->bdev, bytenr / 4096,
3062                                       BTRFS_SUPER_INFO_SIZE);
3063                         if (!bh) {
3064                                 printk(KERN_ERR "btrfs: couldn't get super "
3065                                        "buffer head for bytenr %Lu\n", bytenr);
3066                                 errors++;
3067                                 continue;
3068                         }
3069
3070                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3071
3072                         /* one reference for submit_bh */
3073                         get_bh(bh);
3074
3075                         set_buffer_uptodate(bh);
3076                         lock_buffer(bh);
3077                         bh->b_end_io = btrfs_end_buffer_write_sync;
3078                         bh->b_private = device;
3079                 }
3080
3081                 /*
3082          * we FUA the first super.  The others we allow
3083          * to go down lazily.
3084                  */
3085                 ret = btrfsic_submit_bh(WRITE_FUA, bh);
3086                 if (ret)
3087                         errors++;
3088         }
3089         return errors < i ? 0 : -1;
3090 }
3091
3092 /*
3093  * endio for write_dev_flush; this will wake anyone waiting
3094  * for the barrier when it is done
3095  */
3096 static void btrfs_end_empty_barrier(struct bio *bio, int err)
3097 {
3098         if (err) {
3099                 if (err == -EOPNOTSUPP)
3100                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
3101                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3102         }
3103         if (bio->bi_private)
3104                 complete(bio->bi_private);
3105         bio_put(bio);
3106 }
3107
3108 /*
3109  * trigger flushes for one of the devices.  If you pass wait == 0, the
3110  * flushes are sent down.  With wait == 1, it waits for the previous flush.
3111  *
3112  * any device where the flush fails with -EOPNOTSUPP is flagged as not
3113  * barrier-capable
3114  */
3115 static int write_dev_flush(struct btrfs_device *device, int wait)
3116 {
3117         struct bio *bio;
3118         int ret = 0;
3119
3120         if (device->nobarriers)
3121                 return 0;
3122
3123         if (wait) {
3124                 bio = device->flush_bio;
3125                 if (!bio)
3126                         return 0;
3127
3128                 wait_for_completion(&device->flush_wait);
3129
3130                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
3131                         printk_in_rcu("btrfs: disabling barriers on dev %s\n",
3132                                       rcu_str_deref(device->name));
3133                         device->nobarriers = 1;
3134                 } else if (!bio_flagged(bio, BIO_UPTODATE)) {
3135                         ret = -EIO;
3136                         btrfs_dev_stat_inc_and_print(device,
3137                                 BTRFS_DEV_STAT_FLUSH_ERRS);
3138                 }
3139
3140                 /* drop the reference from the wait == 0 run */
3141                 bio_put(bio);
3142                 device->flush_bio = NULL;
3143
3144                 return ret;
3145         }
3146
3147         /*
3148          * one reference for us, and bio_get below takes another so
3149          * the bio survives until the wait == 1 run puts it
3150          */
3151         device->flush_bio = NULL;
3152         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3153         if (!bio)
3154                 return -ENOMEM;
3155
3156         bio->bi_end_io = btrfs_end_empty_barrier;
3157         bio->bi_bdev = device->bdev;
3158         init_completion(&device->flush_wait);
3159         bio->bi_private = &device->flush_wait;
3160         device->flush_bio = bio;
3161
3162         bio_get(bio);
3163         btrfsic_submit_bio(WRITE_FLUSH, bio);
3164
3165         return 0;
3166 }
3167
3168 /*
3169  * send an empty flush down to each device in parallel,
3170  * then wait for them
3171  */
3172 static int barrier_all_devices(struct btrfs_fs_info *info)
3173 {
3174         struct list_head *head;
3175         struct btrfs_device *dev;
3176         int errors_send = 0;
3177         int errors_wait = 0;
3178         int ret;
3179
3180         /* send down all the barriers */
3181         head = &info->fs_devices->devices;
3182         list_for_each_entry_rcu(dev, head, dev_list) {
3183                 if (!dev->bdev) {
3184                         errors_send++;
3185                         continue;
3186                 }
3187                 if (!dev->in_fs_metadata || !dev->writeable)
3188                         continue;
3189
3190                 ret = write_dev_flush(dev, 0);
3191                 if (ret)
3192                         errors_send++;
3193         }
3194
3195         /* wait for all the barriers */
3196         list_for_each_entry_rcu(dev, head, dev_list) {
3197                 if (!dev->bdev) {
3198                         errors_wait++;
3199                         continue;
3200                 }
3201                 if (!dev->in_fs_metadata || !dev->writeable)
3202                         continue;
3203
3204                 ret = write_dev_flush(dev, 1);
3205                 if (ret)
3206                         errors_wait++;
3207         }
3208         if (errors_send > info->num_tolerated_disk_barrier_failures ||
3209             errors_wait > info->num_tolerated_disk_barrier_failures)
3210                 return -EIO;
3211         return 0;
3212 }
3213
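/*
 * walk every allocated block group profile and work out how many
 * device barrier failures can be tolerated; the weakest profile in
 * use determines the result.
 */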
3214 int btrfs_calc_num_tolerated_disk_barrier_failures(
3215         struct btrfs_fs_info *fs_info)
3216 {
3217         struct btrfs_ioctl_space_info space;
3218         struct btrfs_space_info *sinfo;
3219         u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3220                        BTRFS_BLOCK_GROUP_SYSTEM,
3221                        BTRFS_BLOCK_GROUP_METADATA,
3222                        BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3223         int num_types = 4;
3224         int i;
3225         int c;
3226         int num_tolerated_disk_barrier_failures =
3227                 (int)fs_info->fs_devices->num_devices;
3228
3229         for (i = 0; i < num_types; i++) {
3230                 struct btrfs_space_info *tmp;
3231
3232                 sinfo = NULL;
3233                 rcu_read_lock();
3234                 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3235                         if (tmp->flags == types[i]) {
3236                                 sinfo = tmp;
3237                                 break;
3238                         }
3239                 }
3240                 rcu_read_unlock();
3241
3242                 if (!sinfo)
3243                         continue;
3244
3245                 down_read(&sinfo->groups_sem);
3246                 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3247                         if (!list_empty(&sinfo->block_groups[c])) {
3248                                 u64 flags;
3249
3250                                 btrfs_get_block_group_info(
3251                                         &sinfo->block_groups[c], &space);
3252                                 if (space.total_bytes == 0 ||
3253                                     space.used_bytes == 0)
3254                                         continue;
3255                                 flags = space.flags;
3256                                 /*
3257                                  * return
3258                                  * 0: if dup, single or RAID0 is configured for
3259                                  *    any of metadata, system or data, else
3260                                  * 1: if RAID5 is configured, or if RAID1 or
3261                                  *    RAID10 is configured and only two mirrors
3262                                  *    are used, else
3263                                  * 2: if RAID6 is configured, else
3264                                  * num_mirrors - 1: if RAID1 or RAID10 is
3265                                  *                  configured and more than
3266                                  *                  2 mirrors are used.
3267                                  */
3268                                 if (num_tolerated_disk_barrier_failures > 0 &&
3269                                     ((flags & (BTRFS_BLOCK_GROUP_DUP |
3270                                                BTRFS_BLOCK_GROUP_RAID0)) ||
3271                                      ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3272                                       == 0)))
3273                                         num_tolerated_disk_barrier_failures = 0;
3274                                 else if (num_tolerated_disk_barrier_failures > 1) {
3275                                         if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3276                                             BTRFS_BLOCK_GROUP_RAID5 |
3277                                             BTRFS_BLOCK_GROUP_RAID10)) {
3278                                                 num_tolerated_disk_barrier_failures = 1;
3279                                         } else if (flags &
3280                                                    BTRFS_BLOCK_GROUP_RAID6) {
3281                                                 num_tolerated_disk_barrier_failures = 2;
3282                                         }
3283                                 }
3284                         }
3285                 }
3286                 up_read(&sinfo->groups_sem);
3287         }
3288
3289         return num_tolerated_disk_barrier_failures;
3290 }
3291
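/*
 * write the super block for the running commit out to every writeable
 * device, sending barriers down first when they are enabled.
 */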
3292 static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3293 {
3294         struct list_head *head;
3295         struct btrfs_device *dev;
3296         struct btrfs_super_block *sb;
3297         struct btrfs_dev_item *dev_item;
3298         int ret;
3299         int do_barriers;
3300         int max_errors;
3301         int total_errors = 0;
3302         u64 flags;
3303
3304         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3305         do_barriers = !btrfs_test_opt(root, NOBARRIER);
3306         backup_super_roots(root->fs_info);
3307
3308         sb = root->fs_info->super_for_commit;
3309         dev_item = &sb->dev_item;
3310
3311         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3312         head = &root->fs_info->fs_devices->devices;
3313
3314         if (do_barriers) {
3315                 ret = barrier_all_devices(root->fs_info);
3316                 if (ret) {
3317                         mutex_unlock(
3318                                 &root->fs_info->fs_devices->device_list_mutex);
3319                         btrfs_error(root->fs_info, ret,
3320                                     "errors while submitting device barriers.");
3321                         return ret;
3322                 }
3323         }
3324
3325         list_for_each_entry_rcu(dev, head, dev_list) {
3326                 if (!dev->bdev) {
3327                         total_errors++;
3328                         continue;
3329                 }
3330                 if (!dev->in_fs_metadata || !dev->writeable)
3331                         continue;
3332
3333                 btrfs_set_stack_device_generation(dev_item, 0);
3334                 btrfs_set_stack_device_type(dev_item, dev->type);
3335                 btrfs_set_stack_device_id(dev_item, dev->devid);
3336                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
3337                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3338                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3339                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3340                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3341                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3342                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3343
3344                 flags = btrfs_super_flags(sb);
3345                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3346
3347                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3348                 if (ret)
3349                         total_errors++;
3350         }
3351         if (total_errors > max_errors) {
3352                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
3353                        total_errors);
3354
3355                 /* This shouldn't happen. FUA is masked off if unsupported */
3356                 BUG();
3357         }
3358
3359         total_errors = 0;
3360         list_for_each_entry_rcu(dev, head, dev_list) {
3361                 if (!dev->bdev)
3362                         continue;
3363                 if (!dev->in_fs_metadata || !dev->writeable)
3364                         continue;
3365
3366                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3367                 if (ret)
3368                         total_errors++;
3369         }
3370         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3371         if (total_errors > max_errors) {
3372                 btrfs_error(root->fs_info, -EIO,
3373                             "%d errors while writing supers", total_errors);
3374                 return -EIO;
3375         }
3376         return 0;
3377 }
3378
3379 int write_ctree_super(struct btrfs_trans_handle *trans,
3380                       struct btrfs_root *root, int max_mirrors)
3381 {
3382         int ret;
3383
3384         ret = write_all_supers(root, max_mirrors);
3385         return ret;
3386 }
3387
3388 /* Drop a fs root from the radix tree and free it. */
3389 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3390                                   struct btrfs_root *root)
3391 {
3392         spin_lock(&fs_info->fs_roots_radix_lock);
3393         radix_tree_delete(&fs_info->fs_roots_radix,
3394                           (unsigned long)root->root_key.objectid);
3395         spin_unlock(&fs_info->fs_roots_radix_lock);
3396
3397         if (btrfs_root_refs(&root->root_item) == 0)
3398                 synchronize_srcu(&fs_info->subvol_srcu);
3399
3400         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3401                 btrfs_free_log(NULL, root);
3402                 btrfs_free_log_root_tree(NULL, fs_info);
3403         }
3404
3405         __btrfs_remove_free_space_cache(root->free_ino_pinned);
3406         __btrfs_remove_free_space_cache(root->free_ino_ctl);
3407         free_fs_root(root);
3408 }
3409
3410 static void free_fs_root(struct btrfs_root *root)
3411 {
3412         iput(root->cache_inode);
3413         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3414         if (root->anon_dev)
3415                 free_anon_bdev(root->anon_dev);
3416         free_extent_buffer(root->node);
3417         free_extent_buffer(root->commit_root);
3418         kfree(root->free_ino_ctl);
3419         kfree(root->free_ino_pinned);
3420         kfree(root->name);
3421         btrfs_put_fs_root(root);
3422 }
3423
3424 void btrfs_free_fs_root(struct btrfs_root *root)
3425 {
3426         free_fs_root(root);
3427 }
3428
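/*
 * run orphan cleanup on every fs root currently cached in the radix
 * tree, scanning them in batches of eight.
 */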
3429 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3430 {
3431         u64 root_objectid = 0;
3432         struct btrfs_root *gang[8];
3433         int i;
3434         int ret;
3435
3436         while (1) {
3437                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3438                                              (void **)gang, root_objectid,
3439                                              ARRAY_SIZE(gang));
3440                 if (!ret)
3441                         break;
3442
3443                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3444                 for (i = 0; i < ret; i++) {
3445                         int err;
3446
3447                         root_objectid = gang[i]->root_key.objectid;
3448                         err = btrfs_orphan_cleanup(gang[i]);
3449                         if (err)
3450                                 return err;
3451                 }
3452                 root_objectid++;
3453         }
3454         return 0;
3455 }
3456
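/*
 * flush remaining cleanup work and commit the running transaction
 * (twice, to drop the original snapshot), then write the supers out.
 */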
3457 int btrfs_commit_super(struct btrfs_root *root)
3458 {
3459         struct btrfs_trans_handle *trans;
3460         int ret;
3461
3462         mutex_lock(&root->fs_info->cleaner_mutex);
3463         btrfs_run_delayed_iputs(root);
3464         mutex_unlock(&root->fs_info->cleaner_mutex);
3465         wake_up_process(root->fs_info->cleaner_kthread);
3466
3467         /* wait until ongoing cleanup work is done */
3468         down_write(&root->fs_info->cleanup_work_sem);
3469         up_write(&root->fs_info->cleanup_work_sem);
3470
3471         trans = btrfs_join_transaction(root);
3472         if (IS_ERR(trans))
3473                 return PTR_ERR(trans);
3474         ret = btrfs_commit_transaction(trans, root);
3475         if (ret)
3476                 return ret;
3477         /* run commit again to drop the original snapshot */
3478         trans = btrfs_join_transaction(root);
3479         if (IS_ERR(trans))
3480                 return PTR_ERR(trans);
3481         ret = btrfs_commit_transaction(trans, root);
3482         if (ret)
3483                 return ret;
3484         ret = btrfs_write_and_wait_transaction(NULL, root);
3485         if (ret) {
3486                 btrfs_error(root->fs_info, ret,
3487                             "Failed to sync btree inode to disk.");
3488                 return ret;
3489         }
3490
3491         ret = write_ctree_super(NULL, root, 0);
3492         return ret;
3493 }
3494
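/*
 * unmount path: stop background work, commit the final transaction on
 * read-write mounts and tear down all in-memory state.
 */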
3495 int close_ctree(struct btrfs_root *root)
3496 {
3497         struct btrfs_fs_info *fs_info = root->fs_info;
3498         int ret;
3499
3500         fs_info->closing = 1;
3501         smp_mb();
3502
3503         /* pause restriper - we want to resume on mount */
3504         btrfs_pause_balance(fs_info);
3505
3506         btrfs_dev_replace_suspend_for_unmount(fs_info);
3507
3508         btrfs_scrub_cancel(fs_info);
3509
3510         /* wait for any defraggers to finish */
3511         wait_event(fs_info->transaction_wait,
3512                    (atomic_read(&fs_info->defrag_running) == 0));
3513
3514         /* clear out the rbtree of defraggable inodes */
3515         btrfs_cleanup_defrag_inodes(fs_info);
3516
3517         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3518                 ret = btrfs_commit_super(root);
3519                 if (ret)
3520                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3521         }
3522
3523         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3524                 btrfs_error_commit_super(root);
3525
3526         btrfs_put_block_group_cache(fs_info);
3527
3528         kthread_stop(fs_info->transaction_kthread);
3529         kthread_stop(fs_info->cleaner_kthread);
3530
3531         fs_info->closing = 2;
3532         smp_mb();
3533
3534         btrfs_free_qgroup_config(root->fs_info);
3535
3536         if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3537                 printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
3538                        percpu_counter_sum(&fs_info->delalloc_bytes));
3539         }
3540
3541         btrfs_free_block_groups(fs_info);
3542
3543         btrfs_stop_all_workers(fs_info);
3544
3545         del_fs_roots(fs_info);
3546
3547         free_root_pointers(fs_info, 1);
3548
3549         iput(fs_info->btree_inode);
3550
3551 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3552         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3553                 btrfsic_unmount(root, fs_info->fs_devices);
3554 #endif
3555
3556         btrfs_close_devices(fs_info->fs_devices);
3557         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3558
3559         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3560         percpu_counter_destroy(&fs_info->delalloc_bytes);
3561         bdi_destroy(&fs_info->bdi);
3562         cleanup_srcu_struct(&fs_info->subvol_srcu);
3563
3564         btrfs_free_stripe_hash_table(fs_info);
3565
3566         return 0;
3567 }
3568
3569 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3570                           int atomic)
3571 {
3572         int ret;
3573         struct inode *btree_inode = buf->pages[0]->mapping->host;
3574
3575         ret = extent_buffer_uptodate(buf);
3576         if (!ret)
3577                 return ret;
3578
3579         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3580                                     parent_transid, atomic);
3581         if (ret == -EAGAIN)
3582                 return ret;
3583         return !ret;
3584 }
3585
3586 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3587 {
3588         return set_extent_buffer_uptodate(buf);
3589 }
3590
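/*
 * mark a tree block dirty and account its bytes in
 * dirty_metadata_bytes.  The buffer must be tree locked and should
 * belong to the running transaction (we warn if it doesn't).
 */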
3591 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3592 {
3593         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3594         u64 transid = btrfs_header_generation(buf);
3595         int was_dirty;
3596
3597         btrfs_assert_tree_locked(buf);
3598         if (transid != root->fs_info->generation)
3599                 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3600                        "found %llu running %llu\n",
3601                         (unsigned long long)buf->start,
3602                         (unsigned long long)transid,
3603                         (unsigned long long)root->fs_info->generation);
3604         was_dirty = set_extent_buffer_dirty(buf);
3605         if (!was_dirty)
3606                 __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3607                                      buf->len,
3608                                      root->fs_info->dirty_metadata_batch);
3609 }
3610
3611 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3612                                         int flush_delayed)
3613 {
3614         /*
3615          * looks as though older kernels can get into trouble with
3616          * this code; they end up stuck in balance_dirty_pages forever
3617          */
3618         int ret;
3619
3620         if (current->flags & PF_MEMALLOC)
3621                 return;
3622
3623         if (flush_delayed)
3624                 btrfs_balance_delayed_items(root);
3625
3626         ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3627                                      BTRFS_DIRTY_METADATA_THRESH);
3628         if (ret > 0) {
3629                 balance_dirty_pages_ratelimited(
3630                                    root->fs_info->btree_inode->i_mapping);
3631         }
3632         return;
3633 }
3634
3635 void btrfs_btree_balance_dirty(struct btrfs_root *root)
3636 {
3637         __btrfs_btree_balance_dirty(root, 1);
3638 }
3639
3640 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3641 {
3642         __btrfs_btree_balance_dirty(root, 0);
3643 }
3644
3645 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3646 {
3647         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3648         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3649 }
3650
3651 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3652                               int read_only)
3653 {
3654         /*
3655          * Placeholder for checks
3656          */
3657         return 0;
3658 }
3659
3660 static void btrfs_error_commit_super(struct btrfs_root *root)
3661 {
3662         mutex_lock(&root->fs_info->cleaner_mutex);
3663         btrfs_run_delayed_iputs(root);
3664         mutex_unlock(&root->fs_info->cleaner_mutex);
3665
3666         down_write(&root->fs_info->cleanup_work_sem);
3667         up_write(&root->fs_info->cleanup_work_sem);
3668
3669         /* cleanup FS via transaction */
3670         btrfs_cleanup_transaction(root);
3671 }
3672
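/*
 * error cleanup for a transaction's ordered_operations list: splice
 * the list away and invalidate the inodes on it.
 */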
3673 static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
3674                                              struct btrfs_root *root)
3675 {
3676         struct btrfs_inode *btrfs_inode;
3677         struct list_head splice;
3678
3679         INIT_LIST_HEAD(&splice);
3680
3681         mutex_lock(&root->fs_info->ordered_operations_mutex);
3682         spin_lock(&root->fs_info->ordered_extent_lock);
3683
3684         list_splice_init(&t->ordered_operations, &splice);
3685         while (!list_empty(&splice)) {
3686                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3687                                          ordered_operations);
3688
3689                 list_del_init(&btrfs_inode->ordered_operations);
3690                 spin_unlock(&root->fs_info->ordered_extent_lock);
3691
3692                 btrfs_invalidate_inodes(btrfs_inode->root);
3693
3694                 spin_lock(&root->fs_info->ordered_extent_lock);
3695         }
3696
3697         spin_unlock(&root->fs_info->ordered_extent_lock);
3698         mutex_unlock(&root->fs_info->ordered_operations_mutex);
3699 }
3700
3701 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3702 {
3703         struct btrfs_ordered_extent *ordered;
3704
3705         spin_lock(&root->fs_info->ordered_extent_lock);
3706         /*
3707          * This will just short-circuit the ordered completion code, which will
3708          * make sure the ordered extent gets properly cleaned up.
3709          */
3710         list_for_each_entry(ordered, &root->fs_info->ordered_extents,
3711                             root_extent_list)
3712                 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
3713         spin_unlock(&root->fs_info->ordered_extent_lock);
3714 }
3715
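/*
 * throw away every delayed ref still queued on a dead transaction.
 * Heads currently being processed are waited for, and heads with
 * reserved space get their extents pinned so the space is released
 * later by the pinned extent cleanup.
 */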
3716 int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3717                                struct btrfs_root *root)
3718 {
3719         struct rb_node *node;
3720         struct btrfs_delayed_ref_root *delayed_refs;
3721         struct btrfs_delayed_ref_node *ref;
3722         int ret = 0;
3723
3724         delayed_refs = &trans->delayed_refs;
3725
3726         spin_lock(&delayed_refs->lock);
3727         if (delayed_refs->num_entries == 0) {
3728                 spin_unlock(&delayed_refs->lock);
3729                 printk(KERN_INFO "delayed_refs has no entries\n");
3730                 return ret;
3731         }
3732
3733         while ((node = rb_first(&delayed_refs->root)) != NULL) {
3734                 struct btrfs_delayed_ref_head *head = NULL;
3735
3736                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3737                 atomic_set(&ref->refs, 1);
3738                 if (btrfs_delayed_ref_is_head(ref)) {
3739
3740                         head = btrfs_delayed_node_to_head(ref);
3741                         if (!mutex_trylock(&head->mutex)) {
3742                                 atomic_inc(&ref->refs);
3743                                 spin_unlock(&delayed_refs->lock);
3744
3745                                 /* Need to wait for the delayed ref to run */
3746                                 mutex_lock(&head->mutex);
3747                                 mutex_unlock(&head->mutex);
3748                                 btrfs_put_delayed_ref(ref);
3749
3750                                 spin_lock(&delayed_refs->lock);
3751                                 continue;
3752                         }
3753
3754                         if (head->must_insert_reserved)
3755                                 btrfs_pin_extent(root, ref->bytenr,
3756                                                  ref->num_bytes, 1);
3757                         btrfs_free_delayed_extent_op(head->extent_op);
3758                         delayed_refs->num_heads--;
3759                         if (list_empty(&head->cluster))
3760                                 delayed_refs->num_heads_ready--;
3761                         list_del_init(&head->cluster);
3762                 }
3763
3764                 ref->in_tree = 0;
3765                 rb_erase(&ref->rb_node, &delayed_refs->root);
3766                 delayed_refs->num_entries--;
3767                 if (head)
3768                         mutex_unlock(&head->mutex);
3769                 spin_unlock(&delayed_refs->lock);
3770                 btrfs_put_delayed_ref(ref);
3771
3772                 cond_resched();
3773                 spin_lock(&delayed_refs->lock);
3774         }
3775
3776         spin_unlock(&delayed_refs->lock);
3777
3778         return ret;
3779 }
3780
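/*
 * fail any snapshot creations still pending on this transaction with
 * -ECANCELED and take them off the list.
 */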
3781 static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
3782 {
3783         struct btrfs_pending_snapshot *snapshot;
3784         struct list_head splice;
3785
3786         INIT_LIST_HEAD(&splice);
3787
3788         list_splice_init(&t->pending_snapshots, &splice);
3789
3790         while (!list_empty(&splice)) {
3791                 snapshot = list_entry(splice.next,
3792                                       struct btrfs_pending_snapshot,
3793                                       list);
3794                 snapshot->error = -ECANCELED;
3795                 list_del_init(&snapshot->list);
3796         }
3797 }
3798
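/*
 * drop every inode still on the fs-wide delalloc list; their dirty
 * data is thrown away rather than written back.
 */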
3799 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3800 {
3801         struct btrfs_inode *btrfs_inode;
3802         struct list_head splice;
3803
3804         INIT_LIST_HEAD(&splice);
3805
3806         spin_lock(&root->fs_info->delalloc_lock);
3807         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
3808
3809         while (!list_empty(&splice)) {
3810                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3811                                     delalloc_inodes);
3812
3813                 list_del_init(&btrfs_inode->delalloc_inodes);
3814                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3815                           &btrfs_inode->runtime_flags);
3816                 spin_unlock(&root->fs_info->delalloc_lock);
3817
3818                 btrfs_invalidate_inodes(btrfs_inode->root);
3819
3820                 spin_lock(&root->fs_info->delalloc_lock);
3821         }
3822
3823         spin_unlock(&root->fs_info->delalloc_lock);
3824 }
3825
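/*
 * clear the given mark from the dirty_pages tree and discard any tree
 * blocks that are still dirty in those ranges.
 */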
3826 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3827                                         struct extent_io_tree *dirty_pages,
3828                                         int mark)
3829 {
3830         int ret;
3831         struct extent_buffer *eb;
3832         u64 start = 0;
3833         u64 end;
3834
3835         while (1) {
3836                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3837                                             mark, NULL);
3838                 if (ret)
3839                         break;
3840
3841                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3842                 while (start <= end) {
3843                         eb = btrfs_find_tree_block(root, start,
3844                                                    root->leafsize);
3845                         start += root->leafsize;
3846                         if (!eb)
3847                                 continue;
3848                         wait_on_extent_buffer_writeback(eb);
3849
3850                         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3851                                                &eb->bflags))
3852                                 clear_extent_buffer_dirty(eb);
3853                         free_extent_buffer_stale(eb);
3854                 }
3855         }
3856
3857         return ret;
3858 }
3859
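/*
 * unpin everything in both freed_extents trees, discarding the ranges
 * first if the DISCARD mount option is set.
 */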
3860 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3861                                        struct extent_io_tree *pinned_extents)
3862 {
3863         struct extent_io_tree *unpin;
3864         u64 start;
3865         u64 end;
3866         int ret;
3867         bool loop = true;
3868
3869         unpin = pinned_extents;
3870 again:
3871         while (1) {
3872                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3873                                             EXTENT_DIRTY, NULL);
3874                 if (ret)
3875                         break;
3876
3877                 /* honor the DISCARD mount option during error cleanup */
3878                 if (btrfs_test_opt(root, DISCARD))
3879                         ret = btrfs_error_discard_extent(root, start,
3880                                                          end + 1 - start,
3881                                                          NULL);
3882
3883                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3884                 btrfs_error_unpin_extent_range(root, start, end);
3885                 cond_resched();
3886         }
3887
3888         if (loop) {
3889                 if (unpin == &root->fs_info->freed_extents[0])
3890                         unpin = &root->fs_info->freed_extents[1];
3891                 else
3892                         unpin = &root->fs_info->freed_extents[0];
3893                 loop = false;
3894                 goto again;
3895         }
3896
3897         return 0;
3898 }
3899
3900 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3901                                    struct btrfs_root *root)
3902 {
3903         btrfs_destroy_delayed_refs(cur_trans, root);
3904         btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3905                                 cur_trans->dirty_pages.dirty_bytes);
3906
3907         /* FIXME: cleanup wait for commit */
3908         cur_trans->in_commit = 1;
3909         cur_trans->blocked = 1;
3910         wake_up(&root->fs_info->transaction_blocked_wait);
3911
3912         btrfs_evict_pending_snapshots(cur_trans);
3913
3914         cur_trans->blocked = 0;
3915         wake_up(&root->fs_info->transaction_wait);
3916
3917         cur_trans->commit_done = 1;
3918         wake_up(&cur_trans->commit_wait);
3919
3920         btrfs_destroy_delayed_inodes(root);
3921         btrfs_assert_delayed_root_empty(root);
3922
3923         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3924                                      EXTENT_DIRTY);
3925         btrfs_destroy_pinned_extent(root,
3926                                     root->fs_info->pinned_extents);
3927
3928         /*
3929         memset(cur_trans, 0, sizeof(*cur_trans));
3930         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3931         */
3932 }
3933
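/*
 * error-path teardown for all transactions still on trans_list:
 * destroy their ordered extents, delayed refs and pending snapshots,
 * wake any waiters and free each transaction.
 */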
3934 static int btrfs_cleanup_transaction(struct btrfs_root *root)
3935 {
3936         struct btrfs_transaction *t;
3937         LIST_HEAD(list);
3938
3939         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3940
3941         spin_lock(&root->fs_info->trans_lock);
3942         list_splice_init(&root->fs_info->trans_list, &list);
3943         root->fs_info->trans_no_join = 1;
3944         spin_unlock(&root->fs_info->trans_lock);
3945
3946         while (!list_empty(&list)) {
3947                 t = list_entry(list.next, struct btrfs_transaction, list);
3948
3949                 btrfs_destroy_ordered_operations(t, root);
3950
3951                 btrfs_destroy_ordered_extents(root);
3952
3953                 btrfs_destroy_delayed_refs(t, root);
3954
3955                 /* FIXME: cleanup wait for commit */
3956                 t->in_commit = 1;
3957                 t->blocked = 1;
3958                 smp_mb();
3959                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3960                         wake_up(&root->fs_info->transaction_blocked_wait);
3961
3962                 btrfs_evict_pending_snapshots(t);
3963
3964                 t->blocked = 0;
3965                 smp_mb();
3966                 if (waitqueue_active(&root->fs_info->transaction_wait))
3967                         wake_up(&root->fs_info->transaction_wait);
3968
3969                 t->commit_done = 1;
3970                 smp_mb();
3971                 if (waitqueue_active(&t->commit_wait))
3972                         wake_up(&t->commit_wait);
3973
3974                 btrfs_destroy_delayed_inodes(root);
3975                 btrfs_assert_delayed_root_empty(root);
3976
3977                 btrfs_destroy_delalloc_inodes(root);
3978
3979                 spin_lock(&root->fs_info->trans_lock);
3980                 root->fs_info->running_transaction = NULL;
3981                 spin_unlock(&root->fs_info->trans_lock);
3982
3983                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3984                                              EXTENT_DIRTY);
3985
3986                 btrfs_destroy_pinned_extent(root,
3987                                             root->fs_info->pinned_extents);
3988
3989                 atomic_set(&t->use_count, 0);
3990                 list_del_init(&t->list);
3991                 memset(t, 0, sizeof(*t));
3992                 kmem_cache_free(btrfs_transaction_cachep, t);
3993         }
3994
3995         spin_lock(&root->fs_info->trans_lock);
3996         root->fs_info->trans_no_join = 0;
3997         spin_unlock(&root->fs_info->trans_lock);
3998         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3999
4000         return 0;
4001 }
4002
4003 static struct extent_io_ops btree_extent_io_ops = {
4004         .readpage_end_io_hook = btree_readpage_end_io_hook,
4005         .readpage_io_failed_hook = btree_io_failed_hook,
4006         .submit_bio_hook = btree_submit_bio_hook,
4007         /* note we're sharing with inode.c for the merge bio hook */
4008         .merge_bio_hook = btrfs_merge_bio_hook,
4009 };