Btrfs: introduce per-subvolume delalloc inode list
fs/btrfs/disk-io.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include <linux/slab.h>
31 #include <linux/migrate.h>
32 #include <linux/ratelimit.h>
33 #include <linux/uuid.h>
34 #include <asm/unaligned.h>
35 #include "compat.h"
36 #include "ctree.h"
37 #include "disk-io.h"
38 #include "transaction.h"
39 #include "btrfs_inode.h"
40 #include "volumes.h"
41 #include "print-tree.h"
42 #include "async-thread.h"
43 #include "locking.h"
44 #include "tree-log.h"
45 #include "free-space-cache.h"
46 #include "inode-map.h"
47 #include "check-integrity.h"
48 #include "rcu-string.h"
49 #include "dev-replace.h"
50 #include "raid56.h"
51
52 #ifdef CONFIG_X86
53 #include <asm/cpufeature.h>
54 #endif
55
56 static struct extent_io_ops btree_extent_io_ops;
57 static void end_workqueue_fn(struct btrfs_work *work);
58 static void free_fs_root(struct btrfs_root *root);
59 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
60                                     int read_only);
61 static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
62                                              struct btrfs_root *root);
63 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
64 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
65                                       struct btrfs_root *root);
66 static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
67 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
68 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
69                                         struct extent_io_tree *dirty_pages,
70                                         int mark);
71 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
72                                        struct extent_io_tree *pinned_extents);
73 static int btrfs_cleanup_transaction(struct btrfs_root *root);
74 static void btrfs_error_commit_super(struct btrfs_root *root);
75
76 /*
77  * end_io_wq structs are used to do processing in task context when an IO is
78  * complete.  This is used during reads to verify checksums, and it is used
79  * by writes to insert metadata for new file extents after IO is complete.
80  */
81 struct end_io_wq {
82         struct bio *bio;
83         bio_end_io_t *end_io;
84         void *private;
85         struct btrfs_fs_info *info;
86         int error;
87         int metadata;
88         struct list_head list;
89         struct btrfs_work work;
90 };
91
92 /*
93  * async submit bios are used to offload expensive checksumming
94  * onto the worker threads.  They checksum file and metadata bios
95  * just before they are sent down the IO stack.
96  */
97 struct async_submit_bio {
98         struct inode *inode;
99         struct bio *bio;
100         struct list_head list;
101         extent_submit_bio_hook_t *submit_bio_start;
102         extent_submit_bio_hook_t *submit_bio_done;
103         int rw;
104         int mirror_num;
105         unsigned long bio_flags;
106         /*
107          * bio_offset is optional; it can be used when the pages in the bio
108          * can't tell us where in the file the bio should go
109          */
110         u64 bio_offset;
111         struct btrfs_work work;
112         int error;
113 };
114
115 /*
116  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
117  * eb, the lockdep key is determined by the btrfs_root it belongs to and
118  * the level the eb occupies in the tree.
119  *
120  * Different roots are used for different purposes and may nest inside each
121  * other, so they require separate keysets.  As lockdep keys should be
122  * static, assign keysets according to the purpose of the root as indicated
123  * by btrfs_root->objectid.  This ensures that all special purpose roots
124  * have separate keysets.
125  *
126  * Lock-nesting across peer nodes is always done with the immediate parent
127  * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
128  * a subclass to avoid triggering a lockdep warning in such cases.
129  *
130  * The key is set by the readpage_end_io_hook after the buffer has passed
131  * csum validation but before the pages are unlocked.  It is also set by
132  * btrfs_init_new_buffer on freshly allocated blocks.
133  *
134  * We also add a check to make sure the highest level of the tree is the
135  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
136  * needs updating as well.
137  */
138 #ifdef CONFIG_DEBUG_LOCK_ALLOC
139 # if BTRFS_MAX_LEVEL != 8
140 #  error
141 # endif
142
143 static struct btrfs_lockdep_keyset {
144         u64                     id;             /* root objectid */
145         const char              *name_stem;     /* lock name stem */
146         char                    names[BTRFS_MAX_LEVEL + 1][20];
147         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
148 } btrfs_lockdep_keysets[] = {
149         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
150         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
151         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
152         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
153         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
154         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
155         { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
156         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
157         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
158         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
159         { .id = 0,                              .name_stem = "tree"     },
160 };
161
162 void __init btrfs_init_lockdep(void)
163 {
164         int i, j;
165
166         /* initialize lockdep class names */
167         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
168                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
169
170                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
171                         snprintf(ks->names[j], sizeof(ks->names[j]),
172                                  "btrfs-%s-%02d", ks->name_stem, j);
173         }
174 }
175
176 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
177                                     int level)
178 {
179         struct btrfs_lockdep_keyset *ks;
180
181         BUG_ON(level >= ARRAY_SIZE(ks->keys));
182
183         /* find the matching keyset, id 0 is the default entry */
184         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
185                 if (ks->id == objectid)
186                         break;
187
188         lockdep_set_class_and_name(&eb->lock,
189                                    &ks->keys[level], ks->names[level]);
190 }
191
192 #endif
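/*
 * For illustration: with CONFIG_DEBUG_LOCK_ALLOC enabled, an extent buffer
 * belonging to the extent tree at level 2 gets the keyset
 * btrfs_lockdep_keysets[1] and the lock name "btrfs-extent-02", as generated
 * by btrfs_init_lockdep() above.
 */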
193
194 /*
195  * extents on the btree inode are pretty simple; there's one extent
196  * that covers the entire device
197  */
198 static struct extent_map *btree_get_extent(struct inode *inode,
199                 struct page *page, size_t pg_offset, u64 start, u64 len,
200                 int create)
201 {
202         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
203         struct extent_map *em;
204         int ret;
205
206         read_lock(&em_tree->lock);
207         em = lookup_extent_mapping(em_tree, start, len);
208         if (em) {
209                 em->bdev =
210                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
211                 read_unlock(&em_tree->lock);
212                 goto out;
213         }
214         read_unlock(&em_tree->lock);
215
216         em = alloc_extent_map();
217         if (!em) {
218                 em = ERR_PTR(-ENOMEM);
219                 goto out;
220         }
221         em->start = 0;
222         em->len = (u64)-1;
223         em->block_len = (u64)-1;
224         em->block_start = 0;
225         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
226
227         write_lock(&em_tree->lock);
228         ret = add_extent_mapping(em_tree, em, 0);
229         if (ret == -EEXIST) {
230                 free_extent_map(em);
231                 em = lookup_extent_mapping(em_tree, start, len);
232                 if (!em)
233                         em = ERR_PTR(-EIO);
234         } else if (ret) {
235                 free_extent_map(em);
236                 em = ERR_PTR(ret);
237         }
238         write_unlock(&em_tree->lock);
239
240 out:
241         return em;
242 }
243
244 u32 btrfs_csum_data(char *data, u32 seed, size_t len)
245 {
246         return crc32c(seed, data, len);
247 }
248
249 void btrfs_csum_final(u32 crc, char *result)
250 {
251         put_unaligned_le32(~crc, result);
252 }
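/*
 * A minimal sketch of how the two helpers above are meant to be combined
 * (variable names here are illustrative only): seed the crc32c with ~0,
 * feed in the data, then store the inverted result as a little-endian u32:
 *
 *	u32 crc = btrfs_csum_data(data, ~(u32)0, len);
 *	btrfs_csum_final(crc, (char *)&stored_csum);
 *
 * csum_tree_block() and btrfs_check_super_csum() below follow this pattern.
 */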
253
254 /*
255  * compute the csum for a btree block, and either verify it or write it
256  * into the csum field of the block.
257  */
258 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
259                            int verify)
260 {
261         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
262         char *result = NULL;
263         unsigned long len;
264         unsigned long cur_len;
265         unsigned long offset = BTRFS_CSUM_SIZE;
266         char *kaddr;
267         unsigned long map_start;
268         unsigned long map_len;
269         int err;
270         u32 crc = ~(u32)0;
271         unsigned long inline_result;
272
273         len = buf->len - offset;
274         while (len > 0) {
275                 err = map_private_extent_buffer(buf, offset, 32,
276                                         &kaddr, &map_start, &map_len);
277                 if (err)
278                         return 1;
279                 cur_len = min(len, map_len - (offset - map_start));
280                 crc = btrfs_csum_data(kaddr + offset - map_start,
281                                       crc, cur_len);
282                 len -= cur_len;
283                 offset += cur_len;
284         }
285         if (csum_size > sizeof(inline_result)) {
286                 result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
287                 if (!result)
288                         return 1;
289         } else {
290                 result = (char *)&inline_result;
291         }
292
293         btrfs_csum_final(crc, result);
294
295         if (verify) {
296                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
297                         u32 val;
298                         u32 found = 0;
299                         memcpy(&found, result, csum_size);
300
301                         read_extent_buffer(buf, &val, 0, csum_size);
302                         printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
303                                        "failed on %llu wanted %X found %X "
304                                        "level %d\n",
305                                        root->fs_info->sb->s_id,
306                                        (unsigned long long)buf->start, val, found,
307                                        btrfs_header_level(buf));
308                         if (result != (char *)&inline_result)
309                                 kfree(result);
310                         return 1;
311                 }
312         } else {
313                 write_extent_buffer(buf, result, 0, csum_size);
314         }
315         if (result != (char *)&inline_result)
316                 kfree(result);
317         return 0;
318 }
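/*
 * Note on the result buffer above: for crc32c the 4-byte checksum fits in
 * the on-stack inline_result, so the kzalloc() fallback is only taken for
 * checksum types wider than sizeof(unsigned long).
 */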
319
320 /*
321  * we can't consider a given block up to date unless the transid of the
322  * block matches the transid in the parent node's pointer.  This is how we
323  * detect blocks that either didn't get written at all or got written
324  * in the wrong place.
325  */
326 static int verify_parent_transid(struct extent_io_tree *io_tree,
327                                  struct extent_buffer *eb, u64 parent_transid,
328                                  int atomic)
329 {
330         struct extent_state *cached_state = NULL;
331         int ret;
332
333         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
334                 return 0;
335
336         if (atomic)
337                 return -EAGAIN;
338
339         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
340                          0, &cached_state);
341         if (extent_buffer_uptodate(eb) &&
342             btrfs_header_generation(eb) == parent_transid) {
343                 ret = 0;
344                 goto out;
345         }
346         printk_ratelimited("parent transid verify failed on %llu wanted %llu "
347                        "found %llu\n",
348                        (unsigned long long)eb->start,
349                        (unsigned long long)parent_transid,
350                        (unsigned long long)btrfs_header_generation(eb));
351         ret = 1;
352         clear_extent_buffer_uptodate(eb);
353 out:
354         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
355                              &cached_state, GFP_NOFS);
356         return ret;
357 }
358
359 /*
360  * Return 0 if the superblock checksum type matches the checksum value of that
361  * algorithm. Pass the raw disk superblock data.
362  */
363 static int btrfs_check_super_csum(char *raw_disk_sb)
364 {
365         struct btrfs_super_block *disk_sb =
366                 (struct btrfs_super_block *)raw_disk_sb;
367         u16 csum_type = btrfs_super_csum_type(disk_sb);
368         int ret = 0;
369
370         if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
371                 u32 crc = ~(u32)0;
372                 const int csum_size = sizeof(crc);
373                 char result[csum_size];
374
375                 /*
376                  * The super_block structure does not span the whole
377                  * BTRFS_SUPER_INFO_SIZE range; we expect that the unused space
378                  * is filled with zeros and is included in the checksum.
379                  */
380                 crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
381                                 crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
382                 btrfs_csum_final(crc, result);
383
384                 if (memcmp(raw_disk_sb, result, csum_size))
385                         ret = 1;
386
387                 if (ret && btrfs_super_generation(disk_sb) < 10) {
388                         printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
389                         ret = 0;
390                 }
391         }
392
393         if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
394                 printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
395                                 csum_type);
396                 ret = 1;
397         }
398
399         return ret;
400 }
401
402 /*
403  * helper to read a given tree block, doing retries as required when
404  * the checksums don't match and we have alternate mirrors to try.
405  */
406 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
407                                           struct extent_buffer *eb,
408                                           u64 start, u64 parent_transid)
409 {
410         struct extent_io_tree *io_tree;
411         int failed = 0;
412         int ret;
413         int num_copies = 0;
414         int mirror_num = 0;
415         int failed_mirror = 0;
416
417         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
418         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
419         while (1) {
420                 ret = read_extent_buffer_pages(io_tree, eb, start,
421                                                WAIT_COMPLETE,
422                                                btree_get_extent, mirror_num);
423                 if (!ret) {
424                         if (!verify_parent_transid(io_tree, eb,
425                                                    parent_transid, 0))
426                                 break;
427                         else
428                                 ret = -EIO;
429                 }
430
431                 /*
432                  * This buffer's crc is fine, but its contents are corrupted, so
433                  * there is no reason to read the other copies, they won't be
434                  * any less wrong.
435                  */
436                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
437                         break;
438
439                 num_copies = btrfs_num_copies(root->fs_info,
440                                               eb->start, eb->len);
441                 if (num_copies == 1)
442                         break;
443
444                 if (!failed_mirror) {
445                         failed = 1;
446                         failed_mirror = eb->read_mirror;
447                 }
448
449                 mirror_num++;
450                 if (mirror_num == failed_mirror)
451                         mirror_num++;
452
453                 if (mirror_num > num_copies)
454                         break;
455         }
456
457         if (failed && !ret && failed_mirror)
458                 repair_eb_io_failure(root, eb, failed_mirror);
459
460         return ret;
461 }
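/*
 * Retry behaviour of the loop above: the first read passes mirror_num 0 and
 * lets the lower layers pick a copy.  On failure the mirror that was
 * actually read is remembered in failed_mirror, and the loop then walks the
 * remaining mirrors (1..num_copies), skipping the one that already failed.
 * If a later mirror succeeds, repair_eb_io_failure() rewrites the bad copy.
 */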
462
463 /*
464  * checksum a dirty tree block before IO.  This has extra checks to make sure
465  * we only fill in the checksum field in the first page of a multi-page block
466  */
467
468 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
469 {
470         struct extent_io_tree *tree;
471         u64 start = page_offset(page);
472         u64 found_start;
473         struct extent_buffer *eb;
474
475         tree = &BTRFS_I(page->mapping->host)->io_tree;
476
477         eb = (struct extent_buffer *)page->private;
478         if (page != eb->pages[0])
479                 return 0;
480         found_start = btrfs_header_bytenr(eb);
481         if (found_start != start) {
482                 WARN_ON(1);
483                 return 0;
484         }
485         if (!PageUptodate(page)) {
486                 WARN_ON(1);
487                 return 0;
488         }
489         csum_tree_block(root, eb, 0);
490         return 0;
491 }
492
493 static int check_tree_block_fsid(struct btrfs_root *root,
494                                  struct extent_buffer *eb)
495 {
496         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
497         u8 fsid[BTRFS_UUID_SIZE];
498         int ret = 1;
499
500         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
501                            BTRFS_FSID_SIZE);
502         while (fs_devices) {
503                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
504                         ret = 0;
505                         break;
506                 }
507                 fs_devices = fs_devices->seed;
508         }
509         return ret;
510 }
511
512 #define CORRUPT(reason, eb, root, slot)                         \
513         printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
514                "root=%llu, slot=%d\n", reason,                  \
515                (unsigned long long)btrfs_header_bytenr(eb),     \
516                (unsigned long long)root->objectid, slot)
517
518 static noinline int check_leaf(struct btrfs_root *root,
519                                struct extent_buffer *leaf)
520 {
521         struct btrfs_key key;
522         struct btrfs_key leaf_key;
523         u32 nritems = btrfs_header_nritems(leaf);
524         int slot;
525
526         if (nritems == 0)
527                 return 0;
528
529         /* Check the 0 item */
530         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
531             BTRFS_LEAF_DATA_SIZE(root)) {
532                 CORRUPT("invalid item offset size pair", leaf, root, 0);
533                 return -EIO;
534         }
535
536         /*
537          * Check to make sure each item's keys are in the correct order and their
538          * offsets make sense.  We only have to loop through nritems-1 because
539          * we check the current slot against the next slot, which verifies that the
540          * next slot's offset+size makes sense and that the current slot's
541          * offset is correct.
542          */
543         for (slot = 0; slot < nritems - 1; slot++) {
544                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
545                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
546
547                 /* Make sure the keys are in the right order */
548                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
549                         CORRUPT("bad key order", leaf, root, slot);
550                         return -EIO;
551                 }
552
553                 /*
554                  * Make sure the offsets and ends are right; remember that the
555                  * item data starts at the end of the leaf and grows towards the
556                  * front.
557                  */
558                 if (btrfs_item_offset_nr(leaf, slot) !=
559                         btrfs_item_end_nr(leaf, slot + 1)) {
560                         CORRUPT("slot offset bad", leaf, root, slot);
561                         return -EIO;
562                 }
563
564                 /*
565                  * Check to make sure that we don't point outside of the leaf,
566                  * just in case all the items are consistent with each other, but
567                  * all point outside of the leaf.
568                  */
569                 if (btrfs_item_end_nr(leaf, slot) >
570                     BTRFS_LEAF_DATA_SIZE(root)) {
571                         CORRUPT("slot end outside of leaf", leaf, root, slot);
572                         return -EIO;
573                 }
574         }
575
576         return 0;
577 }
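/*
 * Example of the layout rules checked above: item data grows from the end
 * of the leaf towards the item headers, so item_end(0) must equal
 * BTRFS_LEAF_DATA_SIZE(root) and, for consecutive slots,
 * item_offset(slot) must equal item_end(slot + 1).
 */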
578
579 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
580                                struct extent_state *state, int mirror)
581 {
582         struct extent_io_tree *tree;
583         u64 found_start;
584         int found_level;
585         struct extent_buffer *eb;
586         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
587         int ret = 0;
588         int reads_done;
589
590         if (!page->private)
591                 goto out;
592
593         tree = &BTRFS_I(page->mapping->host)->io_tree;
594         eb = (struct extent_buffer *)page->private;
595
596         /* the pending IO might have been the only thing that kept this buffer
597          * in memory.  Make sure we have a ref for all these other checks
598          */
599         extent_buffer_get(eb);
600
601         reads_done = atomic_dec_and_test(&eb->io_pages);
602         if (!reads_done)
603                 goto err;
604
605         eb->read_mirror = mirror;
606         if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
607                 ret = -EIO;
608                 goto err;
609         }
610
611         found_start = btrfs_header_bytenr(eb);
612         if (found_start != eb->start) {
613                 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
614                                "%llu %llu\n",
615                                (unsigned long long)found_start,
616                                (unsigned long long)eb->start);
617                 ret = -EIO;
618                 goto err;
619         }
620         if (check_tree_block_fsid(root, eb)) {
621                 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
622                                (unsigned long long)eb->start);
623                 ret = -EIO;
624                 goto err;
625         }
626         found_level = btrfs_header_level(eb);
627         if (found_level >= BTRFS_MAX_LEVEL) {
628                 btrfs_info(root->fs_info, "bad tree block level %d\n",
629                            (int)btrfs_header_level(eb));
630                 ret = -EIO;
631                 goto err;
632         }
633
634         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
635                                        eb, found_level);
636
637         ret = csum_tree_block(root, eb, 1);
638         if (ret) {
639                 ret = -EIO;
640                 goto err;
641         }
642
643         /*
644          * If this is a leaf block and it is corrupt, set the corrupt bit so
645          * that we don't try to read the other copies of this block; just
646          * return -EIO.
647          */
648         if (found_level == 0 && check_leaf(root, eb)) {
649                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
650                 ret = -EIO;
651         }
652
653         if (!ret)
654                 set_extent_buffer_uptodate(eb);
655 err:
656         if (reads_done &&
657             test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
658                 btree_readahead_hook(root, eb, eb->start, ret);
659
660         if (ret) {
661                 /*
662                  * our io error hook is going to dec the io pages
663                  * again, we have to make sure it has something
664                  * to decrement
665                  */
666                 atomic_inc(&eb->io_pages);
667                 clear_extent_buffer_uptodate(eb);
668         }
669         free_extent_buffer(eb);
670 out:
671         return ret;
672 }
673
674 static int btree_io_failed_hook(struct page *page, int failed_mirror)
675 {
676         struct extent_buffer *eb;
677         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
678
679         eb = (struct extent_buffer *)page->private;
680         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
681         eb->read_mirror = failed_mirror;
682         atomic_dec(&eb->io_pages);
683         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
684                 btree_readahead_hook(root, eb, eb->start, -EIO);
685         return -EIO;    /* we fixed nothing */
686 }
687
688 static void end_workqueue_bio(struct bio *bio, int err)
689 {
690         struct end_io_wq *end_io_wq = bio->bi_private;
691         struct btrfs_fs_info *fs_info;
692
693         fs_info = end_io_wq->info;
694         end_io_wq->error = err;
695         end_io_wq->work.func = end_workqueue_fn;
696         end_io_wq->work.flags = 0;
697
698         if (bio->bi_rw & REQ_WRITE) {
699                 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
700                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
701                                            &end_io_wq->work);
702                 else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
703                         btrfs_queue_worker(&fs_info->endio_freespace_worker,
704                                            &end_io_wq->work);
705                 else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
706                         btrfs_queue_worker(&fs_info->endio_raid56_workers,
707                                            &end_io_wq->work);
708                 else
709                         btrfs_queue_worker(&fs_info->endio_write_workers,
710                                            &end_io_wq->work);
711         } else {
712                 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
713                         btrfs_queue_worker(&fs_info->endio_raid56_workers,
714                                            &end_io_wq->work);
715                 else if (end_io_wq->metadata)
716                         btrfs_queue_worker(&fs_info->endio_meta_workers,
717                                            &end_io_wq->work);
718                 else
719                         btrfs_queue_worker(&fs_info->endio_workers,
720                                            &end_io_wq->work);
721         }
722 }
723
724 /*
725  * For the metadata arg you want
726  *
727  * 0 - if data
728  * 1 - if normal metadata
729  * 2 - if writing to the free space cache area
730  * 3 - raid parity work
731  */
732 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
733                         int metadata)
734 {
735         struct end_io_wq *end_io_wq;
736         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
737         if (!end_io_wq)
738                 return -ENOMEM;
739
740         end_io_wq->private = bio->bi_private;
741         end_io_wq->end_io = bio->bi_end_io;
742         end_io_wq->info = info;
743         end_io_wq->error = 0;
744         end_io_wq->bio = bio;
745         end_io_wq->metadata = metadata;
746
747         bio->bi_private = end_io_wq;
748         bio->bi_end_io = end_workqueue_bio;
749         return 0;
750 }
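/*
 * Usage sketch (mirroring the metadata read path in btree_submit_bio_hook()
 * further below): wrap the bio before mapping it so end_workqueue_bio() can
 * defer the completion to a worker thread:
 *
 *	ret = btrfs_bio_wq_end_io(fs_info, bio, 1);
 *	if (!ret)
 *		ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
 */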
751
752 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
753 {
754         unsigned long limit = min_t(unsigned long,
755                                     info->workers.max_workers,
756                                     info->fs_devices->open_devices);
757         return 256 * limit;
758 }
759
760 static void run_one_async_start(struct btrfs_work *work)
761 {
762         struct async_submit_bio *async;
763         int ret;
764
765         async = container_of(work, struct  async_submit_bio, work);
766         ret = async->submit_bio_start(async->inode, async->rw, async->bio,
767                                       async->mirror_num, async->bio_flags,
768                                       async->bio_offset);
769         if (ret)
770                 async->error = ret;
771 }
772
773 static void run_one_async_done(struct btrfs_work *work)
774 {
775         struct btrfs_fs_info *fs_info;
776         struct async_submit_bio *async;
777         int limit;
778
779         async = container_of(work, struct  async_submit_bio, work);
780         fs_info = BTRFS_I(async->inode)->root->fs_info;
781
782         limit = btrfs_async_submit_limit(fs_info);
783         limit = limit * 2 / 3;
784
785         if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
786             waitqueue_active(&fs_info->async_submit_wait))
787                 wake_up(&fs_info->async_submit_wait);
788
789         /* If an error occurred we just want to clean up the bio and move on */
790         if (async->error) {
791                 bio_endio(async->bio, async->error);
792                 return;
793         }
794
795         async->submit_bio_done(async->inode, async->rw, async->bio,
796                                async->mirror_num, async->bio_flags,
797                                async->bio_offset);
798 }
799
800 static void run_one_async_free(struct btrfs_work *work)
801 {
802         struct async_submit_bio *async;
803
804         async = container_of(work, struct  async_submit_bio, work);
805         kfree(async);
806 }
807
808 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
809                         int rw, struct bio *bio, int mirror_num,
810                         unsigned long bio_flags,
811                         u64 bio_offset,
812                         extent_submit_bio_hook_t *submit_bio_start,
813                         extent_submit_bio_hook_t *submit_bio_done)
814 {
815         struct async_submit_bio *async;
816
817         async = kmalloc(sizeof(*async), GFP_NOFS);
818         if (!async)
819                 return -ENOMEM;
820
821         async->inode = inode;
822         async->rw = rw;
823         async->bio = bio;
824         async->mirror_num = mirror_num;
825         async->submit_bio_start = submit_bio_start;
826         async->submit_bio_done = submit_bio_done;
827
828         async->work.func = run_one_async_start;
829         async->work.ordered_func = run_one_async_done;
830         async->work.ordered_free = run_one_async_free;
831
832         async->work.flags = 0;
833         async->bio_flags = bio_flags;
834         async->bio_offset = bio_offset;
835
836         async->error = 0;
837
838         atomic_inc(&fs_info->nr_async_submits);
839
840         if (rw & REQ_SYNC)
841                 btrfs_set_work_high_prio(&async->work);
842
843         btrfs_queue_worker(&fs_info->workers, &async->work);
844
845         while (atomic_read(&fs_info->async_submit_draining) &&
846               atomic_read(&fs_info->nr_async_submits)) {
847                 wait_event(fs_info->async_submit_wait,
848                            (atomic_read(&fs_info->nr_async_submits) == 0));
849         }
850
851         return 0;
852 }
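/*
 * The async submission above runs in three ordered stages on the worker:
 * run_one_async_start() calls ->submit_bio_start (checksumming),
 * run_one_async_done() wakes throttled submitters once nr_async_submits
 * drops below the limit and calls ->submit_bio_done (the actual
 * btrfs_map_bio), and run_one_async_free() releases the async_submit_bio.
 */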
853
854 static int btree_csum_one_bio(struct bio *bio)
855 {
856         struct bio_vec *bvec = bio->bi_io_vec;
857         int bio_index = 0;
858         struct btrfs_root *root;
859         int ret = 0;
860
861         WARN_ON(bio->bi_vcnt <= 0);
862         while (bio_index < bio->bi_vcnt) {
863                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
864                 ret = csum_dirty_buffer(root, bvec->bv_page);
865                 if (ret)
866                         break;
867                 bio_index++;
868                 bvec++;
869         }
870         return ret;
871 }
872
873 static int __btree_submit_bio_start(struct inode *inode, int rw,
874                                     struct bio *bio, int mirror_num,
875                                     unsigned long bio_flags,
876                                     u64 bio_offset)
877 {
878         /*
879          * when we're called for a write, we're already in the async
880          * submission context.  Checksum the bio here; the done hook maps it
881          */
882         return btree_csum_one_bio(bio);
883 }
884
885 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
886                                  int mirror_num, unsigned long bio_flags,
887                                  u64 bio_offset)
888 {
889         int ret;
890
891         /*
892          * when we're called for a write, we're already in the async
893          * submission context.  Just jump into btrfs_map_bio
894          */
895         ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
896         if (ret)
897                 bio_endio(bio, ret);
898         return ret;
899 }
900
901 static int check_async_write(struct inode *inode, unsigned long bio_flags)
902 {
903         if (bio_flags & EXTENT_BIO_TREE_LOG)
904                 return 0;
905 #ifdef CONFIG_X86
906         if (cpu_has_xmm4_2)
907                 return 0;
908 #endif
909         return 1;
910 }
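/*
 * In other words: tree-log writes and, on x86 with hardware crc32c (SSE4.2),
 * all btree writes are checksummed inline in btree_submit_bio_hook() below;
 * everything else is handed to the async helpers so checksumming can run in
 * parallel across CPUs.
 */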
911
912 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
913                                  int mirror_num, unsigned long bio_flags,
914                                  u64 bio_offset)
915 {
916         int async = check_async_write(inode, bio_flags);
917         int ret;
918
919         if (!(rw & REQ_WRITE)) {
920                 /*
921                  * called for a read, do the setup so that checksum validation
922                  * can happen in the async kernel threads
923                  */
924                 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
925                                           bio, 1);
926                 if (ret)
927                         goto out_w_error;
928                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
929                                     mirror_num, 0);
930         } else if (!async) {
931                 ret = btree_csum_one_bio(bio);
932                 if (ret)
933                         goto out_w_error;
934                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
935                                     mirror_num, 0);
936         } else {
937                 /*
938                  * kthread helpers are used to submit writes so that
939                  * checksumming can happen in parallel across all CPUs
940                  */
941                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
942                                           inode, rw, bio, mirror_num, 0,
943                                           bio_offset,
944                                           __btree_submit_bio_start,
945                                           __btree_submit_bio_done);
946         }
947
948         if (ret) {
949 out_w_error:
950                 bio_endio(bio, ret);
951         }
952         return ret;
953 }
954
955 #ifdef CONFIG_MIGRATION
956 static int btree_migratepage(struct address_space *mapping,
957                         struct page *newpage, struct page *page,
958                         enum migrate_mode mode)
959 {
960         /*
961          * we can't safely write a btree page from here,
962          * we haven't done the locking hook
963          */
964         if (PageDirty(page))
965                 return -EAGAIN;
966         /*
967          * Buffers may be managed in a filesystem specific way.
968          * We must have no buffers or drop them.
969          */
970         if (page_has_private(page) &&
971             !try_to_release_page(page, GFP_KERNEL))
972                 return -EAGAIN;
973         return migrate_page(mapping, newpage, page, mode);
974 }
975 #endif
976
977
978 static int btree_writepages(struct address_space *mapping,
979                             struct writeback_control *wbc)
980 {
981         struct extent_io_tree *tree;
982         struct btrfs_fs_info *fs_info;
983         int ret;
984
985         tree = &BTRFS_I(mapping->host)->io_tree;
986         if (wbc->sync_mode == WB_SYNC_NONE) {
987
988                 if (wbc->for_kupdate)
989                         return 0;
990
991                 fs_info = BTRFS_I(mapping->host)->root->fs_info;
992                 /* this is a bit racy, but that's ok */
993                 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
994                                              BTRFS_DIRTY_METADATA_THRESH);
995                 if (ret < 0)
996                         return 0;
997         }
998         return btree_write_cache_pages(mapping, wbc);
999 }
1000
1001 static int btree_readpage(struct file *file, struct page *page)
1002 {
1003         struct extent_io_tree *tree;
1004         tree = &BTRFS_I(page->mapping->host)->io_tree;
1005         return extent_read_full_page(tree, page, btree_get_extent, 0);
1006 }
1007
1008 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
1009 {
1010         if (PageWriteback(page) || PageDirty(page))
1011                 return 0;
1012
1013         return try_release_extent_buffer(page);
1014 }
1015
1016 static void btree_invalidatepage(struct page *page, unsigned long offset)
1017 {
1018         struct extent_io_tree *tree;
1019         tree = &BTRFS_I(page->mapping->host)->io_tree;
1020         extent_invalidatepage(tree, page, offset);
1021         btree_releasepage(page, GFP_NOFS);
1022         if (PagePrivate(page)) {
1023                 printk(KERN_WARNING "btrfs warning page private not zero "
1024                        "on page %llu\n", (unsigned long long)page_offset(page));
1025                 ClearPagePrivate(page);
1026                 set_page_private(page, 0);
1027                 page_cache_release(page);
1028         }
1029 }
1030
1031 static int btree_set_page_dirty(struct page *page)
1032 {
1033 #ifdef DEBUG
1034         struct extent_buffer *eb;
1035
1036         BUG_ON(!PagePrivate(page));
1037         eb = (struct extent_buffer *)page->private;
1038         BUG_ON(!eb);
1039         BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1040         BUG_ON(!atomic_read(&eb->refs));
1041         btrfs_assert_tree_locked(eb);
1042 #endif
1043         return __set_page_dirty_nobuffers(page);
1044 }
1045
1046 static const struct address_space_operations btree_aops = {
1047         .readpage       = btree_readpage,
1048         .writepages     = btree_writepages,
1049         .releasepage    = btree_releasepage,
1050         .invalidatepage = btree_invalidatepage,
1051 #ifdef CONFIG_MIGRATION
1052         .migratepage    = btree_migratepage,
1053 #endif
1054         .set_page_dirty = btree_set_page_dirty,
1055 };
1056
1057 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1058                          u64 parent_transid)
1059 {
1060         struct extent_buffer *buf = NULL;
1061         struct inode *btree_inode = root->fs_info->btree_inode;
1062         int ret = 0;
1063
1064         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1065         if (!buf)
1066                 return 0;
1067         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1068                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
1069         free_extent_buffer(buf);
1070         return ret;
1071 }
1072
1073 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1074                          int mirror_num, struct extent_buffer **eb)
1075 {
1076         struct extent_buffer *buf = NULL;
1077         struct inode *btree_inode = root->fs_info->btree_inode;
1078         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1079         int ret;
1080
1081         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1082         if (!buf)
1083                 return 0;
1084
1085         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1086
1087         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1088                                        btree_get_extent, mirror_num);
1089         if (ret) {
1090                 free_extent_buffer(buf);
1091                 return ret;
1092         }
1093
1094         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1095                 free_extent_buffer(buf);
1096                 return -EIO;
1097         } else if (extent_buffer_uptodate(buf)) {
1098                 *eb = buf;
1099         } else {
1100                 free_extent_buffer(buf);
1101         }
1102         return 0;
1103 }
1104
1105 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1106                                             u64 bytenr, u32 blocksize)
1107 {
1108         struct inode *btree_inode = root->fs_info->btree_inode;
1109         struct extent_buffer *eb;
1110         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1111                                 bytenr, blocksize);
1112         return eb;
1113 }
1114
1115 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1116                                                  u64 bytenr, u32 blocksize)
1117 {
1118         struct inode *btree_inode = root->fs_info->btree_inode;
1119         struct extent_buffer *eb;
1120
1121         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1122                                  bytenr, blocksize);
1123         return eb;
1124 }
1125
1126
1127 int btrfs_write_tree_block(struct extent_buffer *buf)
1128 {
1129         return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1130                                         buf->start + buf->len - 1);
1131 }
1132
1133 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1134 {
1135         return filemap_fdatawait_range(buf->pages[0]->mapping,
1136                                        buf->start, buf->start + buf->len - 1);
1137 }
1138
1139 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1140                                       u32 blocksize, u64 parent_transid)
1141 {
1142         struct extent_buffer *buf = NULL;
1143         int ret;
1144
1145         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1146         if (!buf)
1147                 return NULL;
1148
1149         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1150         return buf;
1151
1152 }
1153
1154 void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1155                       struct extent_buffer *buf)
1156 {
1157         struct btrfs_fs_info *fs_info = root->fs_info;
1158
1159         if (btrfs_header_generation(buf) ==
1160             fs_info->running_transaction->transid) {
1161                 btrfs_assert_tree_locked(buf);
1162
1163                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1164                         __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1165                                              -buf->len,
1166                                              fs_info->dirty_metadata_batch);
1167                         /* ugh, clear_extent_buffer_dirty needs to lock the page */
1168                         btrfs_set_lock_blocking(buf);
1169                         clear_extent_buffer_dirty(buf);
1170                 }
1171         }
1172 }
1173
1174 static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1175                          u32 stripesize, struct btrfs_root *root,
1176                          struct btrfs_fs_info *fs_info,
1177                          u64 objectid)
1178 {
1179         root->node = NULL;
1180         root->commit_root = NULL;
1181         root->sectorsize = sectorsize;
1182         root->nodesize = nodesize;
1183         root->leafsize = leafsize;
1184         root->stripesize = stripesize;
1185         root->ref_cows = 0;
1186         root->track_dirty = 0;
1187         root->in_radix = 0;
1188         root->orphan_item_inserted = 0;
1189         root->orphan_cleanup_state = 0;
1190
1191         root->objectid = objectid;
1192         root->last_trans = 0;
1193         root->highest_objectid = 0;
1194         root->nr_delalloc_inodes = 0;
1195         root->name = NULL;
1196         root->inode_tree = RB_ROOT;
1197         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1198         root->block_rsv = NULL;
1199         root->orphan_block_rsv = NULL;
1200
1201         INIT_LIST_HEAD(&root->dirty_list);
1202         INIT_LIST_HEAD(&root->root_list);
1203         INIT_LIST_HEAD(&root->delalloc_inodes);
1204         INIT_LIST_HEAD(&root->delalloc_root);
1205         INIT_LIST_HEAD(&root->logged_list[0]);
1206         INIT_LIST_HEAD(&root->logged_list[1]);
1207         spin_lock_init(&root->orphan_lock);
1208         spin_lock_init(&root->inode_lock);
1209         spin_lock_init(&root->delalloc_lock);
1210         spin_lock_init(&root->accounting_lock);
1211         spin_lock_init(&root->log_extents_lock[0]);
1212         spin_lock_init(&root->log_extents_lock[1]);
1213         mutex_init(&root->objectid_mutex);
1214         mutex_init(&root->log_mutex);
1215         init_waitqueue_head(&root->log_writer_wait);
1216         init_waitqueue_head(&root->log_commit_wait[0]);
1217         init_waitqueue_head(&root->log_commit_wait[1]);
1218         atomic_set(&root->log_commit[0], 0);
1219         atomic_set(&root->log_commit[1], 0);
1220         atomic_set(&root->log_writers, 0);
1221         atomic_set(&root->log_batch, 0);
1222         atomic_set(&root->orphan_inodes, 0);
1223         atomic_set(&root->refs, 1);
1224         root->log_transid = 0;
1225         root->last_log_commit = 0;
1226         extent_io_tree_init(&root->dirty_log_pages,
1227                              fs_info->btree_inode->i_mapping);
1228
1229         memset(&root->root_key, 0, sizeof(root->root_key));
1230         memset(&root->root_item, 0, sizeof(root->root_item));
1231         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1232         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1233         root->defrag_trans_start = fs_info->generation;
1234         init_completion(&root->kobj_unregister);
1235         root->defrag_running = 0;
1236         root->root_key.objectid = objectid;
1237         root->anon_dev = 0;
1238
1239         spin_lock_init(&root->root_item_lock);
1240 }
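/*
 * Note: the delalloc_inodes list, delalloc_root list entry, delalloc_lock
 * and nr_delalloc_inodes counter initialized above make up the
 * per-subvolume delalloc inode list this change introduces: inodes with
 * pending delayed allocation are tracked per root, and (as the name
 * suggests) delalloc_root is meant to link a root that has delalloc inodes
 * into a filesystem-wide list.
 */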
1241
1242 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1243 {
1244         struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1245         if (root)
1246                 root->fs_info = fs_info;
1247         return root;
1248 }
1249
1250 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1251                                      struct btrfs_fs_info *fs_info,
1252                                      u64 objectid)
1253 {
1254         struct extent_buffer *leaf;
1255         struct btrfs_root *tree_root = fs_info->tree_root;
1256         struct btrfs_root *root;
1257         struct btrfs_key key;
1258         int ret = 0;
1259         u64 bytenr;
1260         uuid_le uuid;
1261
1262         root = btrfs_alloc_root(fs_info);
1263         if (!root)
1264                 return ERR_PTR(-ENOMEM);
1265
1266         __setup_root(tree_root->nodesize, tree_root->leafsize,
1267                      tree_root->sectorsize, tree_root->stripesize,
1268                      root, fs_info, objectid);
1269         root->root_key.objectid = objectid;
1270         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1271         root->root_key.offset = 0;
1272
1273         leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
1274                                       0, objectid, NULL, 0, 0, 0);
1275         if (IS_ERR(leaf)) {
1276                 ret = PTR_ERR(leaf);
1277                 leaf = NULL;
1278                 goto fail;
1279         }
1280
1281         bytenr = leaf->start;
1282         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1283         btrfs_set_header_bytenr(leaf, leaf->start);
1284         btrfs_set_header_generation(leaf, trans->transid);
1285         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1286         btrfs_set_header_owner(leaf, objectid);
1287         root->node = leaf;
1288
1289         write_extent_buffer(leaf, fs_info->fsid,
1290                             (unsigned long)btrfs_header_fsid(leaf),
1291                             BTRFS_FSID_SIZE);
1292         write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
1293                             (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
1294                             BTRFS_UUID_SIZE);
1295         btrfs_mark_buffer_dirty(leaf);
1296
1297         root->commit_root = btrfs_root_node(root);
1298         root->track_dirty = 1;
1299
1300
1301         root->root_item.flags = 0;
1302         root->root_item.byte_limit = 0;
1303         btrfs_set_root_bytenr(&root->root_item, leaf->start);
1304         btrfs_set_root_generation(&root->root_item, trans->transid);
1305         btrfs_set_root_level(&root->root_item, 0);
1306         btrfs_set_root_refs(&root->root_item, 1);
1307         btrfs_set_root_used(&root->root_item, leaf->len);
1308         btrfs_set_root_last_snapshot(&root->root_item, 0);
1309         btrfs_set_root_dirid(&root->root_item, 0);
1310         uuid_le_gen(&uuid);
1311         memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1312         root->root_item.drop_level = 0;
1313
1314         key.objectid = objectid;
1315         key.type = BTRFS_ROOT_ITEM_KEY;
1316         key.offset = 0;
1317         ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1318         if (ret)
1319                 goto fail;
1320
1321         btrfs_tree_unlock(leaf);
1322
1323         return root;
1324
1325 fail:
1326         if (leaf) {
1327                 btrfs_tree_unlock(leaf);
1328                 free_extent_buffer(leaf);
1329         }
1330         kfree(root);
1331
1332         return ERR_PTR(ret);
1333 }
1334
1335 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1336                                          struct btrfs_fs_info *fs_info)
1337 {
1338         struct btrfs_root *root;
1339         struct btrfs_root *tree_root = fs_info->tree_root;
1340         struct extent_buffer *leaf;
1341
1342         root = btrfs_alloc_root(fs_info);
1343         if (!root)
1344                 return ERR_PTR(-ENOMEM);
1345
1346         __setup_root(tree_root->nodesize, tree_root->leafsize,
1347                      tree_root->sectorsize, tree_root->stripesize,
1348                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1349
1350         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1351         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1352         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1353         /*
1354          * log trees do not get reference counted because they go away
1355          * before a real commit is actually done.  They do store pointers
1356          * to file data extents, and those reference counts still get
1357          * updated (along with back refs to the log tree).
1358          */
1359         root->ref_cows = 0;
1360
1361         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1362                                       BTRFS_TREE_LOG_OBJECTID, NULL,
1363                                       0, 0, 0);
1364         if (IS_ERR(leaf)) {
1365                 kfree(root);
1366                 return ERR_CAST(leaf);
1367         }
1368
1369         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1370         btrfs_set_header_bytenr(leaf, leaf->start);
1371         btrfs_set_header_generation(leaf, trans->transid);
1372         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1373         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1374         root->node = leaf;
1375
1376         write_extent_buffer(root->node, root->fs_info->fsid,
1377                             (unsigned long)btrfs_header_fsid(root->node),
1378                             BTRFS_FSID_SIZE);
1379         btrfs_mark_buffer_dirty(root->node);
1380         btrfs_tree_unlock(root->node);
1381         return root;
1382 }
1383
1384 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1385                              struct btrfs_fs_info *fs_info)
1386 {
1387         struct btrfs_root *log_root;
1388
1389         log_root = alloc_log_tree(trans, fs_info);
1390         if (IS_ERR(log_root))
1391                 return PTR_ERR(log_root);
1392         WARN_ON(fs_info->log_root_tree);
1393         fs_info->log_root_tree = log_root;
1394         return 0;
1395 }
1396
1397 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1398                        struct btrfs_root *root)
1399 {
1400         struct btrfs_root *log_root;
1401         struct btrfs_inode_item *inode_item;
1402
1403         log_root = alloc_log_tree(trans, root->fs_info);
1404         if (IS_ERR(log_root))
1405                 return PTR_ERR(log_root);
1406
1407         log_root->last_trans = trans->transid;
1408         log_root->root_key.offset = root->root_key.objectid;
1409
1410         inode_item = &log_root->root_item.inode;
1411         inode_item->generation = cpu_to_le64(1);
1412         inode_item->size = cpu_to_le64(3);
1413         inode_item->nlink = cpu_to_le32(1);
1414         inode_item->nbytes = cpu_to_le64(root->leafsize);
1415         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1416
1417         btrfs_set_root_node(&log_root->root_item, log_root->node);
1418
1419         WARN_ON(root->log_root);
1420         root->log_root = log_root;
1421         root->log_transid = 0;
1422         root->last_log_commit = 0;
1423         return 0;
1424 }
1425
1426 struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1427                                         struct btrfs_key *key)
1428 {
1429         struct btrfs_root *root;
1430         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1431         struct btrfs_path *path;
1432         u64 generation;
1433         u32 blocksize;
1434         int ret;
1435
1436         path = btrfs_alloc_path();
1437         if (!path)
1438                 return ERR_PTR(-ENOMEM);
1439
1440         root = btrfs_alloc_root(fs_info);
1441         if (!root) {
1442                 ret = -ENOMEM;
1443                 goto alloc_fail;
1444         }
1445
1446         __setup_root(tree_root->nodesize, tree_root->leafsize,
1447                      tree_root->sectorsize, tree_root->stripesize,
1448                      root, fs_info, key->objectid);
1449
1450         ret = btrfs_find_root(tree_root, key, path,
1451                               &root->root_item, &root->root_key);
1452         if (ret) {
1453                 if (ret > 0)
1454                         ret = -ENOENT;
1455                 goto find_fail;
1456         }
1457
1458         generation = btrfs_root_generation(&root->root_item);
1459         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1460         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1461                                      blocksize, generation);
1462         if (!root->node) {
1463                 ret = -ENOMEM;
1464                 goto find_fail;
1465         } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1466                 ret = -EIO;
1467                 goto read_fail;
1468         }
1469         root->commit_root = btrfs_root_node(root);
1470 out:
1471         btrfs_free_path(path);
1472         return root;
1473
1474 read_fail:
1475         free_extent_buffer(root->node);
1476 find_fail:
1477         kfree(root);
1478 alloc_fail:
1479         root = ERR_PTR(ret);
1480         goto out;
1481 }
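
/*
 * Error handling in btrfs_read_tree_root() is a fall-through chain: read_fail
 * drops the extent buffer, find_fail frees the root, and alloc_fail converts
 * ret into an ERR_PTR, so each later failure point unwinds everything that
 * was set up before it.
 */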
1482
1483 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1484                                       struct btrfs_key *location)
1485 {
1486         struct btrfs_root *root;
1487
1488         root = btrfs_read_tree_root(tree_root, location);
1489         if (IS_ERR(root))
1490                 return root;
1491
1492         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1493                 root->ref_cows = 1;
1494                 btrfs_check_and_init_root_item(&root->root_item);
1495         }
1496
1497         return root;
1498 }
1499
1500 int btrfs_init_fs_root(struct btrfs_root *root)
1501 {
1502         int ret;
1503
1504         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1505         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1506                                         GFP_NOFS);
1507         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1508                 ret = -ENOMEM;
1509                 goto fail;
1510         }
1511
1512         btrfs_init_free_ino_ctl(root);
1513         mutex_init(&root->fs_commit_mutex);
1514         spin_lock_init(&root->cache_lock);
1515         init_waitqueue_head(&root->cache_wait);
1516
1517         ret = get_anon_bdev(&root->anon_dev);
1518         if (ret)
1519                 goto fail;
1520         return 0;
1521 fail:
1522         kfree(root->free_ino_ctl);
1523         kfree(root->free_ino_pinned);
1524         return ret;
1525 }
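
/*
 * btrfs_init_fs_root() fills in the per-subvolume runtime state: the two
 * free-inode caches with their lock and waitqueue, plus an anonymous block
 * device number (anon_dev), which is typically what a subvolume reports as
 * its st_dev.  On failure the caches are freed right here, since this is the
 * only place they are allocated.
 */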
1526
1527 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1528                                         u64 root_id)
1529 {
1530         struct btrfs_root *root;
1531
1532         spin_lock(&fs_info->fs_roots_radix_lock);
1533         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1534                                  (unsigned long)root_id);
1535         spin_unlock(&fs_info->fs_roots_radix_lock);
1536         return root;
1537 }
1538
1539 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1540                          struct btrfs_root *root)
1541 {
1542         int ret;
1543
1544         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1545         if (ret)
1546                 return ret;
1547
1548         spin_lock(&fs_info->fs_roots_radix_lock);
1549         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1550                                 (unsigned long)root->root_key.objectid,
1551                                 root);
1552         if (ret == 0)
1553                 root->in_radix = 1;
1554         spin_unlock(&fs_info->fs_roots_radix_lock);
1555         radix_tree_preload_end();
1556
1557         return ret;
1558 }
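
/*
 * btrfs_insert_fs_root() calls radix_tree_preload() before taking the
 * spinlock so the insert itself never has to allocate while the lock is
 * held; GFP_NOFS keeps that allocation from recursing back into the
 * filesystem.
 */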
1559
1560 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1561                                               struct btrfs_key *location)
1562 {
1563         struct btrfs_root *root;
1564         int ret;
1565
1566         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1567                 return fs_info->tree_root;
1568         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1569                 return fs_info->extent_root;
1570         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1571                 return fs_info->chunk_root;
1572         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1573                 return fs_info->dev_root;
1574         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1575                 return fs_info->csum_root;
1576         if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1577                 return fs_info->quota_root ? fs_info->quota_root :
1578                                              ERR_PTR(-ENOENT);
1579 again:
1580         root = btrfs_lookup_fs_root(fs_info, location->objectid);
1581         if (root)
1582                 return root;
1583
1584         root = btrfs_read_fs_root(fs_info->tree_root, location);
1585         if (IS_ERR(root))
1586                 return root;
1587
1588         if (btrfs_root_refs(&root->root_item) == 0) {
1589                 ret = -ENOENT;
1590                 goto fail;
1591         }
1592
1593         ret = btrfs_init_fs_root(root);
1594         if (ret)
1595                 goto fail;
1596
1597         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1598         if (ret < 0)
1599                 goto fail;
1600         if (ret == 0)
1601                 root->orphan_item_inserted = 1;
1602
1603         ret = btrfs_insert_fs_root(fs_info, root);
1604         if (ret) {
1605                 if (ret == -EEXIST) {
1606                         free_fs_root(root);
1607                         goto again;
1608                 }
1609                 goto fail;
1610         }
1611         return root;
1612 fail:
1613         free_fs_root(root);
1614         return ERR_PTR(ret);
1615 }
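
/*
 * The lookup/read/insert dance in btrfs_read_fs_root_no_name() is racy by
 * design: if two tasks read the same root concurrently, only one
 * btrfs_insert_fs_root() wins.  The loser sees -EEXIST, frees its private
 * copy and retries the radix-tree lookup, so both callers end up sharing the
 * root that made it into fs_roots_radix.
 */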
1616
1617 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1618 {
1619         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1620         int ret = 0;
1621         struct btrfs_device *device;
1622         struct backing_dev_info *bdi;
1623
1624         rcu_read_lock();
1625         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1626                 if (!device->bdev)
1627                         continue;
1628                 bdi = blk_get_backing_dev_info(device->bdev);
1629                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1630                         ret = 1;
1631                         break;
1632                 }
1633         }
1634         rcu_read_unlock();
1635         return ret;
1636 }
1637
1638 /*
1639  * If this fails, the caller must call bdi_destroy() to get rid of the
1640  * bdi again.
1641  */
1642 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1643 {
1644         int err;
1645
1646         bdi->capabilities = BDI_CAP_MAP_COPY;
1647         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1648         if (err)
1649                 return err;
1650
1651         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1652         bdi->congested_fn       = btrfs_congested_fn;
1653         bdi->congested_data     = info;
1654         return 0;
1655 }
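
/*
 * The per-filesystem bdi set up here routes congestion queries through
 * btrfs_congested_fn() above, which reports the filesystem as congested as
 * soon as any member device's backing_dev_info is congested for the
 * requested bits.
 */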
1656
1657 /*
1658  * called by the kthread helper functions to finally call the bio end_io
1659  * functions.  This is where read checksum verification actually happens
1660  */
1661 static void end_workqueue_fn(struct btrfs_work *work)
1662 {
1663         struct bio *bio;
1664         struct end_io_wq *end_io_wq;
1665         struct btrfs_fs_info *fs_info;
1666         int error;
1667
1668         end_io_wq = container_of(work, struct end_io_wq, work);
1669         bio = end_io_wq->bio;
1670         fs_info = end_io_wq->info;
1671
1672         error = end_io_wq->error;
1673         bio->bi_private = end_io_wq->private;
1674         bio->bi_end_io = end_io_wq->end_io;
1675         kfree(end_io_wq);
1676         bio_endio(bio, error);
1677 }
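
/*
 * end_workqueue_fn() runs in worker context: it restores the bi_private and
 * bi_end_io values saved in the end_io_wq and then completes the bio, so the
 * original completion handler finally runs in task context instead of the
 * interrupt path that finished the IO.
 */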
1678
1679 static int cleaner_kthread(void *arg)
1680 {
1681         struct btrfs_root *root = arg;
1682         int again;
1683
1684         do {
1685                 again = 0;
1686
1687                 /* Make the cleaner go to sleep early. */
1688                 if (btrfs_need_cleaner_sleep(root))
1689                         goto sleep;
1690
1691                 if (!mutex_trylock(&root->fs_info->cleaner_mutex))
1692                         goto sleep;
1693
1694                 /*
1695                  * Avoid the case where the status of the fs changes
1696                  * between the check above and taking the trylock.
1697                  */
1698                 if (btrfs_need_cleaner_sleep(root)) {
1699                         mutex_unlock(&root->fs_info->cleaner_mutex);
1700                         goto sleep;
1701                 }
1702
1703                 btrfs_run_delayed_iputs(root);
1704                 again = btrfs_clean_one_deleted_snapshot(root);
1705                 mutex_unlock(&root->fs_info->cleaner_mutex);
1706
1707                 /*
1708                  * The defragger has dealt with the R/O remount and umount;
1709                  * we needn't do anything special here.
1710                  */
1711                 btrfs_run_defrag_inodes(root->fs_info);
1712 sleep:
1713                 if (!try_to_freeze() && !again) {
1714                         set_current_state(TASK_INTERRUPTIBLE);
1715                         if (!kthread_should_stop())
1716                                 schedule();
1717                         __set_current_state(TASK_RUNNING);
1718                 }
1719         } while (!kthread_should_stop());
1720         return 0;
1721 }
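
/*
 * Shape of the cleaner loop above: grab cleaner_mutex opportunistically, run
 * the pending delayed iputs, drop at most one deleted snapshot per pass (so
 * "again" keeps the thread awake while there is more to clean), kick inode
 * defrag, and otherwise sleep until woken, e.g. by the transaction kthread
 * below.
 */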
1722
1723 static int transaction_kthread(void *arg)
1724 {
1725         struct btrfs_root *root = arg;
1726         struct btrfs_trans_handle *trans;
1727         struct btrfs_transaction *cur;
1728         u64 transid;
1729         unsigned long now;
1730         unsigned long delay;
1731         bool cannot_commit;
1732
1733         do {
1734                 cannot_commit = false;
1735                 delay = HZ * 30;
1736                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1737
1738                 spin_lock(&root->fs_info->trans_lock);
1739                 cur = root->fs_info->running_transaction;
1740                 if (!cur) {
1741                         spin_unlock(&root->fs_info->trans_lock);
1742                         goto sleep;
1743                 }
1744
1745                 now = get_seconds();
1746                 if (!cur->blocked &&
1747                     (now < cur->start_time || now - cur->start_time < 30)) {
1748                         spin_unlock(&root->fs_info->trans_lock);
1749                         delay = HZ * 5;
1750                         goto sleep;
1751                 }
1752                 transid = cur->transid;
1753                 spin_unlock(&root->fs_info->trans_lock);
1754
1755                 /* If the file system is aborted, this will always fail. */
1756                 trans = btrfs_attach_transaction(root);
1757                 if (IS_ERR(trans)) {
1758                         if (PTR_ERR(trans) != -ENOENT)
1759                                 cannot_commit = true;
1760                         goto sleep;
1761                 }
1762                 if (transid == trans->transid) {
1763                         btrfs_commit_transaction(trans, root);
1764                 } else {
1765                         btrfs_end_transaction(trans, root);
1766                 }
1767 sleep:
1768                 wake_up_process(root->fs_info->cleaner_kthread);
1769                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1770
1771                 if (!try_to_freeze()) {
1772                         set_current_state(TASK_INTERRUPTIBLE);
1773                         if (!kthread_should_stop() &&
1774                             (!btrfs_transaction_blocked(root->fs_info) ||
1775                              cannot_commit))
1776                                 schedule_timeout(delay);
1777                         __set_current_state(TASK_RUNNING);
1778                 }
1779         } while (!kthread_should_stop());
1780         return 0;
1781 }
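
/*
 * The transaction kthread commits roughly every 30 seconds: a running
 * transaction younger than 30s (and not blocked) is left alone and the
 * thread naps for 5s instead.  Otherwise it attaches to the transaction and
 * commits only if the transid is still the one it sampled; if someone else
 * already committed, it simply ends its handle.
 */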
1782
1783 /*
1784  * this will find the highest generation in the array of
1785  * root backups.  The index of the newest entry is returned,
1786  * or -1 if we can't find anything.
1787  *
1788  * We check to make sure the array is valid by comparing the
1789  * generation of the latest root in the array with the generation
1790  * in the super block.  If they don't match, we discard it.
1791  */
1792 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1793 {
1794         u64 cur;
1795         int newest_index = -1;
1796         struct btrfs_root_backup *root_backup;
1797         int i;
1798
1799         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1800                 root_backup = info->super_copy->super_roots + i;
1801                 cur = btrfs_backup_tree_root_gen(root_backup);
1802                 if (cur == newest_gen)
1803                         newest_index = i;
1804         }
1805
1806         /* check to see if we actually wrapped around */
1807         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1808                 root_backup = info->super_copy->super_roots;
1809                 cur = btrfs_backup_tree_root_gen(root_backup);
1810                 if (cur == newest_gen)
1811                         newest_index = 0;
1812         }
1813         return newest_index;
1814 }
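
/*
 * Worked example of the ring arithmetic, assuming BTRFS_NUM_BACKUP_ROOTS is
 * 4: if the newest backup sits in slot 3, find_oldest_super_backup() below
 * sets backup_root_index to (3 + 1) % 4 = 0, the slot that will be
 * overwritten next.  The extra check above prefers slot 0 when it carries
 * the same generation as the last slot, i.e. when the ring has wrapped.
 */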
1815
1816
1817 /*
1818  * find the oldest backup so we know where to store new entries
1819  * in the backup array.  This will set the backup_root_index
1820  * field in the fs_info struct
1821  */
1822 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1823                                      u64 newest_gen)
1824 {
1825         int newest_index = -1;
1826
1827         newest_index = find_newest_super_backup(info, newest_gen);
1828         /* if there was garbage in there, just move along */
1829         if (newest_index == -1) {
1830                 info->backup_root_index = 0;
1831         } else {
1832                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1833         }
1834 }
1835
1836 /*
1837  * copy all the root pointers into the super backup array.
1838  * this will bump the backup pointer by one when it is
1839  * done
1840  */
1841 static void backup_super_roots(struct btrfs_fs_info *info)
1842 {
1843         int next_backup;
1844         struct btrfs_root_backup *root_backup;
1845         int last_backup;
1846
1847         next_backup = info->backup_root_index;
1848         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1849                 BTRFS_NUM_BACKUP_ROOTS;
1850
1851         /*
1852          * just overwrite the last backup if we're at the same generation;
1853          * this happens only at umount
1854          */
1855         root_backup = info->super_for_commit->super_roots + last_backup;
1856         if (btrfs_backup_tree_root_gen(root_backup) ==
1857             btrfs_header_generation(info->tree_root->node))
1858                 next_backup = last_backup;
1859
1860         root_backup = info->super_for_commit->super_roots + next_backup;
1861
1862         /*
1863          * make sure all of our padding and empty slots get zero filled
1864          * regardless of which ones we use today
1865          */
1866         memset(root_backup, 0, sizeof(*root_backup));
1867
1868         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1869
1870         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1871         btrfs_set_backup_tree_root_gen(root_backup,
1872                                btrfs_header_generation(info->tree_root->node));
1873
1874         btrfs_set_backup_tree_root_level(root_backup,
1875                                btrfs_header_level(info->tree_root->node));
1876
1877         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1878         btrfs_set_backup_chunk_root_gen(root_backup,
1879                                btrfs_header_generation(info->chunk_root->node));
1880         btrfs_set_backup_chunk_root_level(root_backup,
1881                                btrfs_header_level(info->chunk_root->node));
1882
1883         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1884         btrfs_set_backup_extent_root_gen(root_backup,
1885                                btrfs_header_generation(info->extent_root->node));
1886         btrfs_set_backup_extent_root_level(root_backup,
1887                                btrfs_header_level(info->extent_root->node));
1888
1889         /*
1890          * we might commit during log recovery, which happens before we set
1891          * the fs_root.  Make sure it is valid before we fill it in.
1892          */
1893         if (info->fs_root && info->fs_root->node) {
1894                 btrfs_set_backup_fs_root(root_backup,
1895                                          info->fs_root->node->start);
1896                 btrfs_set_backup_fs_root_gen(root_backup,
1897                                btrfs_header_generation(info->fs_root->node));
1898                 btrfs_set_backup_fs_root_level(root_backup,
1899                                btrfs_header_level(info->fs_root->node));
1900         }
1901
1902         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1903         btrfs_set_backup_dev_root_gen(root_backup,
1904                                btrfs_header_generation(info->dev_root->node));
1905         btrfs_set_backup_dev_root_level(root_backup,
1906                                        btrfs_header_level(info->dev_root->node));
1907
1908         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1909         btrfs_set_backup_csum_root_gen(root_backup,
1910                                btrfs_header_generation(info->csum_root->node));
1911         btrfs_set_backup_csum_root_level(root_backup,
1912                                btrfs_header_level(info->csum_root->node));
1913
1914         btrfs_set_backup_total_bytes(root_backup,
1915                              btrfs_super_total_bytes(info->super_copy));
1916         btrfs_set_backup_bytes_used(root_backup,
1917                              btrfs_super_bytes_used(info->super_copy));
1918         btrfs_set_backup_num_devices(root_backup,
1919                              btrfs_super_num_devices(info->super_copy));
1920
1921         /*
1922          * if we don't copy this out to the super_copy, it won't get remembered
1923          * for the next commit
1924          */
1925         memcpy(&info->super_copy->super_roots,
1926                &info->super_for_commit->super_roots,
1927                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1928 }
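
/*
 * Ring bookkeeping in backup_super_roots(): next_backup is the slot about to
 * be overwritten and last_backup is the slot written by the previous commit.
 * Reusing last_backup when the tree root generation has not changed keeps an
 * umount-time commit from burning an extra slot, and the final memcpy into
 * super_copy is what carries the updated array forward to the next commit.
 */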
1929
1930 /*
1931  * this copies info out of the root backup array and back into
1932  * the in-memory super block.  It is meant to help iterate through
1933  * the array, so you send it the number of backups you've already
1934  * tried and the last backup index you used.
1935  *
1936  * this returns -1 when it has tried all the backups
1937  */
1938 static noinline int next_root_backup(struct btrfs_fs_info *info,
1939                                      struct btrfs_super_block *super,
1940                                      int *num_backups_tried, int *backup_index)
1941 {
1942         struct btrfs_root_backup *root_backup;
1943         int newest = *backup_index;
1944
1945         if (*num_backups_tried == 0) {
1946                 u64 gen = btrfs_super_generation(super);
1947
1948                 newest = find_newest_super_backup(info, gen);
1949                 if (newest == -1)
1950                         return -1;
1951
1952                 *backup_index = newest;
1953                 *num_backups_tried = 1;
1954         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1955                 /* we've tried all the backups, all done */
1956                 return -1;
1957         } else {
1958                 /* jump to the next oldest backup */
1959                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1960                         BTRFS_NUM_BACKUP_ROOTS;
1961                 *backup_index = newest;
1962                 *num_backups_tried += 1;
1963         }
1964         root_backup = super->super_roots + newest;
1965
1966         btrfs_set_super_generation(super,
1967                                    btrfs_backup_tree_root_gen(root_backup));
1968         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1969         btrfs_set_super_root_level(super,
1970                                    btrfs_backup_tree_root_level(root_backup));
1971         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1972
1973         /*
1974          * fixme: the total bytes and num_devices need to match, otherwise
1975          * we should require a fsck
1976          */
1977         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1978         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1979         return 0;
1980 }
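
/*
 * Rough sketch of how the mount path is expected to use this (see the
 * retry_root_backup label in open_ctree() below): the first call picks the
 * newest slot, and each subsequent call steps one slot further back,
 * rewriting the super's root/generation fields so the tree root read is
 * retried against progressively older roots until all
 * BTRFS_NUM_BACKUP_ROOTS slots have been tried.
 */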
1981
1982 /* helper to cleanup workers */
1983 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1984 {
1985         btrfs_stop_workers(&fs_info->generic_worker);
1986         btrfs_stop_workers(&fs_info->fixup_workers);
1987         btrfs_stop_workers(&fs_info->delalloc_workers);
1988         btrfs_stop_workers(&fs_info->workers);
1989         btrfs_stop_workers(&fs_info->endio_workers);
1990         btrfs_stop_workers(&fs_info->endio_meta_workers);
1991         btrfs_stop_workers(&fs_info->endio_raid56_workers);
1992         btrfs_stop_workers(&fs_info->rmw_workers);
1993         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
1994         btrfs_stop_workers(&fs_info->endio_write_workers);
1995         btrfs_stop_workers(&fs_info->endio_freespace_worker);
1996         btrfs_stop_workers(&fs_info->submit_workers);
1997         btrfs_stop_workers(&fs_info->delayed_workers);
1998         btrfs_stop_workers(&fs_info->caching_workers);
1999         btrfs_stop_workers(&fs_info->readahead_workers);
2000         btrfs_stop_workers(&fs_info->flush_workers);
2001         btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
2002 }
2003
2004 /* helper to cleanup tree roots */
2005 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2006 {
2007         free_extent_buffer(info->tree_root->node);
2008         free_extent_buffer(info->tree_root->commit_root);
2009         info->tree_root->node = NULL;
2010         info->tree_root->commit_root = NULL;
2011
2012         if (info->dev_root) {
2013                 free_extent_buffer(info->dev_root->node);
2014                 free_extent_buffer(info->dev_root->commit_root);
2015                 info->dev_root->node = NULL;
2016                 info->dev_root->commit_root = NULL;
2017         }
2018         if (info->extent_root) {
2019                 free_extent_buffer(info->extent_root->node);
2020                 free_extent_buffer(info->extent_root->commit_root);
2021                 info->extent_root->node = NULL;
2022                 info->extent_root->commit_root = NULL;
2023         }
2024         if (info->csum_root) {
2025                 free_extent_buffer(info->csum_root->node);
2026                 free_extent_buffer(info->csum_root->commit_root);
2027                 info->csum_root->node = NULL;
2028                 info->csum_root->commit_root = NULL;
2029         }
2030         if (info->quota_root) {
2031                 free_extent_buffer(info->quota_root->node);
2032                 free_extent_buffer(info->quota_root->commit_root);
2033                 info->quota_root->node = NULL;
2034                 info->quota_root->commit_root = NULL;
2035         }
2036         if (chunk_root) {
2037                 free_extent_buffer(info->chunk_root->node);
2038                 free_extent_buffer(info->chunk_root->commit_root);
2039                 info->chunk_root->node = NULL;
2040                 info->chunk_root->commit_root = NULL;
2041         }
2042 }
2043
2044 static void del_fs_roots(struct btrfs_fs_info *fs_info)
2045 {
2046         int ret;
2047         struct btrfs_root *gang[8];
2048         int i;
2049
2050         while (!list_empty(&fs_info->dead_roots)) {
2051                 gang[0] = list_entry(fs_info->dead_roots.next,
2052                                      struct btrfs_root, root_list);
2053                 list_del(&gang[0]->root_list);
2054
2055                 if (gang[0]->in_radix) {
2056                         btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2057                 } else {
2058                         free_extent_buffer(gang[0]->node);
2059                         free_extent_buffer(gang[0]->commit_root);
2060                         btrfs_put_fs_root(gang[0]);
2061                 }
2062         }
2063
2064         while (1) {
2065                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2066                                              (void **)gang, 0,
2067                                              ARRAY_SIZE(gang));
2068                 if (!ret)
2069                         break;
2070                 for (i = 0; i < ret; i++)
2071                         btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2072         }
2073 }
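
/*
 * del_fs_roots() covers the two places a root can be hiding at teardown:
 * roots queued on dead_roots (dropped via btrfs_drop_and_free_fs_root() if
 * they made it into the radix tree, freed by hand otherwise) and roots still
 * live in fs_roots_radix, which are swept in gang lookups of up to 8 at a
 * time.
 */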
2074
2075 int open_ctree(struct super_block *sb,
2076                struct btrfs_fs_devices *fs_devices,
2077                char *options)
2078 {
2079         u32 sectorsize;
2080         u32 nodesize;
2081         u32 leafsize;
2082         u32 blocksize;
2083         u32 stripesize;
2084         u64 generation;
2085         u64 features;
2086         struct btrfs_key location;
2087         struct buffer_head *bh;
2088         struct btrfs_super_block *disk_super;
2089         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2090         struct btrfs_root *tree_root;
2091         struct btrfs_root *extent_root;
2092         struct btrfs_root *csum_root;
2093         struct btrfs_root *chunk_root;
2094         struct btrfs_root *dev_root;
2095         struct btrfs_root *quota_root;
2096         struct btrfs_root *log_tree_root;
2097         int ret;
2098         int err = -EINVAL;
2099         int num_backups_tried = 0;
2100         int backup_index = 0;
2101
2102         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
2103         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
2104         if (!tree_root || !chunk_root) {
2105                 err = -ENOMEM;
2106                 goto fail;
2107         }
2108
2109         ret = init_srcu_struct(&fs_info->subvol_srcu);
2110         if (ret) {
2111                 err = ret;
2112                 goto fail;
2113         }
2114
2115         ret = setup_bdi(fs_info, &fs_info->bdi);
2116         if (ret) {
2117                 err = ret;
2118                 goto fail_srcu;
2119         }
2120
2121         ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2122         if (ret) {
2123                 err = ret;
2124                 goto fail_bdi;
2125         }
2126         fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2127                                         (1 + ilog2(nr_cpu_ids));
2128
2129         ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2130         if (ret) {
2131                 err = ret;
2132                 goto fail_dirty_metadata_bytes;
2133         }
2134
2135         fs_info->btree_inode = new_inode(sb);
2136         if (!fs_info->btree_inode) {
2137                 err = -ENOMEM;
2138                 goto fail_delalloc_bytes;
2139         }
2140
2141         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2142
2143         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2144         INIT_LIST_HEAD(&fs_info->trans_list);
2145         INIT_LIST_HEAD(&fs_info->dead_roots);
2146         INIT_LIST_HEAD(&fs_info->delayed_iputs);
2147         INIT_LIST_HEAD(&fs_info->delalloc_roots);
2148         INIT_LIST_HEAD(&fs_info->caching_block_groups);
2149         spin_lock_init(&fs_info->delalloc_root_lock);
2150         spin_lock_init(&fs_info->trans_lock);
2151         spin_lock_init(&fs_info->fs_roots_radix_lock);
2152         spin_lock_init(&fs_info->delayed_iput_lock);
2153         spin_lock_init(&fs_info->defrag_inodes_lock);
2154         spin_lock_init(&fs_info->free_chunk_lock);
2155         spin_lock_init(&fs_info->tree_mod_seq_lock);
2156         spin_lock_init(&fs_info->super_lock);
2157         rwlock_init(&fs_info->tree_mod_log_lock);
2158         mutex_init(&fs_info->reloc_mutex);
2159         seqlock_init(&fs_info->profiles_lock);
2160
2161         init_completion(&fs_info->kobj_unregister);
2162         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2163         INIT_LIST_HEAD(&fs_info->space_info);
2164         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2165         btrfs_mapping_init(&fs_info->mapping_tree);
2166         btrfs_init_block_rsv(&fs_info->global_block_rsv,
2167                              BTRFS_BLOCK_RSV_GLOBAL);
2168         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2169                              BTRFS_BLOCK_RSV_DELALLOC);
2170         btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2171         btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2172         btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2173         btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2174                              BTRFS_BLOCK_RSV_DELOPS);
2175         atomic_set(&fs_info->nr_async_submits, 0);
2176         atomic_set(&fs_info->async_delalloc_pages, 0);
2177         atomic_set(&fs_info->async_submit_draining, 0);
2178         atomic_set(&fs_info->nr_async_bios, 0);
2179         atomic_set(&fs_info->defrag_running, 0);
2180         atomic64_set(&fs_info->tree_mod_seq, 0);
2181         fs_info->sb = sb;
2182         fs_info->max_inline = 8192 * 1024;
2183         fs_info->metadata_ratio = 0;
2184         fs_info->defrag_inodes = RB_ROOT;
2185         fs_info->trans_no_join = 0;
2186         fs_info->free_chunk_space = 0;
2187         fs_info->tree_mod_log = RB_ROOT;
2188
2189         /* readahead state */
2190         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2191         spin_lock_init(&fs_info->reada_lock);
2192
2193         fs_info->thread_pool_size = min_t(unsigned long,
2194                                           num_online_cpus() + 2, 8);
2195
2196         INIT_LIST_HEAD(&fs_info->ordered_extents);
2197         spin_lock_init(&fs_info->ordered_extent_lock);
2198         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2199                                         GFP_NOFS);
2200         if (!fs_info->delayed_root) {
2201                 err = -ENOMEM;
2202                 goto fail_iput;
2203         }
2204         btrfs_init_delayed_root(fs_info->delayed_root);
2205
2206         mutex_init(&fs_info->scrub_lock);
2207         atomic_set(&fs_info->scrubs_running, 0);
2208         atomic_set(&fs_info->scrub_pause_req, 0);
2209         atomic_set(&fs_info->scrubs_paused, 0);
2210         atomic_set(&fs_info->scrub_cancel_req, 0);
2211         init_waitqueue_head(&fs_info->scrub_pause_wait);
2212         init_rwsem(&fs_info->scrub_super_lock);
2213         fs_info->scrub_workers_refcnt = 0;
2214 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2215         fs_info->check_integrity_print_mask = 0;
2216 #endif
2217
2218         spin_lock_init(&fs_info->balance_lock);
2219         mutex_init(&fs_info->balance_mutex);
2220         atomic_set(&fs_info->balance_running, 0);
2221         atomic_set(&fs_info->balance_pause_req, 0);
2222         atomic_set(&fs_info->balance_cancel_req, 0);
2223         fs_info->balance_ctl = NULL;
2224         init_waitqueue_head(&fs_info->balance_wait_q);
2225
2226         sb->s_blocksize = 4096;
2227         sb->s_blocksize_bits = blksize_bits(4096);
2228         sb->s_bdi = &fs_info->bdi;
2229
2230         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2231         set_nlink(fs_info->btree_inode, 1);
2232         /*
2233          * we set the i_size on the btree inode to the max possible int.
2234          * the real end of the address space is determined by all of
2235          * the devices in the system
2236          */
2237         fs_info->btree_inode->i_size = OFFSET_MAX;
2238         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2239         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2240
2241         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2242         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2243                              fs_info->btree_inode->i_mapping);
2244         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2245         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2246
2247         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2248
2249         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2250         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2251                sizeof(struct btrfs_key));
2252         set_bit(BTRFS_INODE_DUMMY,
2253                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2254         insert_inode_hash(fs_info->btree_inode);
2255
2256         spin_lock_init(&fs_info->block_group_cache_lock);
2257         fs_info->block_group_cache_tree = RB_ROOT;
2258         fs_info->first_logical_byte = (u64)-1;
2259
2260         extent_io_tree_init(&fs_info->freed_extents[0],
2261                              fs_info->btree_inode->i_mapping);
2262         extent_io_tree_init(&fs_info->freed_extents[1],
2263                              fs_info->btree_inode->i_mapping);
2264         fs_info->pinned_extents = &fs_info->freed_extents[0];
2265         fs_info->do_barriers = 1;
2266
2267
2268         mutex_init(&fs_info->ordered_operations_mutex);
2269         mutex_init(&fs_info->tree_log_mutex);
2270         mutex_init(&fs_info->chunk_mutex);
2271         mutex_init(&fs_info->transaction_kthread_mutex);
2272         mutex_init(&fs_info->cleaner_mutex);
2273         mutex_init(&fs_info->volume_mutex);
2274         init_rwsem(&fs_info->extent_commit_sem);
2275         init_rwsem(&fs_info->cleanup_work_sem);
2276         init_rwsem(&fs_info->subvol_sem);
2277         fs_info->dev_replace.lock_owner = 0;
2278         atomic_set(&fs_info->dev_replace.nesting_level, 0);
2279         mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2280         mutex_init(&fs_info->dev_replace.lock_management_lock);
2281         mutex_init(&fs_info->dev_replace.lock);
2282
2283         spin_lock_init(&fs_info->qgroup_lock);
2284         mutex_init(&fs_info->qgroup_ioctl_lock);
2285         fs_info->qgroup_tree = RB_ROOT;
2286         INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2287         fs_info->qgroup_seq = 1;
2288         fs_info->quota_enabled = 0;
2289         fs_info->pending_quota_state = 0;
2290         fs_info->qgroup_ulist = NULL;
2291         mutex_init(&fs_info->qgroup_rescan_lock);
2292
2293         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2294         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2295
2296         init_waitqueue_head(&fs_info->transaction_throttle);
2297         init_waitqueue_head(&fs_info->transaction_wait);
2298         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2299         init_waitqueue_head(&fs_info->async_submit_wait);
2300
2301         ret = btrfs_alloc_stripe_hash_table(fs_info);
2302         if (ret) {
2303                 err = ret;
2304                 goto fail_alloc;
2305         }
2306
2307         __setup_root(4096, 4096, 4096, 4096, tree_root,
2308                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2309
2310         invalidate_bdev(fs_devices->latest_bdev);
2311
2312         /*
2313          * Read super block and check the signature bytes only
2314          */
2315         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2316         if (!bh) {
2317                 err = -EINVAL;
2318                 goto fail_alloc;
2319         }
2320
2321         /*
2322          * We want to check the superblock checksum; its type is stored inside.
2323          * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2324          */
2325         if (btrfs_check_super_csum(bh->b_data)) {
2326                 printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
2327                 err = -EINVAL;
2328                 goto fail_alloc;
2329         }
2330
2331         /*
2332          * super_copy is zeroed at allocation time and we never touch the
2333          * following bytes up to INFO_SIZE; the checksum is calculated from
2334          * the whole block of INFO_SIZE
2335          */
2336         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2337         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2338                sizeof(*fs_info->super_for_commit));
2339         brelse(bh);
2340
2341         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2342
2343         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2344         if (ret) {
2345                 printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2346                 err = -EINVAL;
2347                 goto fail_alloc;
2348         }
2349
2350         disk_super = fs_info->super_copy;
2351         if (!btrfs_super_root(disk_super))
2352                 goto fail_alloc;
2353
2354         /* check FS state, whether FS is broken. */
2355         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2356                 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2357
2358         /*
2359          * run through our array of backup supers and set up
2360          * our ring pointer to the oldest one
2361          */
2362         generation = btrfs_super_generation(disk_super);
2363         find_oldest_super_backup(fs_info, generation);
2364
2365         /*
2366          * In the long term, we'll store the compression type in the super
2367          * block, and it'll be used for per file compression control.
2368          */
2369         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2370
2371         ret = btrfs_parse_options(tree_root, options);
2372         if (ret) {
2373                 err = ret;
2374                 goto fail_alloc;
2375         }
2376
2377         features = btrfs_super_incompat_flags(disk_super) &
2378                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2379         if (features) {
2380                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2381                        "unsupported optional features (%Lx).\n",
2382                        (unsigned long long)features);
2383                 err = -EINVAL;
2384                 goto fail_alloc;
2385         }
2386
2387         if (btrfs_super_leafsize(disk_super) !=
2388             btrfs_super_nodesize(disk_super)) {
2389                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2390                        "blocksizes don't match.  node %d leaf %d\n",
2391                        btrfs_super_nodesize(disk_super),
2392                        btrfs_super_leafsize(disk_super));
2393                 err = -EINVAL;
2394                 goto fail_alloc;
2395         }
2396         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2397                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2398                        "blocksize (%d) was too large\n",
2399                        btrfs_super_leafsize(disk_super));
2400                 err = -EINVAL;
2401                 goto fail_alloc;
2402         }
2403
2404         features = btrfs_super_incompat_flags(disk_super);
2405         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2406         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2407                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2408
2409         if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2410                 printk(KERN_ERR "btrfs: has skinny extents\n");
2411
2412         /*
2413          * flag our filesystem as having big metadata blocks if
2414          * they are bigger than the page size
2415          */
2416         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2417                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2418                         printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
2419                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2420         }
2421
2422         nodesize = btrfs_super_nodesize(disk_super);
2423         leafsize = btrfs_super_leafsize(disk_super);
2424         sectorsize = btrfs_super_sectorsize(disk_super);
2425         stripesize = btrfs_super_stripesize(disk_super);
2426         fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2427         fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2428
2429         /*
2430          * mixed block groups end up with duplicate but slightly offset
2431          * extent buffers for the same range.  This leads to corruption.
2432          */
2433         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2434             (sectorsize != leafsize)) {
2435                 printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2436                                 "are not allowed for mixed block groups on %s\n",
2437                                 sb->s_id);
2438                 goto fail_alloc;
2439         }
2440
2441         /*
2442          * We needn't take the lock because there is no other task which will
2443          * update the flag.
2444          */
2445         btrfs_set_super_incompat_flags(disk_super, features);
2446
2447         features = btrfs_super_compat_ro_flags(disk_super) &
2448                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2449         if (!(sb->s_flags & MS_RDONLY) && features) {
2450                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2451                        "unsupported option features (%Lx).\n",
2452                        (unsigned long long)features);
2453                 err = -EINVAL;
2454                 goto fail_alloc;
2455         }
2456
2457         btrfs_init_workers(&fs_info->generic_worker,
2458                            "genwork", 1, NULL);
2459
2460         btrfs_init_workers(&fs_info->workers, "worker",
2461                            fs_info->thread_pool_size,
2462                            &fs_info->generic_worker);
2463
2464         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2465                            fs_info->thread_pool_size,
2466                            &fs_info->generic_worker);
2467
2468         btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
2469                            fs_info->thread_pool_size,
2470                            &fs_info->generic_worker);
2471
2472         btrfs_init_workers(&fs_info->submit_workers, "submit",
2473                            min_t(u64, fs_devices->num_devices,
2474                            fs_info->thread_pool_size),
2475                            &fs_info->generic_worker);
2476
2477         btrfs_init_workers(&fs_info->caching_workers, "cache",
2478                            2, &fs_info->generic_worker);
2479
2480         /* a higher idle thresh on the submit workers makes it much more
2481          * likely that bios will be sent down in a sane order to the
2482          * devices
2483          */
2484         fs_info->submit_workers.idle_thresh = 64;
2485
2486         fs_info->workers.idle_thresh = 16;
2487         fs_info->workers.ordered = 1;
2488
2489         fs_info->delalloc_workers.idle_thresh = 2;
2490         fs_info->delalloc_workers.ordered = 1;
2491
2492         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2493                            &fs_info->generic_worker);
2494         btrfs_init_workers(&fs_info->endio_workers, "endio",
2495                            fs_info->thread_pool_size,
2496                            &fs_info->generic_worker);
2497         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2498                            fs_info->thread_pool_size,
2499                            &fs_info->generic_worker);
2500         btrfs_init_workers(&fs_info->endio_meta_write_workers,
2501                            "endio-meta-write", fs_info->thread_pool_size,
2502                            &fs_info->generic_worker);
2503         btrfs_init_workers(&fs_info->endio_raid56_workers,
2504                            "endio-raid56", fs_info->thread_pool_size,
2505                            &fs_info->generic_worker);
2506         btrfs_init_workers(&fs_info->rmw_workers,
2507                            "rmw", fs_info->thread_pool_size,
2508                            &fs_info->generic_worker);
2509         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2510                            fs_info->thread_pool_size,
2511                            &fs_info->generic_worker);
2512         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2513                            1, &fs_info->generic_worker);
2514         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2515                            fs_info->thread_pool_size,
2516                            &fs_info->generic_worker);
2517         btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2518                            fs_info->thread_pool_size,
2519                            &fs_info->generic_worker);
2520         btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
2521                            &fs_info->generic_worker);
2522
2523         /*
2524          * endios are largely parallel and should have a very
2525          * low idle thresh
2526          */
2527         fs_info->endio_workers.idle_thresh = 4;
2528         fs_info->endio_meta_workers.idle_thresh = 4;
2529         fs_info->endio_raid56_workers.idle_thresh = 4;
2530         fs_info->rmw_workers.idle_thresh = 2;
2531
2532         fs_info->endio_write_workers.idle_thresh = 2;
2533         fs_info->endio_meta_write_workers.idle_thresh = 2;
2534         fs_info->readahead_workers.idle_thresh = 2;
2535
2536         /*
2537          * btrfs_start_workers can really only fail because of ENOMEM so just
2538          * return -ENOMEM if any of these fail.
2539          */
2540         ret = btrfs_start_workers(&fs_info->workers);
2541         ret |= btrfs_start_workers(&fs_info->generic_worker);
2542         ret |= btrfs_start_workers(&fs_info->submit_workers);
2543         ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2544         ret |= btrfs_start_workers(&fs_info->fixup_workers);
2545         ret |= btrfs_start_workers(&fs_info->endio_workers);
2546         ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2547         ret |= btrfs_start_workers(&fs_info->rmw_workers);
2548         ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
2549         ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2550         ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2551         ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2552         ret |= btrfs_start_workers(&fs_info->delayed_workers);
2553         ret |= btrfs_start_workers(&fs_info->caching_workers);
2554         ret |= btrfs_start_workers(&fs_info->readahead_workers);
2555         ret |= btrfs_start_workers(&fs_info->flush_workers);
2556         ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
2557         if (ret) {
2558                 err = -ENOMEM;
2559                 goto fail_sb_buffer;
2560         }
2561
2562         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2563         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2564                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2565
2566         tree_root->nodesize = nodesize;
2567         tree_root->leafsize = leafsize;
2568         tree_root->sectorsize = sectorsize;
2569         tree_root->stripesize = stripesize;
2570
2571         sb->s_blocksize = sectorsize;
2572         sb->s_blocksize_bits = blksize_bits(sectorsize);
2573
2574         if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2575                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2576                 goto fail_sb_buffer;
2577         }
2578
2579         if (sectorsize != PAGE_SIZE) {
2580                 printk(KERN_WARNING "btrfs: Incompatible sector size (%lu) "
2581                        "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2582                 goto fail_sb_buffer;
2583         }
2584
2585         mutex_lock(&fs_info->chunk_mutex);
2586         ret = btrfs_read_sys_array(tree_root);
2587         mutex_unlock(&fs_info->chunk_mutex);
2588         if (ret) {
2589                 printk(KERN_WARNING "btrfs: failed to read the system "
2590                        "array on %s\n", sb->s_id);
2591                 goto fail_sb_buffer;
2592         }
2593
2594         blocksize = btrfs_level_size(tree_root,
2595                                      btrfs_super_chunk_root_level(disk_super));
2596         generation = btrfs_super_chunk_root_generation(disk_super);
2597
2598         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2599                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2600
2601         chunk_root->node = read_tree_block(chunk_root,
2602                                            btrfs_super_chunk_root(disk_super),
2603                                            blocksize, generation);
2604         if (!chunk_root->node ||
2605             !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2606                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2607                        sb->s_id);
2608                 goto fail_tree_roots;
2609         }
2610         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2611         chunk_root->commit_root = btrfs_root_node(chunk_root);
2612
2613         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2614            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2615            BTRFS_UUID_SIZE);
2616
2617         ret = btrfs_read_chunk_tree(chunk_root);
2618         if (ret) {
2619                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2620                        sb->s_id);
2621                 goto fail_tree_roots;
2622         }
2623
2624         /*
2625          * keep the device that is marked to be the target device for the
2626          * dev_replace procedure
2627          */
2628         btrfs_close_extra_devices(fs_info, fs_devices, 0);
2629
2630         if (!fs_devices->latest_bdev) {
2631                 printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2632                        sb->s_id);
2633                 goto fail_tree_roots;
2634         }
2635
2636 retry_root_backup:
2637         blocksize = btrfs_level_size(tree_root,
2638                                      btrfs_super_root_level(disk_super));
2639         generation = btrfs_super_generation(disk_super);
2640
2641         tree_root->node = read_tree_block(tree_root,
2642                                           btrfs_super_root(disk_super),
2643                                           blocksize, generation);
2644         if (!tree_root->node ||
2645             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2646                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2647                        sb->s_id);
2648
2649                 goto recovery_tree_root;
2650         }
2651
2652         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2653         tree_root->commit_root = btrfs_root_node(tree_root);
2654
2655         location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2656         location.type = BTRFS_ROOT_ITEM_KEY;
2657         location.offset = 0;
2658
2659         extent_root = btrfs_read_tree_root(tree_root, &location);
2660         if (IS_ERR(extent_root)) {
2661                 ret = PTR_ERR(extent_root);
2662                 goto recovery_tree_root;
2663         }
2664         extent_root->track_dirty = 1;
2665         fs_info->extent_root = extent_root;
2666
2667         location.objectid = BTRFS_DEV_TREE_OBJECTID;
2668         dev_root = btrfs_read_tree_root(tree_root, &location);
2669         if (IS_ERR(dev_root)) {
2670                 ret = PTR_ERR(dev_root);
2671                 goto recovery_tree_root;
2672         }
2673         dev_root->track_dirty = 1;
2674         fs_info->dev_root = dev_root;
2675         btrfs_init_devices_late(fs_info);
2676
2677         location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2678         csum_root = btrfs_read_tree_root(tree_root, &location);
2679         if (IS_ERR(csum_root)) {
2680                 ret = PTR_ERR(csum_root);
2681                 goto recovery_tree_root;
2682         }
2683         csum_root->track_dirty = 1;
2684         fs_info->csum_root = csum_root;
2685
2686         location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2687         quota_root = btrfs_read_tree_root(tree_root, &location);
2688         if (!IS_ERR(quota_root)) {
2689                 quota_root->track_dirty = 1;
2690                 fs_info->quota_enabled = 1;
2691                 fs_info->pending_quota_state = 1;
2692                 fs_info->quota_root = quota_root;
2693         }
2694
2695         fs_info->generation = generation;
2696         fs_info->last_trans_committed = generation;
2697
2698         ret = btrfs_recover_balance(fs_info);
2699         if (ret) {
2700                 printk(KERN_WARNING "btrfs: failed to recover balance\n");
2701                 goto fail_block_groups;
2702         }
2703
2704         ret = btrfs_init_dev_stats(fs_info);
2705         if (ret) {
2706                 printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2707                        ret);
2708                 goto fail_block_groups;
2709         }
2710
2711         ret = btrfs_init_dev_replace(fs_info);
2712         if (ret) {
2713                 pr_err("btrfs: failed to init dev_replace: %d\n", ret);
2714                 goto fail_block_groups;
2715         }
2716
2717         btrfs_close_extra_devices(fs_info, fs_devices, 1);
2718
2719         ret = btrfs_init_space_info(fs_info);
2720         if (ret) {
2721                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2722                 goto fail_block_groups;
2723         }
2724
2725         ret = btrfs_read_block_groups(extent_root);
2726         if (ret) {
2727                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2728                 goto fail_block_groups;
2729         }
2730         fs_info->num_tolerated_disk_barrier_failures =
2731                 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2732         if (fs_info->fs_devices->missing_devices >
2733              fs_info->num_tolerated_disk_barrier_failures &&
2734             !(sb->s_flags & MS_RDONLY)) {
2735                 printk(KERN_WARNING
2736                        "Btrfs: too many missing devices, writeable mount is not allowed\n");
2737                 goto fail_block_groups;
2738         }
2739
2740         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2741                                                "btrfs-cleaner");
2742         if (IS_ERR(fs_info->cleaner_kthread))
2743                 goto fail_block_groups;
2744
2745         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2746                                                    tree_root,
2747                                                    "btrfs-transaction");
2748         if (IS_ERR(fs_info->transaction_kthread))
2749                 goto fail_cleaner;
2750
2751         if (!btrfs_test_opt(tree_root, SSD) &&
2752             !btrfs_test_opt(tree_root, NOSSD) &&
2753             !fs_info->fs_devices->rotating) {
2754                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2755                        "mode\n");
2756                 btrfs_set_opt(fs_info->mount_opt, SSD);
2757         }
2758
2759 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2760         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2761                 ret = btrfsic_mount(tree_root, fs_devices,
2762                                     btrfs_test_opt(tree_root,
2763                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2764                                     1 : 0,
2765                                     fs_info->check_integrity_print_mask);
2766                 if (ret)
2767                         printk(KERN_WARNING "btrfs: failed to initialize"
2768                                " integrity check module %s\n", sb->s_id);
2769         }
2770 #endif
2771         ret = btrfs_read_qgroup_config(fs_info);
2772         if (ret)
2773                 goto fail_trans_kthread;
2774
2775         /* do not make disk changes in broken FS */
2776         if (btrfs_super_log_root(disk_super) != 0) {
2777                 u64 bytenr = btrfs_super_log_root(disk_super);
2778
2779                 if (fs_devices->rw_devices == 0) {
2780                         printk(KERN_WARNING "Btrfs log replay required "
2781                                "on RO media\n");
2782                         err = -EIO;
2783                         goto fail_qgroup;
2784                 }
2785                 blocksize =
2786                      btrfs_level_size(tree_root,
2787                                       btrfs_super_log_root_level(disk_super));
2788
2789                 log_tree_root = btrfs_alloc_root(fs_info);
2790                 if (!log_tree_root) {
2791                         err = -ENOMEM;
2792                         goto fail_qgroup;
2793                 }
2794
2795                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2796                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2797
2798                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2799                                                       blocksize,
2800                                                       generation + 1);
2801                 if (!log_tree_root->node ||
2802                     !extent_buffer_uptodate(log_tree_root->node)) {
2803                         printk(KERN_ERR "btrfs: failed to read log tree\n");
2804                         free_extent_buffer(log_tree_root->node);
2805                         kfree(log_tree_root);
2806                         goto fail_trans_kthread;
2807                 }
2808                 /* returns with log_tree_root freed on success */
2809                 ret = btrfs_recover_log_trees(log_tree_root);
2810                 if (ret) {
2811                         btrfs_error(tree_root->fs_info, ret,
2812                                     "Failed to recover log tree");
2813                         free_extent_buffer(log_tree_root->node);
2814                         kfree(log_tree_root);
2815                         goto fail_trans_kthread;
2816                 }
2817
2818                 if (sb->s_flags & MS_RDONLY) {
2819                         ret = btrfs_commit_super(tree_root);
2820                         if (ret)
2821                                 goto fail_trans_kthread;
2822                 }
2823         }
2824
2825         ret = btrfs_find_orphan_roots(tree_root);
2826         if (ret)
2827                 goto fail_trans_kthread;
2828
2829         if (!(sb->s_flags & MS_RDONLY)) {
2830                 ret = btrfs_cleanup_fs_roots(fs_info);
2831                 if (ret)
2832                         goto fail_trans_kthread;
2833
2834                 ret = btrfs_recover_relocation(tree_root);
2835                 if (ret < 0) {
2836                         printk(KERN_WARNING
2837                                "btrfs: failed to recover relocation\n");
2838                         err = -EINVAL;
2839                         goto fail_qgroup;
2840                 }
2841         }
2842
2843         location.objectid = BTRFS_FS_TREE_OBJECTID;
2844         location.type = BTRFS_ROOT_ITEM_KEY;
2845         location.offset = 0;
2846
2847         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2848         if (IS_ERR(fs_info->fs_root)) {
2849                 err = PTR_ERR(fs_info->fs_root);
2850                 goto fail_qgroup;
2851         }
2852
2853         if (sb->s_flags & MS_RDONLY)
2854                 return 0;
2855
2856         down_read(&fs_info->cleanup_work_sem);
2857         if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2858             (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2859                 up_read(&fs_info->cleanup_work_sem);
2860                 close_ctree(tree_root);
2861                 return ret;
2862         }
2863         up_read(&fs_info->cleanup_work_sem);
2864
2865         ret = btrfs_resume_balance_async(fs_info);
2866         if (ret) {
2867                 printk(KERN_WARNING "btrfs: failed to resume balance\n");
2868                 close_ctree(tree_root);
2869                 return ret;
2870         }
2871
2872         ret = btrfs_resume_dev_replace_async(fs_info);
2873         if (ret) {
2874                 pr_warn("btrfs: failed to resume dev_replace\n");
2875                 close_ctree(tree_root);
2876                 return ret;
2877         }
2878
2879         return 0;
2880
2881 fail_qgroup:
2882         btrfs_free_qgroup_config(fs_info);
2883 fail_trans_kthread:
2884         kthread_stop(fs_info->transaction_kthread);
2885         btrfs_cleanup_transaction(fs_info->tree_root);
2886         del_fs_roots(fs_info);
2887 fail_cleaner:
2888         kthread_stop(fs_info->cleaner_kthread);
2889
2890         /*
2891          * make sure we're done with the btree inode before we stop our
2892          * kthreads
2893          */
2894         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2895
2896 fail_block_groups:
2897         btrfs_put_block_group_cache(fs_info);
2898         btrfs_free_block_groups(fs_info);
2899
2900 fail_tree_roots:
2901         free_root_pointers(fs_info, 1);
2902         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2903
2904 fail_sb_buffer:
2905         btrfs_stop_all_workers(fs_info);
2906 fail_alloc:
2907 fail_iput:
2908         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2909
2910         iput(fs_info->btree_inode);
2911 fail_delalloc_bytes:
2912         percpu_counter_destroy(&fs_info->delalloc_bytes);
2913 fail_dirty_metadata_bytes:
2914         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
2915 fail_bdi:
2916         bdi_destroy(&fs_info->bdi);
2917 fail_srcu:
2918         cleanup_srcu_struct(&fs_info->subvol_srcu);
2919 fail:
2920         btrfs_free_stripe_hash_table(fs_info);
2921         btrfs_close_devices(fs_info->fs_devices);
2922         return err;
2923
2924 recovery_tree_root:
2925         if (!btrfs_test_opt(tree_root, RECOVERY))
2926                 goto fail_tree_roots;
2927
2928         free_root_pointers(fs_info, 0);
2929
2930         /* don't use the log in recovery mode, it won't be valid */
2931         btrfs_set_super_log_root(disk_super, 0);
2932
2933         /* we can't trust the free space cache either */
2934         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2935
2936         ret = next_root_backup(fs_info, fs_info->super_copy,
2937                                &num_backups_tried, &backup_index);
2938         if (ret == -1)
2939                 goto fail_block_groups;
2940         goto retry_root_backup;
2941 }
2942
2943 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2944 {
2945         if (uptodate) {
2946                 set_buffer_uptodate(bh);
2947         } else {
2948                 struct btrfs_device *device = (struct btrfs_device *)
2949                         bh->b_private;
2950
2951                 printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2952                                           "I/O error on %s\n",
2953                                           rcu_str_deref(device->name));
2954                 /* note, we don't set_buffer_write_io_error because we have
2955                  * our own ways of dealing with the IO errors
2956                  */
2957                 clear_buffer_uptodate(bh);
2958                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2959         }
2960         unlock_buffer(bh);
2961         put_bh(bh);
2962 }
2963
2964 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2965 {
2966         struct buffer_head *bh;
2967         struct buffer_head *latest = NULL;
2968         struct btrfs_super_block *super;
2969         int i;
2970         u64 transid = 0;
2971         u64 bytenr;
2972
2973         /* we would like to check all the supers, but that would make
2974          * a btrfs mount succeed after a mkfs from a different FS.
2975          * So, we need to add a special mount option to scan for
2976          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2977          */
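        /*
         * Side note (not derived from this function, offsets quoted from
         * memory): btrfs_sb_offset(0) is the primary superblock at 64KiB;
         * the later mirror copies the comment above refers to live at
         * 64MiB and 256GiB.
         */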
2978         for (i = 0; i < 1; i++) {
2979                 bytenr = btrfs_sb_offset(i);
2980                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2981                         break;
2982                 bh = __bread(bdev, bytenr / 4096, 4096);
2983                 if (!bh)
2984                         continue;
2985
2986                 super = (struct btrfs_super_block *)bh->b_data;
2987                 if (btrfs_super_bytenr(super) != bytenr ||
2988                     super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2989                         brelse(bh);
2990                         continue;
2991                 }
2992
2993                 if (!latest || btrfs_super_generation(super) > transid) {
2994                         brelse(latest);
2995                         latest = bh;
2996                         transid = btrfs_super_generation(super);
2997                 } else {
2998                         brelse(bh);
2999                 }
3000         }
3001         return latest;
3002 }
3003
3004 /*
3005  * this should be called twice, once with wait == 0 and
3006  * once with wait == 1.  When wait == 0 is done, all the buffer heads
3007  * we write are pinned.
3008  *
3009  * They are released when wait == 1 is done.
3010  * max_mirrors must be the same for both runs, and it indicates how
3011  * many supers on this one device should be written.
3012  *
3013  * max_mirrors == 0 means to write them all.
3014  */
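/*
 * Illustrative caller pattern (a sketch only; write_all_supers() below is
 * the real caller in this file): loop over the devices once with wait == 0
 * to submit every super, then loop again with wait == 1 to collect the
 * results:
 *
 *	ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
 *	...
 *	ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
 */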
3015 static int write_dev_supers(struct btrfs_device *device,
3016                             struct btrfs_super_block *sb,
3017                             int do_barriers, int wait, int max_mirrors)
3018 {
3019         struct buffer_head *bh;
3020         int i;
3021         int ret;
3022         int errors = 0;
3023         u32 crc;
3024         u64 bytenr;
3025
3026         if (max_mirrors == 0)
3027                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3028
3029         for (i = 0; i < max_mirrors; i++) {
3030                 bytenr = btrfs_sb_offset(i);
3031                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
3032                         break;
3033
3034                 if (wait) {
3035                         bh = __find_get_block(device->bdev, bytenr / 4096,
3036                                               BTRFS_SUPER_INFO_SIZE);
3037                         if (!bh) {
3038                                 errors++;
3039                                 continue;
3040                         }
3041                         wait_on_buffer(bh);
3042                         if (!buffer_uptodate(bh))
3043                                 errors++;
3044
3045                         /* drop our reference */
3046                         brelse(bh);
3047
3048                         /* drop the reference from the wait == 0 run */
3049                         brelse(bh);
3050                         continue;
3051                 } else {
3052                         btrfs_set_super_bytenr(sb, bytenr);
3053
3054                         crc = ~(u32)0;
3055                         crc = btrfs_csum_data((char *)sb +
3056                                               BTRFS_CSUM_SIZE, crc,
3057                                               BTRFS_SUPER_INFO_SIZE -
3058                                               BTRFS_CSUM_SIZE);
3059                         btrfs_csum_final(crc, sb->csum);
3060
3061                         /*
3062                          * one reference for us, and we leave it for the
3063                          * caller
3064                          */
3065                         bh = __getblk(device->bdev, bytenr / 4096,
3066                                       BTRFS_SUPER_INFO_SIZE);
3067                         if (!bh) {
3068                                 printk(KERN_ERR "btrfs: couldn't get super "
3069                                        "buffer head for bytenr %Lu\n", bytenr);
3070                                 errors++;
3071                                 continue;
3072                         }
3073
3074                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3075
3076                         /* one reference for submit_bh */
3077                         get_bh(bh);
3078
3079                         set_buffer_uptodate(bh);
3080                         lock_buffer(bh);
3081                         bh->b_end_io = btrfs_end_buffer_write_sync;
3082                         bh->b_private = device;
3083                 }
3084
3085                 /*
3086                  * we fua the first super.  The others we allow
3087                  * to go down lazily.
3088                  */
3089                 ret = btrfsic_submit_bh(WRITE_FUA, bh);
3090                 if (ret)
3091                         errors++;
3092         }
3093         return errors < i ? 0 : -1;
3094 }
3095
3096 /*
3097  * endio for the write_dev_flush, this will wake anyone waiting
3098  * for the barrier when it is done
3099  */
3100 static void btrfs_end_empty_barrier(struct bio *bio, int err)
3101 {
3102         if (err) {
3103                 if (err == -EOPNOTSUPP)
3104                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
3105                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3106         }
3107         if (bio->bi_private)
3108                 complete(bio->bi_private);
3109         bio_put(bio);
3110 }
3111
3112 /*
3113  * trigger flushes for one of the devices.  If you pass wait == 0, the flushes are
3114  * sent down.  With wait == 1, it waits for the previous flush.
3115  *
3116  * any device where the flush fails with eopnotsupp is flagged as not-barrier
3117  * capable
3118  */
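/*
 * Illustrative usage (a sketch; barrier_all_devices() below is the real
 * caller): call write_dev_flush(dev, 0) for every device to get all the
 * flushes in flight, then write_dev_flush(dev, 1) for every device to wait
 * for and check the results.
 */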
3119 static int write_dev_flush(struct btrfs_device *device, int wait)
3120 {
3121         struct bio *bio;
3122         int ret = 0;
3123
3124         if (device->nobarriers)
3125                 return 0;
3126
3127         if (wait) {
3128                 bio = device->flush_bio;
3129                 if (!bio)
3130                         return 0;
3131
3132                 wait_for_completion(&device->flush_wait);
3133
3134                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
3135                         printk_in_rcu("btrfs: disabling barriers on dev %s\n",
3136                                       rcu_str_deref(device->name));
3137                         device->nobarriers = 1;
3138                 } else if (!bio_flagged(bio, BIO_UPTODATE)) {
3139                         ret = -EIO;
3140                         btrfs_dev_stat_inc_and_print(device,
3141                                 BTRFS_DEV_STAT_FLUSH_ERRS);
3142                 }
3143
3144                 /* drop the reference from the wait == 0 run */
3145                 bio_put(bio);
3146                 device->flush_bio = NULL;
3147
3148                 return ret;
3149         }
3150
3151         /*
3152          * one reference for us, and we leave it for the
3153          * caller
3154          */
3155         device->flush_bio = NULL;
3156         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3157         if (!bio)
3158                 return -ENOMEM;
3159
3160         bio->bi_end_io = btrfs_end_empty_barrier;
3161         bio->bi_bdev = device->bdev;
3162         init_completion(&device->flush_wait);
3163         bio->bi_private = &device->flush_wait;
3164         device->flush_bio = bio;
3165
3166         bio_get(bio);
3167         btrfsic_submit_bio(WRITE_FLUSH, bio);
3168
3169         return 0;
3170 }
3171
3172 /*
3173  * send an empty flush down to each device in parallel,
3174  * then wait for them
3175  */
3176 static int barrier_all_devices(struct btrfs_fs_info *info)
3177 {
3178         struct list_head *head;
3179         struct btrfs_device *dev;
3180         int errors_send = 0;
3181         int errors_wait = 0;
3182         int ret;
3183
3184         /* send down all the barriers */
3185         head = &info->fs_devices->devices;
3186         list_for_each_entry_rcu(dev, head, dev_list) {
3187                 if (!dev->bdev) {
3188                         errors_send++;
3189                         continue;
3190                 }
3191                 if (!dev->in_fs_metadata || !dev->writeable)
3192                         continue;
3193
3194                 ret = write_dev_flush(dev, 0);
3195                 if (ret)
3196                         errors_send++;
3197         }
3198
3199         /* wait for all the barriers */
3200         list_for_each_entry_rcu(dev, head, dev_list) {
3201                 if (!dev->bdev) {
3202                         errors_wait++;
3203                         continue;
3204                 }
3205                 if (!dev->in_fs_metadata || !dev->writeable)
3206                         continue;
3207
3208                 ret = write_dev_flush(dev, 1);
3209                 if (ret)
3210                         errors_wait++;
3211         }
3212         if (errors_send > info->num_tolerated_disk_barrier_failures ||
3213             errors_wait > info->num_tolerated_disk_barrier_failures)
3214                 return -EIO;
3215         return 0;
3216 }
3217
3218 int btrfs_calc_num_tolerated_disk_barrier_failures(
3219         struct btrfs_fs_info *fs_info)
3220 {
3221         struct btrfs_ioctl_space_info space;
3222         struct btrfs_space_info *sinfo;
3223         u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3224                        BTRFS_BLOCK_GROUP_SYSTEM,
3225                        BTRFS_BLOCK_GROUP_METADATA,
3226                        BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3227         int num_types = 4;
3228         int i;
3229         int c;
3230         int num_tolerated_disk_barrier_failures =
3231                 (int)fs_info->fs_devices->num_devices;
3232
3233         for (i = 0; i < num_types; i++) {
3234                 struct btrfs_space_info *tmp;
3235
3236                 sinfo = NULL;
3237                 rcu_read_lock();
3238                 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3239                         if (tmp->flags == types[i]) {
3240                                 sinfo = tmp;
3241                                 break;
3242                         }
3243                 }
3244                 rcu_read_unlock();
3245
3246                 if (!sinfo)
3247                         continue;
3248
3249                 down_read(&sinfo->groups_sem);
3250                 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3251                         if (!list_empty(&sinfo->block_groups[c])) {
3252                                 u64 flags;
3253
3254                                 btrfs_get_block_group_info(
3255                                         &sinfo->block_groups[c], &space);
3256                                 if (space.total_bytes == 0 ||
3257                                     space.used_bytes == 0)
3258                                         continue;
3259                                 flags = space.flags;
3260                                 /*
3261                                  * return
3262                                  * 0: if dup, single or RAID0 is configured for
3263                                  *    any of metadata, system or data, else
3264                                  * 1: if RAID5 is configured, or if RAID1 or
3265                                  *    RAID10 is configured and only two mirrors
3266                                  *    are used, else
3267                                  * 2: if RAID6 is configured, else
3268                                  * num_mirrors - 1: if RAID1 or RAID10 is
3269                                  *                  configured and more than
3270                                  *                  2 mirrors are used.
3271                                  */
3272                                 if (num_tolerated_disk_barrier_failures > 0 &&
3273                                     ((flags & (BTRFS_BLOCK_GROUP_DUP |
3274                                                BTRFS_BLOCK_GROUP_RAID0)) ||
3275                                      ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3276                                       == 0)))
3277                                         num_tolerated_disk_barrier_failures = 0;
3278                                 else if (num_tolerated_disk_barrier_failures > 1) {
3279                                         if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3280                                             BTRFS_BLOCK_GROUP_RAID5 |
3281                                             BTRFS_BLOCK_GROUP_RAID10)) {
3282                                                 num_tolerated_disk_barrier_failures = 1;
3283                                         } else if (flags &
3284                                                    BTRFS_BLOCK_GROUP_RAID6) {
3285                                                 num_tolerated_disk_barrier_failures = 2;
3286                                         }
3287                                 }
3288                         }
3289                 }
3290                 up_read(&sinfo->groups_sem);
3291         }
3292
3293         return num_tolerated_disk_barrier_failures;
3294 }
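/*
 * Worked example (illustrative only, following the rules in the comment
 * above): metadata and system on RAID1 but data on RAID0 gives 0, since the
 * RAID0 data cannot survive losing a device; all profiles on two-device
 * RAID1 gives 1; RAID6 everywhere (with enough devices) gives 2.
 */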
3295
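/*
 * Write the super block to every writeable device: back up the super roots,
 * send barriers to all devices unless NOBARRIER is set, then drive
 * write_dev_supers() through its submit pass (wait == 0) and its wait pass
 * (wait == 1) as described above.
 */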
3296 static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3297 {
3298         struct list_head *head;
3299         struct btrfs_device *dev;
3300         struct btrfs_super_block *sb;
3301         struct btrfs_dev_item *dev_item;
3302         int ret;
3303         int do_barriers;
3304         int max_errors;
3305         int total_errors = 0;
3306         u64 flags;
3307
3308         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3309         do_barriers = !btrfs_test_opt(root, NOBARRIER);
3310         backup_super_roots(root->fs_info);
3311
3312         sb = root->fs_info->super_for_commit;
3313         dev_item = &sb->dev_item;
3314
3315         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3316         head = &root->fs_info->fs_devices->devices;
3317
3318         if (do_barriers) {
3319                 ret = barrier_all_devices(root->fs_info);
3320                 if (ret) {
3321                         mutex_unlock(
3322                                 &root->fs_info->fs_devices->device_list_mutex);
3323                         btrfs_error(root->fs_info, ret,
3324                                     "errors while submitting device barriers.");
3325                         return ret;
3326                 }
3327         }
3328
3329         list_for_each_entry_rcu(dev, head, dev_list) {
3330                 if (!dev->bdev) {
3331                         total_errors++;
3332                         continue;
3333                 }
3334                 if (!dev->in_fs_metadata || !dev->writeable)
3335                         continue;
3336
3337                 btrfs_set_stack_device_generation(dev_item, 0);
3338                 btrfs_set_stack_device_type(dev_item, dev->type);
3339                 btrfs_set_stack_device_id(dev_item, dev->devid);
3340                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
3341                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3342                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3343                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3344                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3345                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3346                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3347
3348                 flags = btrfs_super_flags(sb);
3349                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3350
3351                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3352                 if (ret)
3353                         total_errors++;
3354         }
3355         if (total_errors > max_errors) {
3356                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
3357                        total_errors);
3358
3359                 /* This shouldn't happen. FUA is masked off if unsupported */
3360                 BUG();
3361         }
3362
3363         total_errors = 0;
3364         list_for_each_entry_rcu(dev, head, dev_list) {
3365                 if (!dev->bdev)
3366                         continue;
3367                 if (!dev->in_fs_metadata || !dev->writeable)
3368                         continue;
3369
3370                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3371                 if (ret)
3372                         total_errors++;
3373         }
3374         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3375         if (total_errors > max_errors) {
3376                 btrfs_error(root->fs_info, -EIO,
3377                             "%d errors while writing supers", total_errors);
3378                 return -EIO;
3379         }
3380         return 0;
3381 }
3382
3383 int write_ctree_super(struct btrfs_trans_handle *trans,
3384                       struct btrfs_root *root, int max_mirrors)
3385 {
3386         int ret;
3387
3388         ret = write_all_supers(root, max_mirrors);
3389         return ret;
3390 }
3391
3392 /* Drop a fs root from the radix tree and free it. */
3393 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3394                                   struct btrfs_root *root)
3395 {
3396         spin_lock(&fs_info->fs_roots_radix_lock);
3397         radix_tree_delete(&fs_info->fs_roots_radix,
3398                           (unsigned long)root->root_key.objectid);
3399         spin_unlock(&fs_info->fs_roots_radix_lock);
3400
3401         if (btrfs_root_refs(&root->root_item) == 0)
3402                 synchronize_srcu(&fs_info->subvol_srcu);
3403
3404         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3405                 btrfs_free_log(NULL, root);
3406                 btrfs_free_log_root_tree(NULL, fs_info);
3407         }
3408
3409         __btrfs_remove_free_space_cache(root->free_ino_pinned);
3410         __btrfs_remove_free_space_cache(root->free_ino_ctl);
3411         free_fs_root(root);
3412 }
3413
3414 static void free_fs_root(struct btrfs_root *root)
3415 {
3416         iput(root->cache_inode);
3417         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3418         if (root->anon_dev)
3419                 free_anon_bdev(root->anon_dev);
3420         free_extent_buffer(root->node);
3421         free_extent_buffer(root->commit_root);
3422         kfree(root->free_ino_ctl);
3423         kfree(root->free_ino_pinned);
3424         kfree(root->name);
3425         btrfs_put_fs_root(root);
3426 }
3427
3428 void btrfs_free_fs_root(struct btrfs_root *root)
3429 {
3430         free_fs_root(root);
3431 }
3432
3433 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3434 {
3435         u64 root_objectid = 0;
3436         struct btrfs_root *gang[8];
3437         int i;
3438         int ret;
3439
3440         while (1) {
3441                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3442                                              (void **)gang, root_objectid,
3443                                              ARRAY_SIZE(gang));
3444                 if (!ret)
3445                         break;
3446
3447                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3448                 for (i = 0; i < ret; i++) {
3449                         int err;
3450
3451                         root_objectid = gang[i]->root_key.objectid;
3452                         err = btrfs_orphan_cleanup(gang[i]);
3453                         if (err)
3454                                 return err;
3455                 }
3456                 root_objectid++;
3457         }
3458         return 0;
3459 }
3460
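/*
 * Quiesce the filesystem and push everything to disk: run delayed iputs
 * under the cleaner_mutex, wait for cleanup work to finish, commit the
 * transaction twice (the second commit drops the original snapshot), then
 * write out and wait on the dirty tree blocks and the super blocks.
 */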
3461 int btrfs_commit_super(struct btrfs_root *root)
3462 {
3463         struct btrfs_trans_handle *trans;
3464         int ret;
3465
3466         mutex_lock(&root->fs_info->cleaner_mutex);
3467         btrfs_run_delayed_iputs(root);
3468         mutex_unlock(&root->fs_info->cleaner_mutex);
3469         wake_up_process(root->fs_info->cleaner_kthread);
3470
3471         /* wait until ongoing cleanup work is done */
3472         down_write(&root->fs_info->cleanup_work_sem);
3473         up_write(&root->fs_info->cleanup_work_sem);
3474
3475         trans = btrfs_join_transaction(root);
3476         if (IS_ERR(trans))
3477                 return PTR_ERR(trans);
3478         ret = btrfs_commit_transaction(trans, root);
3479         if (ret)
3480                 return ret;
3481         /* run commit again to drop the original snapshot */
3482         trans = btrfs_join_transaction(root);
3483         if (IS_ERR(trans))
3484                 return PTR_ERR(trans);
3485         ret = btrfs_commit_transaction(trans, root);
3486         if (ret)
3487                 return ret;
3488         ret = btrfs_write_and_wait_transaction(NULL, root);
3489         if (ret) {
3490                 btrfs_error(root->fs_info, ret,
3491                             "Failed to sync btree inode to disk.");
3492                 return ret;
3493         }
3494
3495         ret = write_ctree_super(NULL, root, 0);
3496         return ret;
3497 }
3498
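/*
 * Unmount-time teardown: pause balance, suspend dev-replace, cancel scrub,
 * wait for defraggers, commit the super on read-write mounts, stop the
 * transaction and cleaner kthreads, then free the block groups, workers,
 * fs roots, btree inode and per-cpu counters.
 */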
3499 int close_ctree(struct btrfs_root *root)
3500 {
3501         struct btrfs_fs_info *fs_info = root->fs_info;
3502         int ret;
3503
3504         fs_info->closing = 1;
3505         smp_mb();
3506
3507         /* pause restriper - we want to resume on mount */
3508         btrfs_pause_balance(fs_info);
3509
3510         btrfs_dev_replace_suspend_for_unmount(fs_info);
3511
3512         btrfs_scrub_cancel(fs_info);
3513
3514         /* wait for any defraggers to finish */
3515         wait_event(fs_info->transaction_wait,
3516                    (atomic_read(&fs_info->defrag_running) == 0));
3517
3518         /* clear out the rbtree of defraggable inodes */
3519         btrfs_cleanup_defrag_inodes(fs_info);
3520
3521         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3522                 ret = btrfs_commit_super(root);
3523                 if (ret)
3524                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3525         }
3526
3527         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3528                 btrfs_error_commit_super(root);
3529
3530         btrfs_put_block_group_cache(fs_info);
3531
3532         kthread_stop(fs_info->transaction_kthread);
3533         kthread_stop(fs_info->cleaner_kthread);
3534
3535         fs_info->closing = 2;
3536         smp_mb();
3537
3538         btrfs_free_qgroup_config(root->fs_info);
3539
3540         if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3541                 printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
3542                        percpu_counter_sum(&fs_info->delalloc_bytes));
3543         }
3544
3545         btrfs_free_block_groups(fs_info);
3546
3547         btrfs_stop_all_workers(fs_info);
3548
3549         del_fs_roots(fs_info);
3550
3551         free_root_pointers(fs_info, 1);
3552
3553         iput(fs_info->btree_inode);
3554
3555 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3556         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3557                 btrfsic_unmount(root, fs_info->fs_devices);
3558 #endif
3559
3560         btrfs_close_devices(fs_info->fs_devices);
3561         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3562
3563         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3564         percpu_counter_destroy(&fs_info->delalloc_bytes);
3565         bdi_destroy(&fs_info->bdi);
3566         cleanup_srcu_struct(&fs_info->subvol_srcu);
3567
3568         btrfs_free_stripe_hash_table(fs_info);
3569
3570         return 0;
3571 }
3572
3573 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3574                           int atomic)
3575 {
3576         int ret;
3577         struct inode *btree_inode = buf->pages[0]->mapping->host;
3578
3579         ret = extent_buffer_uptodate(buf);
3580         if (!ret)
3581                 return ret;
3582
3583         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3584                                     parent_transid, atomic);
3585         if (ret == -EAGAIN)
3586                 return ret;
3587         return !ret;
3588 }
3589
3590 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3591 {
3592         return set_extent_buffer_uptodate(buf);
3593 }
3594
3595 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3596 {
3597         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3598         u64 transid = btrfs_header_generation(buf);
3599         int was_dirty;
3600
3601         btrfs_assert_tree_locked(buf);
3602         if (transid != root->fs_info->generation)
3603                 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3604                        "found %llu running %llu\n",
3605                         (unsigned long long)buf->start,
3606                         (unsigned long long)transid,
3607                         (unsigned long long)root->fs_info->generation);
3608         was_dirty = set_extent_buffer_dirty(buf);
3609         if (!was_dirty)
3610                 __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3611                                      buf->len,
3612                                      root->fs_info->dirty_metadata_batch);
3613 }
3614
3615 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3616                                         int flush_delayed)
3617 {
3618         /*
3619          * looks as though older kernels can get into trouble with
3620          * this code; they end up stuck in balance_dirty_pages forever
3621          */
3622         int ret;
3623
3624         if (current->flags & PF_MEMALLOC)
3625                 return;
3626
3627         if (flush_delayed)
3628                 btrfs_balance_delayed_items(root);
3629
3630         ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3631                                      BTRFS_DIRTY_METADATA_THRESH);
3632         if (ret > 0) {
3633                 balance_dirty_pages_ratelimited(
3634                                    root->fs_info->btree_inode->i_mapping);
3635         }
3636         return;
3637 }
3638
3639 void btrfs_btree_balance_dirty(struct btrfs_root *root)
3640 {
3641         __btrfs_btree_balance_dirty(root, 1);
3642 }
3643
3644 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3645 {
3646         __btrfs_btree_balance_dirty(root, 0);
3647 }
3648
3649 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3650 {
3651         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3652         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3653 }
3654
3655 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3656                               int read_only)
3657 {
3658         /*
3659          * Placeholder for checks
3660          */
3661         return 0;
3662 }
3663
3664 static void btrfs_error_commit_super(struct btrfs_root *root)
3665 {
3666         mutex_lock(&root->fs_info->cleaner_mutex);
3667         btrfs_run_delayed_iputs(root);
3668         mutex_unlock(&root->fs_info->cleaner_mutex);
3669
3670         down_write(&root->fs_info->cleanup_work_sem);
3671         up_write(&root->fs_info->cleanup_work_sem);
3672
3673         /* cleanup FS via transaction */
3674         btrfs_cleanup_transaction(root);
3675 }
3676
3677 static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
3678                                              struct btrfs_root *root)
3679 {
3680         struct btrfs_inode *btrfs_inode;
3681         struct list_head splice;
3682
3683         INIT_LIST_HEAD(&splice);
3684
3685         mutex_lock(&root->fs_info->ordered_operations_mutex);
3686         spin_lock(&root->fs_info->ordered_extent_lock);
3687
3688         list_splice_init(&t->ordered_operations, &splice);
3689         while (!list_empty(&splice)) {
3690                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3691                                          ordered_operations);
3692
3693                 list_del_init(&btrfs_inode->ordered_operations);
3694                 spin_unlock(&root->fs_info->ordered_extent_lock);
3695
3696                 btrfs_invalidate_inodes(btrfs_inode->root);
3697
3698                 spin_lock(&root->fs_info->ordered_extent_lock);
3699         }
3700
3701         spin_unlock(&root->fs_info->ordered_extent_lock);
3702         mutex_unlock(&root->fs_info->ordered_operations_mutex);
3703 }
3704
3705 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3706 {
3707         struct btrfs_ordered_extent *ordered;
3708
3709         spin_lock(&root->fs_info->ordered_extent_lock);
3710         /*
3711          * This will just short circuit the ordered completion stuff which will
3712          * make sure the ordered extent gets properly cleaned up.
3713          */
3714         list_for_each_entry(ordered, &root->fs_info->ordered_extents,
3715                             root_extent_list)
3716                 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
3717         spin_unlock(&root->fs_info->ordered_extent_lock);
3718 }
3719
3720 int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3721                                struct btrfs_root *root)
3722 {
3723         struct rb_node *node;
3724         struct btrfs_delayed_ref_root *delayed_refs;
3725         struct btrfs_delayed_ref_node *ref;
3726         int ret = 0;
3727
3728         delayed_refs = &trans->delayed_refs;
3729
3730         spin_lock(&delayed_refs->lock);
3731         if (delayed_refs->num_entries == 0) {
3732                 spin_unlock(&delayed_refs->lock);
3733                 printk(KERN_INFO "delayed_refs has no entries\n");
3734                 return ret;
3735         }
3736
3737         while ((node = rb_first(&delayed_refs->root)) != NULL) {
3738                 struct btrfs_delayed_ref_head *head = NULL;
3739
3740                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3741                 atomic_set(&ref->refs, 1);
3742                 if (btrfs_delayed_ref_is_head(ref)) {
3743
3744                         head = btrfs_delayed_node_to_head(ref);
3745                         if (!mutex_trylock(&head->mutex)) {
3746                                 atomic_inc(&ref->refs);
3747                                 spin_unlock(&delayed_refs->lock);
3748
3749                                 /* Need to wait for the delayed ref to run */
3750                                 mutex_lock(&head->mutex);
3751                                 mutex_unlock(&head->mutex);
3752                                 btrfs_put_delayed_ref(ref);
3753
3754                                 spin_lock(&delayed_refs->lock);
3755                                 continue;
3756                         }
3757
3758                         if (head->must_insert_reserved)
3759                                 btrfs_pin_extent(root, ref->bytenr,
3760                                                  ref->num_bytes, 1);
3761                         btrfs_free_delayed_extent_op(head->extent_op);
3762                         delayed_refs->num_heads--;
3763                         if (list_empty(&head->cluster))
3764                                 delayed_refs->num_heads_ready--;
3765                         list_del_init(&head->cluster);
3766                 }
3767
3768                 ref->in_tree = 0;
3769                 rb_erase(&ref->rb_node, &delayed_refs->root);
3770                 delayed_refs->num_entries--;
3771                 if (head)
3772                         mutex_unlock(&head->mutex);
3773                 spin_unlock(&delayed_refs->lock);
3774                 btrfs_put_delayed_ref(ref);
3775
3776                 cond_resched();
3777                 spin_lock(&delayed_refs->lock);
3778         }
3779
3780         spin_unlock(&delayed_refs->lock);
3781
3782         return ret;
3783 }
3784
3785 static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
3786 {
3787         struct btrfs_pending_snapshot *snapshot;
3788         struct list_head splice;
3789
3790         INIT_LIST_HEAD(&splice);
3791
3792         list_splice_init(&t->pending_snapshots, &splice);
3793
3794         while (!list_empty(&splice)) {
3795                 snapshot = list_entry(splice.next,
3796                                       struct btrfs_pending_snapshot,
3797                                       list);
3798                 snapshot->error = -ECANCELED;
3799                 list_del_init(&snapshot->list);
3800         }
3801 }
3802
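/*
 * Drop every inode on this root's delalloc list: splice the list away under
 * root->delalloc_lock, clear BTRFS_INODE_IN_DELALLOC_LIST on each inode and
 * invalidate the owning root's inodes.  Only used on the transaction
 * cleanup/abort path.
 */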
3803 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3804 {
3805         struct btrfs_inode *btrfs_inode;
3806         struct list_head splice;
3807
3808         INIT_LIST_HEAD(&splice);
3809
3810         spin_lock(&root->delalloc_lock);
3811         list_splice_init(&root->delalloc_inodes, &splice);
3812
3813         while (!list_empty(&splice)) {
3814                 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
3815                                                delalloc_inodes);
3816
3817                 list_del_init(&btrfs_inode->delalloc_inodes);
3818                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3819                           &btrfs_inode->runtime_flags);
3820                 spin_unlock(&root->delalloc_lock);
3821
3822                 btrfs_invalidate_inodes(btrfs_inode->root);
3823
3824                 spin_lock(&root->delalloc_lock);
3825         }
3826
3827         spin_unlock(&root->delalloc_lock);
3828 }
3829
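/*
 * Walk fs_info->delalloc_roots (the per-subvolume delalloc list) and clean
 * up the delalloc inodes of every root still on it, holding a reference on
 * each root while the lock is dropped.
 */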
3830 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
3831 {
3832         struct btrfs_root *root;
3833         struct list_head splice;
3834
3835         INIT_LIST_HEAD(&splice);
3836
3837         spin_lock(&fs_info->delalloc_root_lock);
3838         list_splice_init(&fs_info->delalloc_roots, &splice);
3839         while (!list_empty(&splice)) {
3840                 root = list_first_entry(&splice, struct btrfs_root,
3841                                          delalloc_root);
3842                 list_del_init(&root->delalloc_root);
3843                 root = btrfs_grab_fs_root(root);
3844                 BUG_ON(!root);
3845                 spin_unlock(&fs_info->delalloc_root_lock);
3846
3847                 btrfs_destroy_delalloc_inodes(root);
3848                 btrfs_put_fs_root(root);
3849
3850                 spin_lock(&fs_info->delalloc_root_lock);
3851         }
3852         spin_unlock(&fs_info->delalloc_root_lock);
3853 }
3854
3855 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3856                                         struct extent_io_tree *dirty_pages,
3857                                         int mark)
3858 {
3859         int ret;
3860         struct extent_buffer *eb;
3861         u64 start = 0;
3862         u64 end;
3863
3864         while (1) {
3865                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3866                                             mark, NULL);
3867                 if (ret)
3868                         break;
3869
3870                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3871                 while (start <= end) {
3872                         eb = btrfs_find_tree_block(root, start,
3873                                                    root->leafsize);
3874                         start += root->leafsize;
3875                         if (!eb)
3876                                 continue;
3877                         wait_on_extent_buffer_writeback(eb);
3878
3879                         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3880                                                &eb->bflags))
3881                                 clear_extent_buffer_dirty(eb);
3882                         free_extent_buffer_stale(eb);
3883                 }
3884         }
3885
3886         return ret;
3887 }
3888
3889 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3890                                        struct extent_io_tree *pinned_extents)
3891 {
3892         struct extent_io_tree *unpin;
3893         u64 start;
3894         u64 end;
3895         int ret;
3896         bool loop = true;
3897
3898         unpin = pinned_extents;
3899 again:
3900         while (1) {
3901                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3902                                             EXTENT_DIRTY, NULL);
3903                 if (ret)
3904                         break;
3905
3906                 /* opt_discard */
3907                 if (btrfs_test_opt(root, DISCARD))
3908                         ret = btrfs_error_discard_extent(root, start,
3909                                                          end + 1 - start,
3910                                                          NULL);
3911
3912                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3913                 btrfs_error_unpin_extent_range(root, start, end);
3914                 cond_resched();
3915         }
3916
3917         if (loop) {
3918                 if (unpin == &root->fs_info->freed_extents[0])
3919                         unpin = &root->fs_info->freed_extents[1];
3920                 else
3921                         unpin = &root->fs_info->freed_extents[0];
3922                 loop = false;
3923                 goto again;
3924         }
3925
3926         return 0;
3927 }
3928
3929 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3930                                    struct btrfs_root *root)
3931 {
3932         btrfs_destroy_delayed_refs(cur_trans, root);
3933         btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3934                                 cur_trans->dirty_pages.dirty_bytes);
3935
3936         /* FIXME: cleanup wait for commit */
3937         cur_trans->in_commit = 1;
3938         cur_trans->blocked = 1;
3939         wake_up(&root->fs_info->transaction_blocked_wait);
3940
3941         btrfs_evict_pending_snapshots(cur_trans);
3942
3943         cur_trans->blocked = 0;
3944         wake_up(&root->fs_info->transaction_wait);
3945
3946         cur_trans->commit_done = 1;
3947         wake_up(&cur_trans->commit_wait);
3948
3949         btrfs_destroy_delayed_inodes(root);
3950         btrfs_assert_delayed_root_empty(root);
3951
3952         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3953                                      EXTENT_DIRTY);
3954         btrfs_destroy_pinned_extent(root,
3955                                     root->fs_info->pinned_extents);
3956
3957         /*
3958         memset(cur_trans, 0, sizeof(*cur_trans));
3959         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3960         */
3961 }
3962
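/*
 * Force every pending transaction to completion without writing it out:
 * splice fs_info->trans_list under trans_lock, block new joiners via
 * trans_no_join, then tear down the ordered operations/extents, delayed
 * refs, pending snapshots, delalloc inodes and dirty/pinned extents of each
 * transaction before freeing it.
 */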
3963 static int btrfs_cleanup_transaction(struct btrfs_root *root)
3964 {
3965         struct btrfs_transaction *t;
3966         LIST_HEAD(list);
3967
3968         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3969
3970         spin_lock(&root->fs_info->trans_lock);
3971         list_splice_init(&root->fs_info->trans_list, &list);
3972         root->fs_info->trans_no_join = 1;
3973         spin_unlock(&root->fs_info->trans_lock);
3974
3975         while (!list_empty(&list)) {
3976                 t = list_entry(list.next, struct btrfs_transaction, list);
3977
3978                 btrfs_destroy_ordered_operations(t, root);
3979
3980                 btrfs_destroy_ordered_extents(root);
3981
3982                 btrfs_destroy_delayed_refs(t, root);
3983
3984                 /* FIXME: cleanup wait for commit */
3985                 t->in_commit = 1;
3986                 t->blocked = 1;
3987                 smp_mb();
3988                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3989                         wake_up(&root->fs_info->transaction_blocked_wait);
3990
3991                 btrfs_evict_pending_snapshots(t);
3992
3993                 t->blocked = 0;
3994                 smp_mb();
3995                 if (waitqueue_active(&root->fs_info->transaction_wait))
3996                         wake_up(&root->fs_info->transaction_wait);
3997
3998                 t->commit_done = 1;
3999                 smp_mb();
4000                 if (waitqueue_active(&t->commit_wait))
4001                         wake_up(&t->commit_wait);
4002
4003                 btrfs_destroy_delayed_inodes(root);
4004                 btrfs_assert_delayed_root_empty(root);
4005
4006                 btrfs_destroy_all_delalloc_inodes(root->fs_info);
4007
4008                 spin_lock(&root->fs_info->trans_lock);
4009                 root->fs_info->running_transaction = NULL;
4010                 spin_unlock(&root->fs_info->trans_lock);
4011
4012                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
4013                                              EXTENT_DIRTY);
4014
4015                 btrfs_destroy_pinned_extent(root,
4016                                             root->fs_info->pinned_extents);
4017
4018                 atomic_set(&t->use_count, 0);
4019                 list_del_init(&t->list);
4020                 memset(t, 0, sizeof(*t));
4021                 kmem_cache_free(btrfs_transaction_cachep, t);
4022         }
4023
4024         spin_lock(&root->fs_info->trans_lock);
4025         root->fs_info->trans_no_join = 0;
4026         spin_unlock(&root->fs_info->trans_lock);
4027         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
4028
4029         return 0;
4030 }
4031
4032 static struct extent_io_ops btree_extent_io_ops = {
4033         .readpage_end_io_hook = btree_readpage_end_io_hook,
4034         .readpage_io_failed_hook = btree_io_failed_hook,
4035         .submit_bio_hook = btree_submit_bio_hook,
4036         /* note we're sharing with inode.c for the merge bio hook */
4037         .merge_bio_hook = btrfs_merge_bio_hook,
4038 };