btrfs: add dedicated members for start and length of a block group
fs/btrfs/volumes.c (platform/kernel/linux-starfive.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/sched.h>
7 #include <linux/bio.h>
8 #include <linux/slab.h>
9 #include <linux/buffer_head.h>
10 #include <linux/blkdev.h>
11 #include <linux/ratelimit.h>
12 #include <linux/kthread.h>
13 #include <linux/raid/pq.h>
14 #include <linux/semaphore.h>
15 #include <linux/uuid.h>
16 #include <linux/list_sort.h>
17 #include "misc.h"
18 #include "ctree.h"
19 #include "extent_map.h"
20 #include "disk-io.h"
21 #include "transaction.h"
22 #include "print-tree.h"
23 #include "volumes.h"
24 #include "raid56.h"
25 #include "async-thread.h"
26 #include "check-integrity.h"
27 #include "rcu-string.h"
28 #include "dev-replace.h"
29 #include "sysfs.h"
30 #include "tree-checker.h"
31 #include "space-info.h"
32 #include "block-group.h"
33
34 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
35         [BTRFS_RAID_RAID10] = {
36                 .sub_stripes    = 2,
37                 .dev_stripes    = 1,
38                 .devs_max       = 0,    /* 0 == as many as possible */
39                 .devs_min       = 4,
40                 .tolerated_failures = 1,
41                 .devs_increment = 2,
42                 .ncopies        = 2,
43                 .nparity        = 0,
44                 .raid_name      = "raid10",
45                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID10,
46                 .mindev_error   = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
47         },
48         [BTRFS_RAID_RAID1] = {
49                 .sub_stripes    = 1,
50                 .dev_stripes    = 1,
51                 .devs_max       = 2,
52                 .devs_min       = 2,
53                 .tolerated_failures = 1,
54                 .devs_increment = 2,
55                 .ncopies        = 2,
56                 .nparity        = 0,
57                 .raid_name      = "raid1",
58                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1,
59                 .mindev_error   = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
60         },
61         [BTRFS_RAID_DUP] = {
62                 .sub_stripes    = 1,
63                 .dev_stripes    = 2,
64                 .devs_max       = 1,
65                 .devs_min       = 1,
66                 .tolerated_failures = 0,
67                 .devs_increment = 1,
68                 .ncopies        = 2,
69                 .nparity        = 0,
70                 .raid_name      = "dup",
71                 .bg_flag        = BTRFS_BLOCK_GROUP_DUP,
72                 .mindev_error   = 0,
73         },
74         [BTRFS_RAID_RAID0] = {
75                 .sub_stripes    = 1,
76                 .dev_stripes    = 1,
77                 .devs_max       = 0,
78                 .devs_min       = 2,
79                 .tolerated_failures = 0,
80                 .devs_increment = 1,
81                 .ncopies        = 1,
82                 .nparity        = 0,
83                 .raid_name      = "raid0",
84                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID0,
85                 .mindev_error   = 0,
86         },
87         [BTRFS_RAID_SINGLE] = {
88                 .sub_stripes    = 1,
89                 .dev_stripes    = 1,
90                 .devs_max       = 1,
91                 .devs_min       = 1,
92                 .tolerated_failures = 0,
93                 .devs_increment = 1,
94                 .ncopies        = 1,
95                 .nparity        = 0,
96                 .raid_name      = "single",
97                 .bg_flag        = 0,
98                 .mindev_error   = 0,
99         },
100         [BTRFS_RAID_RAID5] = {
101                 .sub_stripes    = 1,
102                 .dev_stripes    = 1,
103                 .devs_max       = 0,
104                 .devs_min       = 2,
105                 .tolerated_failures = 1,
106                 .devs_increment = 1,
107                 .ncopies        = 1,
108                 .nparity        = 1,
109                 .raid_name      = "raid5",
110                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID5,
111                 .mindev_error   = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
112         },
113         [BTRFS_RAID_RAID6] = {
114                 .sub_stripes    = 1,
115                 .dev_stripes    = 1,
116                 .devs_max       = 0,
117                 .devs_min       = 3,
118                 .tolerated_failures = 2,
119                 .devs_increment = 1,
120                 .ncopies        = 1,
121                 .nparity        = 2,
122                 .raid_name      = "raid6",
123                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID6,
124                 .mindev_error   = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
125         },
126 };
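/*
 * Illustrative reading of the table above (added commentary, not in the
 * original file): for BTRFS_RAID_RAID10, ncopies = 2 and sub_stripes = 2
 * mean each stripe element exists on two devices, devs_increment = 2 means
 * the device count for this profile grows in pairs, and
 * tolerated_failures = 1 means the array survives the loss of one device.
 */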
127
128 const char *btrfs_bg_type_to_raid_name(u64 flags)
129 {
130         const int index = btrfs_bg_flags_to_raid_index(flags);
131
132         if (index >= BTRFS_NR_RAID_TYPES)
133                 return NULL;
134
135         return btrfs_raid_array[index].raid_name;
136 }
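/*
 * Example (added commentary, not in the original file):
 * btrfs_bg_type_to_raid_name(BTRFS_BLOCK_GROUP_RAID5) yields "raid5", while
 * flags with no profile bit set map to BTRFS_RAID_SINGLE and yield "single".
 */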
137
138 /*
139  * Fill @buf with a textual description of @bg_flags, no more than @size_buf
140  * bytes including the terminating null byte.
141  */
142 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
143 {
144         int i;
145         int ret;
146         char *bp = buf;
147         u64 flags = bg_flags;
148         u32 size_bp = size_buf;
149
150         if (!flags) {
151                 strcpy(bp, "NONE");
152                 return;
153         }
154
155 #define DESCRIBE_FLAG(flag, desc)                                               \
156         do {                                                            \
157                 if (flags & (flag)) {                                   \
158                         ret = snprintf(bp, size_bp, "%s|", (desc));     \
159                         if (ret < 0 || ret >= size_bp)                  \
160                                 goto out_overflow;                      \
161                         size_bp -= ret;                                 \
162                         bp += ret;                                      \
163                         flags &= ~(flag);                               \
164                 }                                                       \
165         } while (0)
166
167         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
168         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
169         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
170
171         DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
172         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
173                 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
174                               btrfs_raid_array[i].raid_name);
175 #undef DESCRIBE_FLAG
176
177         if (flags) {
178                 ret = snprintf(bp, size_bp, "0x%llx|", flags);
179                 size_bp -= ret;
180         }
181
182         if (size_bp < size_buf)
183                 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
184
185         /*
186          * The text is trimmed, it's up to the caller to provide a
187          * sufficiently large buffer.
188          */
189 out_overflow:;
190 }
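/*
 * Minimal usage sketch for btrfs_describe_block_groups() (an illustration,
 * not part of the original file; the buffer size and flag combination are
 * arbitrary assumptions):
 */
#if 0	/* example only, never compiled */
static void example_describe_bg(void)
{
	char buf[64];

	/* Produces "metadata|raid1": flags are consumed, trailing '|' dropped */
	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_METADATA |
				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
}
#endif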
191
192 static int init_first_rw_device(struct btrfs_trans_handle *trans);
193 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
194 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
195 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
196 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
197                              enum btrfs_map_op op,
198                              u64 logical, u64 *length,
199                              struct btrfs_bio **bbio_ret,
200                              int mirror_num, int need_raid_map);
201
202 /*
203  * Device locking
204  * ==============
205  *
206  * There are several mutexes that protect manipulation of devices and low-level
207  * structures like chunks but not block groups, extents or files
208  *
209  * uuid_mutex (global lock)
210  * ------------------------
211  * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
212  * the SCAN_DEV ioctl registration or from mount either implicitly (the first
213  * device) or requested by the device= mount option
214  *
215  * the mutex can be very coarse and can cover long-running operations
216  *
217  * protects: updates to fs_devices counters like missing devices, rw devices,
218  * seeding, structure cloning, opening/closing devices at mount/umount time
219  *
220  * global::fs_devs - add, remove, updates to the global list
221  *
222  * does not protect: manipulation of the fs_devices::devices list!
223  *
224  * btrfs_device::name - renames (write side), read is RCU
225  *
226  * fs_devices::device_list_mutex (per-fs, with RCU)
227  * ------------------------------------------------
228  * protects updates to fs_devices::devices, ie. adding and deleting
229  *
230  * simple list traversal with read-only actions can be done with RCU protection
231  *
232  * may be used to exclude some operations from running concurrently without any
233  * modifications to the list (see write_all_supers)
234  *
235  * balance_mutex
236  * -------------
237  * protects balance structures (status, state) and context accessed from
238  * several places (internally, ioctl)
239  *
240  * chunk_mutex
241  * -----------
242  * protects chunks, adding or removing during allocation, trim or when a new
243  * device is added/removed. Additionally it also protects post_commit_list of
244  * individual devices, since they can be added to the transaction's
245  * post_commit_list only with chunk_mutex held.
246  *
247  * cleaner_mutex
248  * -------------
249  * a big lock that is held by the cleaner thread and prevents running subvolume
250  * cleaning together with relocation or delayed iputs
251  *
252  *
253  * Lock nesting
254  * ============
255  *
256  * uuid_mutex
257  *   volume_mutex
258  *     device_list_mutex
259  *       chunk_mutex
260  *     balance_mutex
261  *
262  *
263  * Exclusive operations, BTRFS_FS_EXCL_OP
264  * ======================================
265  *
266  * Maintains the exclusivity of the following operations that apply to the
267  * whole filesystem and cannot run in parallel.
268  *
269  * - Balance (*)
270  * - Device add
271  * - Device remove
272  * - Device replace (*)
273  * - Resize
274  *
275  * The device operations (as above) can be in one of the following states:
276  *
277  * - Running state
278  * - Paused state
279  * - Completed state
280  *
281  * Only device operations marked with (*) can go into the Paused state for the
282  * following reasons:
283  *
284  * - ioctl (only Balance can be Paused through ioctl)
285  * - filesystem remounted as read-only
286  * - filesystem unmounted and mounted as read-only
287  * - system power-cycle and filesystem mounted as read-only
288  * - filesystem or device errors leading to forced read-only
289  *
290  * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
291  * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
292  * A device operation in Paused or Running state can be canceled or resumed
293  * either by ioctl (Balance only) or when remounted as read-write.
294  * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
295  * completed.
296  */
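/*
 * Minimal sketch of the RCU pattern described above (an illustration, not
 * part of the original file): a read-only traversal of fs_devices::devices
 * needs only rcu_read_lock(), not device_list_mutex.
 */
#if 0	/* example only, never compiled */
static void example_rcu_walk(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list)
		pr_info("devid %llu\n", device->devid);
	rcu_read_unlock();
}
#endif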
297
298 DEFINE_MUTEX(uuid_mutex);
299 static LIST_HEAD(fs_uuids);
300 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
301 {
302         return &fs_uuids;
303 }
304
305 /*
306  * alloc_fs_devices - allocate struct btrfs_fs_devices
307  * @fsid:               if not NULL, copy the UUID to fs_devices::fsid
308  * @metadata_fsid:      if not NULL, copy the UUID to fs_devices::metadata_fsid
309  *
310  * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
311  * The returned struct is not linked onto any lists and can be destroyed with
312  * kfree() right away.
313  */
314 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
315                                                  const u8 *metadata_fsid)
316 {
317         struct btrfs_fs_devices *fs_devs;
318
319         fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
320         if (!fs_devs)
321                 return ERR_PTR(-ENOMEM);
322
323         mutex_init(&fs_devs->device_list_mutex);
324
325         INIT_LIST_HEAD(&fs_devs->devices);
326         INIT_LIST_HEAD(&fs_devs->alloc_list);
327         INIT_LIST_HEAD(&fs_devs->fs_list);
328         if (fsid)
329                 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
330
331         if (metadata_fsid)
332                 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
333         else if (fsid)
334                 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
335
336         return fs_devs;
337 }
338
339 void btrfs_free_device(struct btrfs_device *device)
340 {
341         WARN_ON(!list_empty(&device->post_commit_list));
342         rcu_string_free(device->name);
343         extent_io_tree_release(&device->alloc_state);
344         bio_put(device->flush_bio);
345         kfree(device);
346 }
347
348 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
349 {
350         struct btrfs_device *device;
351         WARN_ON(fs_devices->opened);
352         while (!list_empty(&fs_devices->devices)) {
353                 device = list_entry(fs_devices->devices.next,
354                                     struct btrfs_device, dev_list);
355                 list_del(&device->dev_list);
356                 btrfs_free_device(device);
357         }
358         kfree(fs_devices);
359 }
360
361 void __exit btrfs_cleanup_fs_uuids(void)
362 {
363         struct btrfs_fs_devices *fs_devices;
364
365         while (!list_empty(&fs_uuids)) {
366                 fs_devices = list_entry(fs_uuids.next,
367                                         struct btrfs_fs_devices, fs_list);
368                 list_del(&fs_devices->fs_list);
369                 free_fs_devices(fs_devices);
370         }
371 }
372
373 /*
374  * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
375  * Returned struct is not linked onto any lists and must be destroyed using
376  * btrfs_free_device.
377  */
378 static struct btrfs_device *__alloc_device(void)
379 {
380         struct btrfs_device *dev;
381
382         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
383         if (!dev)
384                 return ERR_PTR(-ENOMEM);
385
386         /*
387          * Preallocate a bio that's always going to be used for flushing device
388          * barriers and matches the device lifespan
389          */
390         dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
391         if (!dev->flush_bio) {
392                 kfree(dev);
393                 return ERR_PTR(-ENOMEM);
394         }
395
396         INIT_LIST_HEAD(&dev->dev_list);
397         INIT_LIST_HEAD(&dev->dev_alloc_list);
398         INIT_LIST_HEAD(&dev->post_commit_list);
399
400         atomic_set(&dev->reada_in_flight, 0);
401         atomic_set(&dev->dev_stats_ccnt, 0);
402         btrfs_device_data_ordered_init(dev);
403         INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
404         INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
405         extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);
406
407         return dev;
408 }
409
410 static noinline struct btrfs_fs_devices *find_fsid(
411                 const u8 *fsid, const u8 *metadata_fsid)
412 {
413         struct btrfs_fs_devices *fs_devices;
414
415         ASSERT(fsid);
416
417         if (metadata_fsid) {
418                 /*
419                  * Handle scanned device having completed its fsid change but
420                  * belonging to a fs_devices that was created by first scanning
421                  * a device which didn't have its fsid/metadata_uuid changed
422                  * at all and the CHANGING_FSID_V2 flag set.
423                  */
424                 list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
425                         if (fs_devices->fsid_change &&
426                             memcmp(metadata_fsid, fs_devices->fsid,
427                                    BTRFS_FSID_SIZE) == 0 &&
428                             memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
429                                    BTRFS_FSID_SIZE) == 0) {
430                                 return fs_devices;
431                         }
432                 }
433                 /*
434                  * Handle scanned device having completed its fsid change but
435                  * belonging to a fs_devices that was created by a device that
436                  * has an outdated pair of fsid/metadata_uuid and
437                  * CHANGING_FSID_V2 flag set.
438                  */
439                 list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
440                         if (fs_devices->fsid_change &&
441                             memcmp(fs_devices->metadata_uuid,
442                                    fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
443                             memcmp(metadata_fsid, fs_devices->metadata_uuid,
444                                    BTRFS_FSID_SIZE) == 0) {
445                                 return fs_devices;
446                         }
447                 }
448         }
449
450         /* Handle non-split brain cases */
451         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
452                 if (metadata_fsid) {
453                         if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
454                             && memcmp(metadata_fsid, fs_devices->metadata_uuid,
455                                       BTRFS_FSID_SIZE) == 0)
456                                 return fs_devices;
457                 } else {
458                         if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
459                                 return fs_devices;
460                 }
461         }
462         return NULL;
463 }
464
465 static int
466 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
467                       int flush, struct block_device **bdev,
468                       struct buffer_head **bh)
469 {
470         int ret;
471
472         *bdev = blkdev_get_by_path(device_path, flags, holder);
473
474         if (IS_ERR(*bdev)) {
475                 ret = PTR_ERR(*bdev);
476                 goto error;
477         }
478
479         if (flush)
480                 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
481         ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
482         if (ret) {
483                 blkdev_put(*bdev, flags);
484                 goto error;
485         }
486         invalidate_bdev(*bdev);
487         *bh = btrfs_read_dev_super(*bdev);
488         if (IS_ERR(*bh)) {
489                 ret = PTR_ERR(*bh);
490                 blkdev_put(*bdev, flags);
491                 goto error;
492         }
493
494         return 0;
495
496 error:
497         *bdev = NULL;
498         *bh = NULL;
499         return ret;
500 }
501
502 static bool device_path_matched(const char *path, struct btrfs_device *device)
503 {
504         int found;
505
506         rcu_read_lock();
507         found = strcmp(rcu_str_deref(device->name), path);
508         rcu_read_unlock();
509
510         return found == 0;
511 }
512
513 /*
514  *  Search and remove all stale devices (devices which are not mounted).
515  *  When both inputs are NULL, it will search and release all stale devices.
516  *  path:       Optional. When provided, it will release all unmounted devices
517  *              matching this path only.
518  *  skip_dev:   Optional. Will skip this device when searching for the stale
519  *              devices.
520  *  Return:     0 for success or if @path is NULL.
521  *              -EBUSY if @path is a mounted device.
522  *              -ENOENT if @path does not match any device in the list.
523  */
524 static int btrfs_free_stale_devices(const char *path,
525                                      struct btrfs_device *skip_device)
526 {
527         struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
528         struct btrfs_device *device, *tmp_device;
529         int ret = 0;
530
531         if (path)
532                 ret = -ENOENT;
533
534         list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
535
536                 mutex_lock(&fs_devices->device_list_mutex);
537                 list_for_each_entry_safe(device, tmp_device,
538                                          &fs_devices->devices, dev_list) {
539                         if (skip_device && skip_device == device)
540                                 continue;
541                         if (path && !device->name)
542                                 continue;
543                         if (path && !device_path_matched(path, device))
544                                 continue;
545                         if (fs_devices->opened) {
546                                 /* for an already deleted device return 0 */
547                                 if (path && ret != 0)
548                                         ret = -EBUSY;
549                                 break;
550                         }
551
552                         /* delete the stale device */
553                         fs_devices->num_devices--;
554                         list_del(&device->dev_list);
555                         btrfs_free_device(device);
556
557                         ret = 0;
558                         if (fs_devices->num_devices == 0)
559                                 break;
560                 }
561                 mutex_unlock(&fs_devices->device_list_mutex);
562
563                 if (fs_devices->num_devices == 0) {
564                         btrfs_sysfs_remove_fsid(fs_devices);
565                         list_del(&fs_devices->fs_list);
566                         free_fs_devices(fs_devices);
567                 }
568         }
569
570         return ret;
571 }
572
573 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
574                         struct btrfs_device *device, fmode_t flags,
575                         void *holder)
576 {
577         struct request_queue *q;
578         struct block_device *bdev;
579         struct buffer_head *bh;
580         struct btrfs_super_block *disk_super;
581         u64 devid;
582         int ret;
583
584         if (device->bdev)
585                 return -EINVAL;
586         if (!device->name)
587                 return -EINVAL;
588
589         ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
590                                     &bdev, &bh);
591         if (ret)
592                 return ret;
593
594         disk_super = (struct btrfs_super_block *)bh->b_data;
595         devid = btrfs_stack_device_id(&disk_super->dev_item);
596         if (devid != device->devid)
597                 goto error_brelse;
598
599         if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
600                 goto error_brelse;
601
602         device->generation = btrfs_super_generation(disk_super);
603
604         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
605                 if (btrfs_super_incompat_flags(disk_super) &
606                     BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
607                         pr_err(
608                 "BTRFS: Invalid seeding and uuid-changed device detected\n");
609                         goto error_brelse;
610                 }
611
612                 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
613                 fs_devices->seeding = 1;
614         } else {
615                 if (bdev_read_only(bdev))
616                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
617                 else
618                         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
619         }
620
621         q = bdev_get_queue(bdev);
622         if (!blk_queue_nonrot(q))
623                 fs_devices->rotating = 1;
624
625         device->bdev = bdev;
626         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
627         device->mode = flags;
628
629         fs_devices->open_devices++;
630         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
631             device->devid != BTRFS_DEV_REPLACE_DEVID) {
632                 fs_devices->rw_devices++;
633                 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
634         }
635         brelse(bh);
636
637         return 0;
638
639 error_brelse:
640         brelse(bh);
641         blkdev_put(bdev, flags);
642
643         return -EINVAL;
644 }
645
646 /*
647  * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
648  * being created with a disk that has already completed its fsid change.
649  */
650 static struct btrfs_fs_devices *find_fsid_inprogress(
651                                         struct btrfs_super_block *disk_super)
652 {
653         struct btrfs_fs_devices *fs_devices;
654
655         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
656                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
657                            BTRFS_FSID_SIZE) != 0 &&
658                     memcmp(fs_devices->metadata_uuid, disk_super->fsid,
659                            BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
660                         return fs_devices;
661                 }
662         }
663
664         return NULL;
665 }
666
667
668 static struct btrfs_fs_devices *find_fsid_changed(
669                                         struct btrfs_super_block *disk_super)
670 {
671         struct btrfs_fs_devices *fs_devices;
672
673         /*
674           * Handles the case where the scanned device is part of an fs that had
675           * multiple successful changes of FSID but the current device didn't
676           * observe it, meaning our fsid will be different from theirs.
677          */
678         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
679                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
680                            BTRFS_FSID_SIZE) != 0 &&
681                     memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
682                            BTRFS_FSID_SIZE) == 0 &&
683                     memcmp(fs_devices->fsid, disk_super->fsid,
684                            BTRFS_FSID_SIZE) != 0) {
685                         return fs_devices;
686                 }
687         }
688
689         return NULL;
690 }
691 /*
692  * Add new device to list of registered devices
693  *
694  * Returns:
695  * device pointer which was just added or updated when successful
696  * error pointer when failed
697  */
698 static noinline struct btrfs_device *device_list_add(const char *path,
699                            struct btrfs_super_block *disk_super,
700                            bool *new_device_added)
701 {
702         struct btrfs_device *device;
703         struct btrfs_fs_devices *fs_devices = NULL;
704         struct rcu_string *name;
705         u64 found_transid = btrfs_super_generation(disk_super);
706         u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
707         bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
708                 BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
709         bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
710                                         BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
711
712         if (fsid_change_in_progress) {
713                 if (!has_metadata_uuid) {
714                         /*
715                          * When we have an image which has CHANGING_FSID_V2 set
716                          * it might belong either to a filesystem which has
717                          * disks with a completed fsid change or to an fs with
718                          * no UUID changes in effect; handle both.
719                          */
720                         fs_devices = find_fsid_inprogress(disk_super);
721                         if (!fs_devices)
722                                 fs_devices = find_fsid(disk_super->fsid, NULL);
723                 } else {
724                         fs_devices = find_fsid_changed(disk_super);
725                 }
726         } else if (has_metadata_uuid) {
727                 fs_devices = find_fsid(disk_super->fsid,
728                                        disk_super->metadata_uuid);
729         } else {
730                 fs_devices = find_fsid(disk_super->fsid, NULL);
731         }
732
733
734         if (!fs_devices) {
735                 if (has_metadata_uuid)
736                         fs_devices = alloc_fs_devices(disk_super->fsid,
737                                                       disk_super->metadata_uuid);
738                 else
739                         fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
740
741                 if (IS_ERR(fs_devices))
742                         return ERR_CAST(fs_devices);
743
744                 fs_devices->fsid_change = fsid_change_in_progress;
745
746                 mutex_lock(&fs_devices->device_list_mutex);
747                 list_add(&fs_devices->fs_list, &fs_uuids);
748
749                 device = NULL;
750         } else {
751                 mutex_lock(&fs_devices->device_list_mutex);
752                 device = btrfs_find_device(fs_devices, devid,
753                                 disk_super->dev_item.uuid, NULL, false);
754
755                 /*
756                  * If this disk has been pulled into an fs devices created by
757                  * a device which had the CHANGING_FSID_V2 flag then replace the
758                  * metadata_uuid/fsid values of the fs_devices.
759                  */
760                 if (has_metadata_uuid && fs_devices->fsid_change &&
761                     found_transid > fs_devices->latest_generation) {
762                         memcpy(fs_devices->fsid, disk_super->fsid,
763                                         BTRFS_FSID_SIZE);
764                         memcpy(fs_devices->metadata_uuid,
765                                         disk_super->metadata_uuid, BTRFS_FSID_SIZE);
766
767                         fs_devices->fsid_change = false;
768                 }
769         }
770
771         if (!device) {
772                 if (fs_devices->opened) {
773                         mutex_unlock(&fs_devices->device_list_mutex);
774                         return ERR_PTR(-EBUSY);
775                 }
776
777                 device = btrfs_alloc_device(NULL, &devid,
778                                             disk_super->dev_item.uuid);
779                 if (IS_ERR(device)) {
780                         mutex_unlock(&fs_devices->device_list_mutex);
781                         /* we can safely leave the fs_devices entry around */
782                         return device;
783                 }
784
785                 name = rcu_string_strdup(path, GFP_NOFS);
786                 if (!name) {
787                         btrfs_free_device(device);
788                         mutex_unlock(&fs_devices->device_list_mutex);
789                         return ERR_PTR(-ENOMEM);
790                 }
791                 rcu_assign_pointer(device->name, name);
792
793                 list_add_rcu(&device->dev_list, &fs_devices->devices);
794                 fs_devices->num_devices++;
795
796                 device->fs_devices = fs_devices;
797                 *new_device_added = true;
798
799                 if (disk_super->label[0])
800                         pr_info(
801         "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
802                                 disk_super->label, devid, found_transid, path,
803                                 current->comm, task_pid_nr(current));
804                 else
805                         pr_info(
806         "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
807                                 disk_super->fsid, devid, found_transid, path,
808                                 current->comm, task_pid_nr(current));
809
810         } else if (!device->name || strcmp(device->name->str, path)) {
811                 /*
812                  * When FS is already mounted.
813                  * 1. If you are here and if the device->name is NULL that
814                  *    means this device was missing at time of FS mount.
815                  * 2. If you are here and if the device->name is different
816                  *    from 'path' that means either
817                  *      a. The same device disappeared and reappeared with
818                  *         different name. or
819                  *      b. The missing-disk-which-was-replaced, has
820                  *         reappeared now.
821                  *
822          * We must allow 1 and 2a above. But 2b would be spurious
823          * and unintentional.
824          *
825          * Further, in case of 1 and 2a above, the disk at 'path'
826          * would have missed some transactions while it was away, and
827          * in case of 2a the stale bdev has to be updated as well.
828          * 2b must not be allowed at any time.
829                  */
830
831                 /*
832                  * For now, we do allow update to btrfs_fs_device through the
833                  * btrfs dev scan cli after FS has been mounted.  We're still
834                  * tracking a problem where systems fail mount by subvolume id
835                  * when we reject replacement on a mounted FS.
836                  */
837                 if (!fs_devices->opened && found_transid < device->generation) {
838                         /*
839                          * That is, if the FS is _not_ mounted and if you
840                          * are here, that means there is more than one
841                          * disk with the same uuid and devid. We keep the one
842                          * with the larger generation number or the last-in if
843                          * generations are equal.
844                          */
845                         mutex_unlock(&fs_devices->device_list_mutex);
846                         return ERR_PTR(-EEXIST);
847                 }
848
849                 /*
850                  * We are going to replace the device path for a given devid,
851                  * make sure it's the same device if the device is mounted
852                  */
853                 if (device->bdev) {
854                         struct block_device *path_bdev;
855
856                         path_bdev = lookup_bdev(path);
857                         if (IS_ERR(path_bdev)) {
858                                 mutex_unlock(&fs_devices->device_list_mutex);
859                                 return ERR_CAST(path_bdev);
860                         }
861
862                         if (device->bdev != path_bdev) {
863                                 bdput(path_bdev);
864                                 mutex_unlock(&fs_devices->device_list_mutex);
865                                 btrfs_warn_in_rcu(device->fs_info,
866                         "duplicate device fsid:devid for %pU:%llu old:%s new:%s",
867                                         disk_super->fsid, devid,
868                                         rcu_str_deref(device->name), path);
869                                 return ERR_PTR(-EEXIST);
870                         }
871                         bdput(path_bdev);
872                         btrfs_info_in_rcu(device->fs_info,
873                                 "device fsid %pU devid %llu moved old:%s new:%s",
874                                 disk_super->fsid, devid,
875                                 rcu_str_deref(device->name), path);
876                 }
877
878                 name = rcu_string_strdup(path, GFP_NOFS);
879                 if (!name) {
880                         mutex_unlock(&fs_devices->device_list_mutex);
881                         return ERR_PTR(-ENOMEM);
882                 }
883                 rcu_string_free(device->name);
884                 rcu_assign_pointer(device->name, name);
885                 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
886                         fs_devices->missing_devices--;
887                         clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
888                 }
889         }
890
891         /*
892          * Unmount does not free the btrfs_device struct but would zero
893          * generation along with most of the other members. So just update
894          * it back. We need it to pick the disk with largest generation
895          * (as above).
896          */
897         if (!fs_devices->opened) {
898                 device->generation = found_transid;
899                 fs_devices->latest_generation = max_t(u64, found_transid,
900                                                 fs_devices->latest_generation);
901         }
902
903         fs_devices->total_devices = btrfs_super_num_devices(disk_super);
904
905         mutex_unlock(&fs_devices->device_list_mutex);
906         return device;
907 }
908
909 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
910 {
911         struct btrfs_fs_devices *fs_devices;
912         struct btrfs_device *device;
913         struct btrfs_device *orig_dev;
914         int ret = 0;
915
916         fs_devices = alloc_fs_devices(orig->fsid, NULL);
917         if (IS_ERR(fs_devices))
918                 return fs_devices;
919
920         mutex_lock(&orig->device_list_mutex);
921         fs_devices->total_devices = orig->total_devices;
922
923         list_for_each_entry(orig_dev, &orig->devices, dev_list) {
924                 struct rcu_string *name;
925
926                 device = btrfs_alloc_device(NULL, &orig_dev->devid,
927                                             orig_dev->uuid);
928                 if (IS_ERR(device)) {
929                         ret = PTR_ERR(device);
930                         goto error;
931                 }
932
933                 /*
934                  * This is ok to do without the rcu read lock held because we
935                  * hold the uuid mutex, so nothing we touch in here is going to disappear.
936                  */
937                 if (orig_dev->name) {
938                         name = rcu_string_strdup(orig_dev->name->str,
939                                         GFP_KERNEL);
940                         if (!name) {
941                                 btrfs_free_device(device);
942                                 ret = -ENOMEM;
943                                 goto error;
944                         }
945                         rcu_assign_pointer(device->name, name);
946                 }
947
948                 list_add(&device->dev_list, &fs_devices->devices);
949                 device->fs_devices = fs_devices;
950                 fs_devices->num_devices++;
951         }
952         mutex_unlock(&orig->device_list_mutex);
953         return fs_devices;
954 error:
955         mutex_unlock(&orig->device_list_mutex);
956         free_fs_devices(fs_devices);
957         return ERR_PTR(ret);
958 }
959
960 /*
961  * After we have read the system tree and know devids belonging to
962  * this filesystem, remove the devices which do not belong there.
963  */
964 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
965 {
966         struct btrfs_device *device, *next;
967         struct btrfs_device *latest_dev = NULL;
968
969         mutex_lock(&uuid_mutex);
970 again:
971         /* This is the initialized path, it is safe to release the devices. */
972         list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
973                 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
974                                                         &device->dev_state)) {
975                         if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
976                              &device->dev_state) &&
977                              (!latest_dev ||
978                               device->generation > latest_dev->generation)) {
979                                 latest_dev = device;
980                         }
981                         continue;
982                 }
983
984                 if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
985                         /*
986                          * In the first step, keep the device which has
987                          * the correct fsid and the devid that is used
988                          * for the dev_replace procedure.
989                          * In the second step, the dev_replace state is
990                          * read from the device tree and it is known
991                          * whether the procedure is really active or
992                          * not, which means whether this device is
993                          * used or whether it should be removed.
994                          */
995                         if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
996                                                   &device->dev_state)) {
997                                 continue;
998                         }
999                 }
1000                 if (device->bdev) {
1001                         blkdev_put(device->bdev, device->mode);
1002                         device->bdev = NULL;
1003                         fs_devices->open_devices--;
1004                 }
1005                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1006                         list_del_init(&device->dev_alloc_list);
1007                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1008                         if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1009                                       &device->dev_state))
1010                                 fs_devices->rw_devices--;
1011                 }
1012                 list_del_init(&device->dev_list);
1013                 fs_devices->num_devices--;
1014                 btrfs_free_device(device);
1015         }
1016
1017         if (fs_devices->seed) {
1018                 fs_devices = fs_devices->seed;
1019                 goto again;
1020         }
1021
1022         fs_devices->latest_bdev = latest_dev->bdev;
1023
1024         mutex_unlock(&uuid_mutex);
1025 }
1026
1027 static void btrfs_close_bdev(struct btrfs_device *device)
1028 {
1029         if (!device->bdev)
1030                 return;
1031
1032         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1033                 sync_blockdev(device->bdev);
1034                 invalidate_bdev(device->bdev);
1035         }
1036
1037         blkdev_put(device->bdev, device->mode);
1038 }
1039
1040 static void btrfs_close_one_device(struct btrfs_device *device)
1041 {
1042         struct btrfs_fs_devices *fs_devices = device->fs_devices;
1043         struct btrfs_device *new_device;
1044         struct rcu_string *name;
1045
1046         if (device->bdev)
1047                 fs_devices->open_devices--;
1048
1049         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1050             device->devid != BTRFS_DEV_REPLACE_DEVID) {
1051                 list_del_init(&device->dev_alloc_list);
1052                 fs_devices->rw_devices--;
1053         }
1054
1055         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1056                 fs_devices->missing_devices--;
1057
1058         btrfs_close_bdev(device);
1059
1060         new_device = btrfs_alloc_device(NULL, &device->devid,
1061                                         device->uuid);
1062         BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
1063
1064         /* Safe because we are under uuid_mutex */
1065         if (device->name) {
1066                 name = rcu_string_strdup(device->name->str, GFP_NOFS);
1067                 BUG_ON(!name); /* -ENOMEM */
1068                 rcu_assign_pointer(new_device->name, name);
1069         }
1070
1071         list_replace_rcu(&device->dev_list, &new_device->dev_list);
1072         new_device->fs_devices = device->fs_devices;
1073
1074         synchronize_rcu();
1075         btrfs_free_device(device);
1076 }
1077
1078 static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
1079 {
1080         struct btrfs_device *device, *tmp;
1081
1082         if (--fs_devices->opened > 0)
1083                 return 0;
1084
1085         mutex_lock(&fs_devices->device_list_mutex);
1086         list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
1087                 btrfs_close_one_device(device);
1088         }
1089         mutex_unlock(&fs_devices->device_list_mutex);
1090
1091         WARN_ON(fs_devices->open_devices);
1092         WARN_ON(fs_devices->rw_devices);
1093         fs_devices->opened = 0;
1094         fs_devices->seeding = 0;
1095
1096         return 0;
1097 }
1098
1099 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1100 {
1101         struct btrfs_fs_devices *seed_devices = NULL;
1102         int ret;
1103
1104         mutex_lock(&uuid_mutex);
1105         ret = close_fs_devices(fs_devices);
1106         if (!fs_devices->opened) {
1107                 seed_devices = fs_devices->seed;
1108                 fs_devices->seed = NULL;
1109         }
1110         mutex_unlock(&uuid_mutex);
1111
1112         while (seed_devices) {
1113                 fs_devices = seed_devices;
1114                 seed_devices = fs_devices->seed;
1115                 close_fs_devices(fs_devices);
1116                 free_fs_devices(fs_devices);
1117         }
1118         return ret;
1119 }
1120
1121 static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1122                                 fmode_t flags, void *holder)
1123 {
1124         struct btrfs_device *device;
1125         struct btrfs_device *latest_dev = NULL;
1126         int ret = 0;
1127
1128         flags |= FMODE_EXCL;
1129
1130         list_for_each_entry(device, &fs_devices->devices, dev_list) {
1131                 /* Just open everything we can; ignore failures here */
1132                 if (btrfs_open_one_device(fs_devices, device, flags, holder))
1133                         continue;
1134
1135                 if (!latest_dev ||
1136                     device->generation > latest_dev->generation)
1137                         latest_dev = device;
1138         }
1139         if (fs_devices->open_devices == 0) {
1140                 ret = -EINVAL;
1141                 goto out;
1142         }
1143         fs_devices->opened = 1;
1144         fs_devices->latest_bdev = latest_dev->bdev;
1145         fs_devices->total_rw_bytes = 0;
1146 out:
1147         return ret;
1148 }
1149
1150 static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
1151 {
1152         struct btrfs_device *dev1, *dev2;
1153
1154         dev1 = list_entry(a, struct btrfs_device, dev_list);
1155         dev2 = list_entry(b, struct btrfs_device, dev_list);
1156
1157         if (dev1->devid < dev2->devid)
1158                 return -1;
1159         else if (dev1->devid > dev2->devid)
1160                 return 1;
1161         return 0;
1162 }
1163
1164 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1165                        fmode_t flags, void *holder)
1166 {
1167         int ret;
1168
1169         lockdep_assert_held(&uuid_mutex);
1170
1171         mutex_lock(&fs_devices->device_list_mutex);
1172         if (fs_devices->opened) {
1173                 fs_devices->opened++;
1174                 ret = 0;
1175         } else {
1176                 list_sort(NULL, &fs_devices->devices, devid_cmp);
1177                 ret = open_fs_devices(fs_devices, flags, holder);
1178         }
1179         mutex_unlock(&fs_devices->device_list_mutex);
1180
1181         return ret;
1182 }
1183
1184 static void btrfs_release_disk_super(struct page *page)
1185 {
1186         kunmap(page);
1187         put_page(page);
1188 }
1189
1190 static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
1191                                  struct page **page,
1192                                  struct btrfs_super_block **disk_super)
1193 {
1194         void *p;
1195         pgoff_t index;
1196
1197         /* make sure our super fits in the device */
1198         if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1199                 return 1;
1200
1201         /* make sure our super fits in the page */
1202         if (sizeof(**disk_super) > PAGE_SIZE)
1203                 return 1;
1204
1205         /* make sure our super doesn't straddle pages on disk */
1206         index = bytenr >> PAGE_SHIFT;
1207         if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
1208                 return 1;
1209
1210         /* pull in the page with our super */
1211         *page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
1212                                    index, GFP_KERNEL);
1213
1214         if (IS_ERR_OR_NULL(*page))
1215                 return 1;
1216
1217         p = kmap(*page);
1218
1219         /* align our pointer to the offset of the super block */
1220         *disk_super = p + offset_in_page(bytenr);
1221
1222         if (btrfs_super_bytenr(*disk_super) != bytenr ||
1223             btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
1224                 btrfs_release_disk_super(*page);
1225                 return 1;
1226         }
1227
1228         if ((*disk_super)->label[0] &&
1229                 (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
1230                 (*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';
1231
1232         return 0;
1233 }
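/*
 * Worked example for the checks above (added commentary, not in the
 * original file): the primary superblock sits at bytenr 65536
 * (btrfs_sb_offset(0)). With 4K pages, index = 65536 >> PAGE_SHIFT = 16
 * and offset_in_page(65536) = 0, so the superblock starts at the very
 * beginning of page 16 and cannot straddle a page boundary.
 */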
1234
1235 int btrfs_forget_devices(const char *path)
1236 {
1237         int ret;
1238
1239         mutex_lock(&uuid_mutex);
1240         ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1241         mutex_unlock(&uuid_mutex);
1242
1243         return ret;
1244 }
1245
1246 /*
1247  * Look for a btrfs signature on a device. This may be called out of the mount path
1248  * and we are not allowed to call set_blocksize during the scan. The superblock
1249  * is read via the pagecache.
1250  */
1251 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1252                                            void *holder)
1253 {
1254         struct btrfs_super_block *disk_super;
1255         bool new_device_added = false;
1256         struct btrfs_device *device = NULL;
1257         struct block_device *bdev;
1258         struct page *page;
1259         u64 bytenr;
1260
1261         lockdep_assert_held(&uuid_mutex);
1262
1263         /*
1264          * we would like to check all the supers, but that would make
1265          * a btrfs mount succeed after a mkfs from a different FS.
1266          * So, we need to add a special mount option to scan for
1267          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1268          */
1269         bytenr = btrfs_sb_offset(0);
1270         flags |= FMODE_EXCL;
1271
1272         bdev = blkdev_get_by_path(path, flags, holder);
1273         if (IS_ERR(bdev))
1274                 return ERR_CAST(bdev);
1275
1276         if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
1277                 device = ERR_PTR(-EINVAL);
1278                 goto error_bdev_put;
1279         }
1280
1281         device = device_list_add(path, disk_super, &new_device_added);
1282         if (!IS_ERR(device)) {
1283                 if (new_device_added)
1284                         btrfs_free_stale_devices(path, device);
1285         }
1286
1287         btrfs_release_disk_super(page);
1288
1289 error_bdev_put:
1290         blkdev_put(bdev, flags);
1291
1292         return device;
1293 }
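/*
 * Minimal sketch of a caller of btrfs_scan_one_device() (an illustration,
 * not part of the original file; the FMODE_READ flags and NULL holder are
 * assumptions). uuid_mutex must be held, as asserted by lockdep above.
 */
#if 0	/* example only, never compiled */
static struct btrfs_device *example_scan(const char *path)
{
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	device = btrfs_scan_one_device(path, FMODE_READ, NULL);
	mutex_unlock(&uuid_mutex);
	return device;	/* may be an ERR_PTR() */
}
#endif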
1294
1295 /*
1296  * Try to find a chunk that intersects the [start, start + len] range and when
1297  * one such is found, record the end of it in *start.
1298  */
1299 static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1300                                     u64 len)
1301 {
1302         u64 physical_start, physical_end;
1303
1304         lockdep_assert_held(&device->fs_info->chunk_mutex);
1305
1306         if (!find_first_extent_bit(&device->alloc_state, *start,
1307                                    &physical_start, &physical_end,
1308                                    CHUNK_ALLOCATED, NULL)) {
1309
1310                 if (in_range(physical_start, *start, len) ||
1311                     in_range(*start, physical_start,
1312                              physical_end - physical_start)) {
1313                         *start = physical_end + 1;
1314                         return true;
1315                 }
1316         }
1317         return false;
1318 }
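/*
 * Worked example (added commentary, not in the original file): with a
 * chunk allocated at [16M, 24M) and a search for a 4M hole starting at
 * *start = 20M, the ranges overlap, so *start is advanced past the chunk's
 * end and the caller retries the search from there.
 */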
1319
1320
1321 /*
1322  * find_free_dev_extent_start - find free space in the specified device
1323  * @device:       the device which we search the free space in
1324  * @num_bytes:    the size of the free space that we need
1325  * @search_start: the position from which to begin the search
1326  * @start:        store the start of the free space.
1327  * @len:          the size of the free space that we find, or the size
1328  *                of the max free space if we don't find suitable free space
1329  *
1330  * this uses a pretty simple search, the expectation is that it is
1331  * called very infrequently and that a given device has a small number
1332  * of extents
1333  *
1334  * @start is used to store the start of the free space if we find it. But if we
1335  * don't find suitable free space, it will be used to store the start position
1336  * of the max free space.
1337  *
1338  * @len is used to store the size of the free space that we find.
1339  * But if we don't find suitable free space, it is used to store the size of
1340  * the max free space.
1341  *
1342  * NOTE: This function will search the *commit* root of the device tree, and does
1343  * an extra check to ensure dev extents are not double allocated.
1344  * This makes the function safe to allocate dev extents but may not report
1345  * correct usable device space, as a device extent freed in the current
1346  * transaction is not reported as available.
1347  */
1348 static int find_free_dev_extent_start(struct btrfs_device *device,
1349                                 u64 num_bytes, u64 search_start, u64 *start,
1350                                 u64 *len)
1351 {
1352         struct btrfs_fs_info *fs_info = device->fs_info;
1353         struct btrfs_root *root = fs_info->dev_root;
1354         struct btrfs_key key;
1355         struct btrfs_dev_extent *dev_extent;
1356         struct btrfs_path *path;
1357         u64 hole_size;
1358         u64 max_hole_start;
1359         u64 max_hole_size;
1360         u64 extent_end;
1361         u64 search_end = device->total_bytes;
1362         int ret;
1363         int slot;
1364         struct extent_buffer *l;
1365
1366         /*
1367          * We don't want to overwrite the superblock on the drive nor any area
1368          * used by the boot loader (grub for example), so we make sure to start
1369          * at an offset of at least 1MB.
1370          */
1371         search_start = max_t(u64, search_start, SZ_1M);
1372
1373         path = btrfs_alloc_path();
1374         if (!path)
1375                 return -ENOMEM;
1376
1377         max_hole_start = search_start;
1378         max_hole_size = 0;
1379
1380 again:
1381         if (search_start >= search_end ||
1382                 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1383                 ret = -ENOSPC;
1384                 goto out;
1385         }
1386
1387         path->reada = READA_FORWARD;
1388         path->search_commit_root = 1;
1389         path->skip_locking = 1;
1390
1391         key.objectid = device->devid;
1392         key.offset = search_start;
1393         key.type = BTRFS_DEV_EXTENT_KEY;
1394
1395         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1396         if (ret < 0)
1397                 goto out;
1398         if (ret > 0) {
1399                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1400                 if (ret < 0)
1401                         goto out;
1402         }
1403
1404         while (1) {
1405                 l = path->nodes[0];
1406                 slot = path->slots[0];
1407                 if (slot >= btrfs_header_nritems(l)) {
1408                         ret = btrfs_next_leaf(root, path);
1409                         if (ret == 0)
1410                                 continue;
1411                         if (ret < 0)
1412                                 goto out;
1413
1414                         break;
1415                 }
1416                 btrfs_item_key_to_cpu(l, &key, slot);
1417
1418                 if (key.objectid < device->devid)
1419                         goto next;
1420
1421                 if (key.objectid > device->devid)
1422                         break;
1423
1424                 if (key.type != BTRFS_DEV_EXTENT_KEY)
1425                         goto next;
1426
1427                 if (key.offset > search_start) {
1428                         hole_size = key.offset - search_start;
1429
1430                         /*
1431                          * Have to check before we set max_hole_start, otherwise
1432                          * we could end up sending back this offset anyway.
1433                          */
1434                         if (contains_pending_extent(device, &search_start,
1435                                                     hole_size)) {
1436                                 if (key.offset >= search_start)
1437                                         hole_size = key.offset - search_start;
1438                                 else
1439                                         hole_size = 0;
1440                         }
1441
1442                         if (hole_size > max_hole_size) {
1443                                 max_hole_start = search_start;
1444                                 max_hole_size = hole_size;
1445                         }
1446
1447                         /*
1448                          * If this free space is greater than what we need,
1449                          * it must be the max free space that we have found
1450                          * until now, so max_hole_start must point to the start
1451                          * of this free space and the length of this free space
1452                          * is stored in max_hole_size. Thus, we return
1453                          * max_hole_start and max_hole_size and go back to the
1454                          * caller.
1455                          */
1456                         if (hole_size >= num_bytes) {
1457                                 ret = 0;
1458                                 goto out;
1459                         }
1460                 }
1461
1462                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1463                 extent_end = key.offset + btrfs_dev_extent_length(l,
1464                                                                   dev_extent);
1465                 if (extent_end > search_start)
1466                         search_start = extent_end;
1467 next:
1468                 path->slots[0]++;
1469                 cond_resched();
1470         }
1471
1472         /*
1473          * At this point, search_start should be the end of
1474          * allocated dev extents, and when shrinking the device,
1475          * search_end may be smaller than search_start.
1476          */
1477         if (search_end > search_start) {
1478                 hole_size = search_end - search_start;
1479
1480                 if (contains_pending_extent(device, &search_start, hole_size)) {
1481                         btrfs_release_path(path);
1482                         goto again;
1483                 }
1484
1485                 if (hole_size > max_hole_size) {
1486                         max_hole_start = search_start;
1487                         max_hole_size = hole_size;
1488                 }
1489         }
1490
1491         /* See above. */
1492         if (max_hole_size < num_bytes)
1493                 ret = -ENOSPC;
1494         else
1495                 ret = 0;
1496
1497 out:
1498         btrfs_free_path(path);
1499         *start = max_hole_start;
1500         if (len)
1501                 *len = max_hole_size;
1502         return ret;
1503 }
1504
1505 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1506                          u64 *start, u64 *len)
1507 {
1508         /* FIXME use last free of some kind */
1509         return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1510 }
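
/*
 * A minimal usage sketch (hypothetical caller, illustrative values only):
 *
 *	u64 start, len;
 *	int ret = find_free_dev_extent(device, SZ_1G, &start, &len);
 *
 * On success, [start, start + SZ_1G) is free in the commit root; on -ENOSPC,
 * start/len instead describe the largest hole that was found.
 */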
1511
1512 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1513                           struct btrfs_device *device,
1514                           u64 start, u64 *dev_extent_len)
1515 {
1516         struct btrfs_fs_info *fs_info = device->fs_info;
1517         struct btrfs_root *root = fs_info->dev_root;
1518         int ret;
1519         struct btrfs_path *path;
1520         struct btrfs_key key;
1521         struct btrfs_key found_key;
1522         struct extent_buffer *leaf = NULL;
1523         struct btrfs_dev_extent *extent = NULL;
1524
1525         path = btrfs_alloc_path();
1526         if (!path)
1527                 return -ENOMEM;
1528
1529         key.objectid = device->devid;
1530         key.offset = start;
1531         key.type = BTRFS_DEV_EXTENT_KEY;
1532 again:
1533         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1534         if (ret > 0) {
1535                 ret = btrfs_previous_item(root, path, key.objectid,
1536                                           BTRFS_DEV_EXTENT_KEY);
1537                 if (ret)
1538                         goto out;
1539                 leaf = path->nodes[0];
1540                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1541                 extent = btrfs_item_ptr(leaf, path->slots[0],
1542                                         struct btrfs_dev_extent);
1543                 BUG_ON(found_key.offset > start || found_key.offset +
1544                        btrfs_dev_extent_length(leaf, extent) < start);
1545                 key = found_key;
1546                 btrfs_release_path(path);
1547                 goto again;
1548         } else if (ret == 0) {
1549                 leaf = path->nodes[0];
1550                 extent = btrfs_item_ptr(leaf, path->slots[0],
1551                                         struct btrfs_dev_extent);
1552         } else {
1553                 btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
1554                 goto out;
1555         }
1556
1557         *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1558
1559         ret = btrfs_del_item(trans, root, path);
1560         if (ret) {
1561                 btrfs_handle_fs_error(fs_info, ret,
1562                                       "Failed to remove dev extent item");
1563         } else {
1564                 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1565         }
1566 out:
1567         btrfs_free_path(path);
1568         return ret;
1569 }
1570
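/*
 * Insert a dev extent item keyed (device->devid, BTRFS_DEV_EXTENT_KEY, start)
 * of length @num_bytes, recording the chunk at @chunk_offset that owns it.
 */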
1571 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1572                                   struct btrfs_device *device,
1573                                   u64 chunk_offset, u64 start, u64 num_bytes)
1574 {
1575         int ret;
1576         struct btrfs_path *path;
1577         struct btrfs_fs_info *fs_info = device->fs_info;
1578         struct btrfs_root *root = fs_info->dev_root;
1579         struct btrfs_dev_extent *extent;
1580         struct extent_buffer *leaf;
1581         struct btrfs_key key;
1582
1583         WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1584         WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1585         path = btrfs_alloc_path();
1586         if (!path)
1587                 return -ENOMEM;
1588
1589         key.objectid = device->devid;
1590         key.offset = start;
1591         key.type = BTRFS_DEV_EXTENT_KEY;
1592         ret = btrfs_insert_empty_item(trans, root, path, &key,
1593                                       sizeof(*extent));
1594         if (ret)
1595                 goto out;
1596
1597         leaf = path->nodes[0];
1598         extent = btrfs_item_ptr(leaf, path->slots[0],
1599                                 struct btrfs_dev_extent);
1600         btrfs_set_dev_extent_chunk_tree(leaf, extent,
1601                                         BTRFS_CHUNK_TREE_OBJECTID);
1602         btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1603                                             BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1604         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1605
1606         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1607         btrfs_mark_buffer_dirty(leaf);
1608 out:
1609         btrfs_free_path(path);
1610         return ret;
1611 }
1612
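/*
 * Return the logical offset right after the last chunk mapping, i.e. where
 * the next chunk can be placed, or 0 if the mapping tree is empty.
 */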
1613 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1614 {
1615         struct extent_map_tree *em_tree;
1616         struct extent_map *em;
1617         struct rb_node *n;
1618         u64 ret = 0;
1619
1620         em_tree = &fs_info->mapping_tree;
1621         read_lock(&em_tree->lock);
1622         n = rb_last(&em_tree->map.rb_root);
1623         if (n) {
1624                 em = rb_entry(n, struct extent_map, rb_node);
1625                 ret = em->start + em->len;
1626         }
1627         read_unlock(&em_tree->lock);
1628
1629         return ret;
1630 }
1631
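/*
 * Find the next free devid: one past the highest BTRFS_DEV_ITEM_KEY offset
 * in the chunk tree, or 1 if no dev item exists yet.
 */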
1632 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1633                                     u64 *devid_ret)
1634 {
1635         int ret;
1636         struct btrfs_key key;
1637         struct btrfs_key found_key;
1638         struct btrfs_path *path;
1639
1640         path = btrfs_alloc_path();
1641         if (!path)
1642                 return -ENOMEM;
1643
1644         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1645         key.type = BTRFS_DEV_ITEM_KEY;
1646         key.offset = (u64)-1;
1647
1648         ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1649         if (ret < 0)
1650                 goto error;
1651
1652         if (ret == 0) {
1653                 /* Corruption */
1654                 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1655                 ret = -EUCLEAN;
1656                 goto error;
1657         }
1658
1659         ret = btrfs_previous_item(fs_info->chunk_root, path,
1660                                   BTRFS_DEV_ITEMS_OBJECTID,
1661                                   BTRFS_DEV_ITEM_KEY);
1662         if (ret) {
1663                 *devid_ret = 1;
1664         } else {
1665                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1666                                       path->slots[0]);
1667                 *devid_ret = found_key.offset + 1;
1668         }
1669         ret = 0;
1670 error:
1671         btrfs_free_path(path);
1672         return ret;
1673 }
1674
1675 /*
1676  * The device information is stored in the chunk root.
1677  * The btrfs_device struct should be fully filled in.
1678  */
1679 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1680                             struct btrfs_device *device)
1681 {
1682         int ret;
1683         struct btrfs_path *path;
1684         struct btrfs_dev_item *dev_item;
1685         struct extent_buffer *leaf;
1686         struct btrfs_key key;
1687         unsigned long ptr;
1688
1689         path = btrfs_alloc_path();
1690         if (!path)
1691                 return -ENOMEM;
1692
1693         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1694         key.type = BTRFS_DEV_ITEM_KEY;
1695         key.offset = device->devid;
1696
1697         ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1698                                       &key, sizeof(*dev_item));
1699         if (ret)
1700                 goto out;
1701
1702         leaf = path->nodes[0];
1703         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1704
1705         btrfs_set_device_id(leaf, dev_item, device->devid);
1706         btrfs_set_device_generation(leaf, dev_item, 0);
1707         btrfs_set_device_type(leaf, dev_item, device->type);
1708         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1709         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1710         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1711         btrfs_set_device_total_bytes(leaf, dev_item,
1712                                      btrfs_device_get_disk_total_bytes(device));
1713         btrfs_set_device_bytes_used(leaf, dev_item,
1714                                     btrfs_device_get_bytes_used(device));
1715         btrfs_set_device_group(leaf, dev_item, 0);
1716         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1717         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1718         btrfs_set_device_start_offset(leaf, dev_item, 0);
1719
1720         ptr = btrfs_device_uuid(dev_item);
1721         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1722         ptr = btrfs_device_fsid(dev_item);
1723         write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1724                             ptr, BTRFS_FSID_SIZE);
1725         btrfs_mark_buffer_dirty(leaf);
1726
1727         ret = 0;
1728 out:
1729         btrfs_free_path(path);
1730         return ret;
1731 }
1732
1733 /*
1734  * Function to update ctime/mtime for a given device path.
1735  * Mainly used for ctime/mtime based probing, e.g. by libblkid.
1736  */
1737 static void update_dev_time(const char *path_name)
1738 {
1739         struct file *filp;
1740
1741         filp = filp_open(path_name, O_RDWR, 0);
1742         if (IS_ERR(filp))
1743                 return;
1744         file_update_time(filp);
1745         filp_close(filp, NULL);
1746 }
1747
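/*
 * Delete the dev item of @device from the chunk tree in a dedicated
 * transaction, committing it on success.
 */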
1748 static int btrfs_rm_dev_item(struct btrfs_device *device)
1749 {
1750         struct btrfs_root *root = device->fs_info->chunk_root;
1751         int ret;
1752         struct btrfs_path *path;
1753         struct btrfs_key key;
1754         struct btrfs_trans_handle *trans;
1755
1756         path = btrfs_alloc_path();
1757         if (!path)
1758                 return -ENOMEM;
1759
1760         trans = btrfs_start_transaction(root, 0);
1761         if (IS_ERR(trans)) {
1762                 btrfs_free_path(path);
1763                 return PTR_ERR(trans);
1764         }
1765         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1766         key.type = BTRFS_DEV_ITEM_KEY;
1767         key.offset = device->devid;
1768
1769         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1770         if (ret) {
1771                 if (ret > 0)
1772                         ret = -ENOENT;
1773                 btrfs_abort_transaction(trans, ret);
1774                 btrfs_end_transaction(trans);
1775                 goto out;
1776         }
1777
1778         ret = btrfs_del_item(trans, root, path);
1779         if (ret) {
1780                 btrfs_abort_transaction(trans, ret);
1781                 btrfs_end_transaction(trans);
1782         }
1783
1784 out:
1785         btrfs_free_path(path);
1786         if (!ret)
1787                 ret = btrfs_commit_transaction(trans);
1788         return ret;
1789 }
1790
1791 /*
1792  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1793  * filesystem. It's up to the caller to adjust that number regarding eg. device
1794  * replace.
1795  */
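/*
 * For example (illustration): with RAID1 block groups allocated, which
 * require at least 2 devices, calling this with num_devices == 1 returns
 * BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET.
 */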
1796 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1797                 u64 num_devices)
1798 {
1799         u64 all_avail;
1800         unsigned seq;
1801         int i;
1802
1803         do {
1804                 seq = read_seqbegin(&fs_info->profiles_lock);
1805
1806                 all_avail = fs_info->avail_data_alloc_bits |
1807                             fs_info->avail_system_alloc_bits |
1808                             fs_info->avail_metadata_alloc_bits;
1809         } while (read_seqretry(&fs_info->profiles_lock, seq));
1810
1811         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1812                 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1813                         continue;
1814
1815                 if (num_devices < btrfs_raid_array[i].devs_min) {
1816                         int ret = btrfs_raid_array[i].mindev_error;
1817
1818                         if (ret)
1819                                 return ret;
1820                 }
1821         }
1822
1823         return 0;
1824 }
1825
1826 static struct btrfs_device * btrfs_find_next_active_device(
1827                 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1828 {
1829         struct btrfs_device *next_device;
1830
1831         list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1832                 if (next_device != device &&
1833                     !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1834                     && next_device->bdev)
1835                         return next_device;
1836         }
1837
1838         return NULL;
1839 }
1840
1841 /*
1842  * Helper function to check if the given device is part of s_bdev / latest_bdev
1843  * and replace it with the provided or the next active device. In the
1844  * context where this function is called, there should always be another
1845  * device (or this_dev) which is active.
1846  */
1847 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1848                                      struct btrfs_device *this_dev)
1849 {
1850         struct btrfs_fs_info *fs_info = device->fs_info;
1851         struct btrfs_device *next_device;
1852
1853         if (this_dev)
1854                 next_device = this_dev;
1855         else
1856                 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1857                                                                 device);
1858         ASSERT(next_device);
1859
1860         if (fs_info->sb->s_bdev &&
1861                         (fs_info->sb->s_bdev == device->bdev))
1862                 fs_info->sb->s_bdev = next_device->bdev;
1863
1864         if (fs_info->fs_devices->latest_bdev == device->bdev)
1865                 fs_info->fs_devices->latest_bdev = next_device->bdev;
1866 }
1867
1868 /*
1869  * Return btrfs_fs_devices::num_devices excluding the device that's being
1870  * currently replaced.
1871  */
1872 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
1873 {
1874         u64 num_devices = fs_info->fs_devices->num_devices;
1875
1876         down_read(&fs_info->dev_replace.rwsem);
1877         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
1878                 ASSERT(num_devices > 1);
1879                 num_devices--;
1880         }
1881         up_read(&fs_info->dev_replace.rwsem);
1882
1883         return num_devices;
1884 }
1885
1886 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
1887                 u64 devid)
1888 {
1889         struct btrfs_device *device;
1890         struct btrfs_fs_devices *cur_devices;
1891         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
1892         u64 num_devices;
1893         int ret = 0;
1894
1895         mutex_lock(&uuid_mutex);
1896
1897         num_devices = btrfs_num_devices(fs_info);
1898
1899         ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
1900         if (ret)
1901                 goto out;
1902
1903         device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
1904
1905         if (IS_ERR(device)) {
1906                 if (PTR_ERR(device) == -ENOENT &&
1907                     strcmp(device_path, "missing") == 0)
1908                         ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1909                 else
1910                         ret = PTR_ERR(device);
1911                 goto out;
1912         }
1913
1914         if (btrfs_pinned_by_swapfile(fs_info, device)) {
1915                 btrfs_warn_in_rcu(fs_info,
1916                   "cannot remove device %s (devid %llu) due to active swapfile",
1917                                   rcu_str_deref(device->name), device->devid);
1918                 ret = -ETXTBSY;
1919                 goto out;
1920         }
1921
1922         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1923                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1924                 goto out;
1925         }
1926
1927         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1928             fs_info->fs_devices->rw_devices == 1) {
1929                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1930                 goto out;
1931         }
1932
1933         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1934                 mutex_lock(&fs_info->chunk_mutex);
1935                 list_del_init(&device->dev_alloc_list);
1936                 device->fs_devices->rw_devices--;
1937                 mutex_unlock(&fs_info->chunk_mutex);
1938         }
1939
1940         mutex_unlock(&uuid_mutex);
1941         ret = btrfs_shrink_device(device, 0);
1942         mutex_lock(&uuid_mutex);
1943         if (ret)
1944                 goto error_undo;
1945
1946         /*
1947          * TODO: the superblock still includes this device in its num_devices
1948          * counter although write_all_supers() is not locked out. This
1949          * could give a filesystem state which requires a degraded mount.
1950          */
1951         ret = btrfs_rm_dev_item(device);
1952         if (ret)
1953                 goto error_undo;
1954
1955         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
1956         btrfs_scrub_cancel_dev(device);
1957
1958         /*
1959          * the device list mutex makes sure that we don't change
1960          * the device list while someone else is writing out all
1961          * the device supers. Whoever is writing all supers, should
1962          * lock the device list mutex before getting the number of
1963          * devices in the super block (super_copy). Conversely,
1964          * whoever updates the number of devices in the super block
1965          * (super_copy) should hold the device list mutex.
1966          */
1967
1968         /*
1969          * In normal cases cur_devices == fs_devices. But when deleting
1970          * a seed device, cur_devices should point to the seed's own
1971          * fs_devices, listed under fs_devices->seed.
1972          */
1973         cur_devices = device->fs_devices;
1974         mutex_lock(&fs_devices->device_list_mutex);
1975         list_del_rcu(&device->dev_list);
1976
1977         cur_devices->num_devices--;
1978         cur_devices->total_devices--;
1979         /* Update total_devices of the parent fs_devices if it's seed */
1980         if (cur_devices != fs_devices)
1981                 fs_devices->total_devices--;
1982
1983         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1984                 cur_devices->missing_devices--;
1985
1986         btrfs_assign_next_active_device(device, NULL);
1987
1988         if (device->bdev) {
1989                 cur_devices->open_devices--;
1990                 /* remove sysfs entry */
1991                 btrfs_sysfs_rm_device_link(fs_devices, device);
1992         }
1993
1994         num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
1995         btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
1996         mutex_unlock(&fs_devices->device_list_mutex);
1997
1998         /*
1999          * at this point, the device is zero sized and detached from
2000          * the devices list.  All that's left is to zero out the old
2001          * supers and free the device.
2002          */
2003         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2004                 btrfs_scratch_superblocks(device->bdev, device->name->str);
2005
2006         btrfs_close_bdev(device);
2007         synchronize_rcu();
2008         btrfs_free_device(device);
2009
2010         if (cur_devices->open_devices == 0) {
2011                 while (fs_devices) {
2012                         if (fs_devices->seed == cur_devices) {
2013                                 fs_devices->seed = cur_devices->seed;
2014                                 break;
2015                         }
2016                         fs_devices = fs_devices->seed;
2017                 }
2018                 cur_devices->seed = NULL;
2019                 close_fs_devices(cur_devices);
2020                 free_fs_devices(cur_devices);
2021         }
2022
2023 out:
2024         mutex_unlock(&uuid_mutex);
2025         return ret;
2026
2027 error_undo:
2028         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2029                 mutex_lock(&fs_info->chunk_mutex);
2030                 list_add(&device->dev_alloc_list,
2031                          &fs_devices->alloc_list);
2032                 device->fs_devices->rw_devices++;
2033                 mutex_unlock(&fs_info->chunk_mutex);
2034         }
2035         goto out;
2036 }
2037
2038 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2039 {
2040         struct btrfs_fs_devices *fs_devices;
2041
2042         lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2043
2044         /*
2045          * In case of an fs with no seed, srcdev->fs_devices will point to
2046          * the fs_devices of fs_info. However, when the dev being replaced
2047          * is a seed dev it will point to the seed's local fs_devices. In
2048          * short, srcdev will have its correct fs_devices in both cases.
2049          */
2050         fs_devices = srcdev->fs_devices;
2051
2052         list_del_rcu(&srcdev->dev_list);
2053         list_del(&srcdev->dev_alloc_list);
2054         fs_devices->num_devices--;
2055         if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2056                 fs_devices->missing_devices--;
2057
2058         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2059                 fs_devices->rw_devices--;
2060
2061         if (srcdev->bdev)
2062                 fs_devices->open_devices--;
2063 }
2064
2065 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2066 {
2067         struct btrfs_fs_info *fs_info = srcdev->fs_info;
2068         struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2069
2070         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
2071                 /* zero out the old super if it is writable */
2072                 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2073         }
2074
2075         btrfs_close_bdev(srcdev);
2076         synchronize_rcu();
2077         btrfs_free_device(srcdev);
2078
2079         /* if there are no devs left we'd rather delete the fs_devices */
2080         if (!fs_devices->num_devices) {
2081                 struct btrfs_fs_devices *tmp_fs_devices;
2082
2083                 /*
2084                  * On a mounted FS, num_devices can't be zero unless it's a
2085                  * seed. In case of a seed device being replaced, the
2086                  * replace target is added to the sprout FS, so there will
2087                  * be no more devices left under the seed FS.
2088                  */
2089                 ASSERT(fs_devices->seeding);
2090
2091                 tmp_fs_devices = fs_info->fs_devices;
2092                 while (tmp_fs_devices) {
2093                         if (tmp_fs_devices->seed == fs_devices) {
2094                                 tmp_fs_devices->seed = fs_devices->seed;
2095                                 break;
2096                         }
2097                         tmp_fs_devices = tmp_fs_devices->seed;
2098                 }
2099                 fs_devices->seed = NULL;
2100                 close_fs_devices(fs_devices);
2101                 free_fs_devices(fs_devices);
2102         }
2103 }
2104
2105 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2106 {
2107         struct btrfs_fs_devices *fs_devices;
2108
2109         WARN_ON(!tgtdev);
             fs_devices = tgtdev->fs_info->fs_devices;
2110         mutex_lock(&fs_devices->device_list_mutex);
2111
2112         btrfs_sysfs_rm_device_link(fs_devices, tgtdev);
2113
2114         if (tgtdev->bdev)
2115                 fs_devices->open_devices--;
2116
2117         fs_devices->num_devices--;
2118
2119         btrfs_assign_next_active_device(tgtdev, NULL);
2120
2121         list_del_rcu(&tgtdev->dev_list);
2122
2123         mutex_unlock(&fs_devices->device_list_mutex);
2124
2125         /*
2126          * The update_dev_time() within btrfs_scratch_superblocks()
2127          * may lead to a call to btrfs_show_devname() which will try
2128          * to hold device_list_mutex. And here this device
2129          * is already out of the device list, so we don't have to hold
2130          * the device_list_mutex lock.
2131          */
2132         btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2133
2134         btrfs_close_bdev(tgtdev);
2135         synchronize_rcu();
2136         btrfs_free_device(tgtdev);
2137 }
2138
2139 static struct btrfs_device *btrfs_find_device_by_path(
2140                 struct btrfs_fs_info *fs_info, const char *device_path)
2141 {
2142         int ret = 0;
2143         struct btrfs_super_block *disk_super;
2144         u64 devid;
2145         u8 *dev_uuid;
2146         struct block_device *bdev;
2147         struct buffer_head *bh;
2148         struct btrfs_device *device;
2149
2150         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2151                                     fs_info->bdev_holder, 0, &bdev, &bh);
2152         if (ret)
2153                 return ERR_PTR(ret);
2154         disk_super = (struct btrfs_super_block *)bh->b_data;
2155         devid = btrfs_stack_device_id(&disk_super->dev_item);
2156         dev_uuid = disk_super->dev_item.uuid;
2157         if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2158                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2159                                            disk_super->metadata_uuid, true);
2160         else
2161                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2162                                            disk_super->fsid, true);
2163
2164         brelse(bh);
2165         if (!device)
2166                 device = ERR_PTR(-ENOENT);
2167         blkdev_put(bdev, FMODE_READ);
2168         return device;
2169 }
2170
2171 /*
2172  * Lookup a device given by device id, or the path if the id is 0.
2173  */
2174 struct btrfs_device *btrfs_find_device_by_devspec(
2175                 struct btrfs_fs_info *fs_info, u64 devid,
2176                 const char *device_path)
2177 {
2178         struct btrfs_device *device;
2179
2180         if (devid) {
2181                 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2182                                            NULL, true);
2183                 if (!device)
2184                         return ERR_PTR(-ENOENT);
2185                 return device;
2186         }
2187
2188         if (!device_path || !device_path[0])
2189                 return ERR_PTR(-EINVAL);
2190
2191         if (strcmp(device_path, "missing") == 0) {
2192                 /* Find first missing device */
2193                 list_for_each_entry(device, &fs_info->fs_devices->devices,
2194                                     dev_list) {
2195                         if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2196                                      &device->dev_state) && !device->bdev)
2197                                 return device;
2198                 }
2199                 return ERR_PTR(-ENOENT);
2200         }
2201
2202         return btrfs_find_device_by_path(fs_info, device_path);
2203 }
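
/*
 * Usage sketch (illustrative values): a lookup by devid, by path, and for
 * the first missing device:
 *
 *	dev = btrfs_find_device_by_devspec(fs_info, 3, NULL);
 *	dev = btrfs_find_device_by_devspec(fs_info, 0, "/dev/sdb");
 *	dev = btrfs_find_device_by_devspec(fs_info, 0, "missing");
 *
 * Each call returns the matching btrfs_device or an ERR_PTR().
 */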
2204
2205 /*
2206  * Does all the dirty work required for changing the filesystem's UUID.
2207  */
2208 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2209 {
2210         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2211         struct btrfs_fs_devices *old_devices;
2212         struct btrfs_fs_devices *seed_devices;
2213         struct btrfs_super_block *disk_super = fs_info->super_copy;
2214         struct btrfs_device *device;
2215         u64 super_flags;
2216
2217         lockdep_assert_held(&uuid_mutex);
2218         if (!fs_devices->seeding)
2219                 return -EINVAL;
2220
2221         seed_devices = alloc_fs_devices(NULL, NULL);
2222         if (IS_ERR(seed_devices))
2223                 return PTR_ERR(seed_devices);
2224
2225         old_devices = clone_fs_devices(fs_devices);
2226         if (IS_ERR(old_devices)) {
2227                 kfree(seed_devices);
2228                 return PTR_ERR(old_devices);
2229         }
2230
2231         list_add(&old_devices->fs_list, &fs_uuids);
2232
2233         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2234         seed_devices->opened = 1;
2235         INIT_LIST_HEAD(&seed_devices->devices);
2236         INIT_LIST_HEAD(&seed_devices->alloc_list);
2237         mutex_init(&seed_devices->device_list_mutex);
2238
2239         mutex_lock(&fs_devices->device_list_mutex);
2240         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2241                               synchronize_rcu);
2242         list_for_each_entry(device, &seed_devices->devices, dev_list)
2243                 device->fs_devices = seed_devices;
2244
2245         mutex_lock(&fs_info->chunk_mutex);
2246         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2247         mutex_unlock(&fs_info->chunk_mutex);
2248
2249         fs_devices->seeding = 0;
2250         fs_devices->num_devices = 0;
2251         fs_devices->open_devices = 0;
2252         fs_devices->missing_devices = 0;
2253         fs_devices->rotating = 0;
2254         fs_devices->seed = seed_devices;
2255
2256         generate_random_uuid(fs_devices->fsid);
2257         memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2258         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2259         mutex_unlock(&fs_devices->device_list_mutex);
2260
2261         super_flags = btrfs_super_flags(disk_super) &
2262                       ~BTRFS_SUPER_FLAG_SEEDING;
2263         btrfs_set_super_flags(disk_super, super_flags);
2264
2265         return 0;
2266 }
2267
2268 /*
2269  * Store the expected generation for seed devices in device items.
2270  */
2271 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2272 {
2273         struct btrfs_fs_info *fs_info = trans->fs_info;
2274         struct btrfs_root *root = fs_info->chunk_root;
2275         struct btrfs_path *path;
2276         struct extent_buffer *leaf;
2277         struct btrfs_dev_item *dev_item;
2278         struct btrfs_device *device;
2279         struct btrfs_key key;
2280         u8 fs_uuid[BTRFS_FSID_SIZE];
2281         u8 dev_uuid[BTRFS_UUID_SIZE];
2282         u64 devid;
2283         int ret;
2284
2285         path = btrfs_alloc_path();
2286         if (!path)
2287                 return -ENOMEM;
2288
2289         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2290         key.offset = 0;
2291         key.type = BTRFS_DEV_ITEM_KEY;
2292
2293         while (1) {
2294                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2295                 if (ret < 0)
2296                         goto error;
2297
2298                 leaf = path->nodes[0];
2299 next_slot:
2300                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2301                         ret = btrfs_next_leaf(root, path);
2302                         if (ret > 0)
2303                                 break;
2304                         if (ret < 0)
2305                                 goto error;
2306                         leaf = path->nodes[0];
2307                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2308                         btrfs_release_path(path);
2309                         continue;
2310                 }
2311
2312                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2313                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2314                     key.type != BTRFS_DEV_ITEM_KEY)
2315                         break;
2316
2317                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2318                                           struct btrfs_dev_item);
2319                 devid = btrfs_device_id(leaf, dev_item);
2320                 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2321                                    BTRFS_UUID_SIZE);
2322                 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2323                                    BTRFS_FSID_SIZE);
2324                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2325                                            fs_uuid, true);
2326                 BUG_ON(!device); /* Logic error */
2327
2328                 if (device->fs_devices->seeding) {
2329                         btrfs_set_device_generation(leaf, dev_item,
2330                                                     device->generation);
2331                         btrfs_mark_buffer_dirty(leaf);
2332                 }
2333
2334                 path->slots[0]++;
2335                 goto next_slot;
2336         }
2337         ret = 0;
2338 error:
2339         btrfs_free_path(path);
2340         return ret;
2341 }
2342
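/*
 * Add the device at @device_path to the mounted filesystem: open it
 * exclusively, create and link a new btrfs_device, grow the superblock
 * totals and persist the dev item. If the filesystem is a seed, this also
 * sprouts a new writable filesystem on top of it.
 */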
2343 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2344 {
2345         struct btrfs_root *root = fs_info->dev_root;
2346         struct request_queue *q;
2347         struct btrfs_trans_handle *trans;
2348         struct btrfs_device *device;
2349         struct block_device *bdev;
2350         struct super_block *sb = fs_info->sb;
2351         struct rcu_string *name;
2352         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2353         u64 orig_super_total_bytes;
2354         u64 orig_super_num_devices;
2355         int seeding_dev = 0;
2356         int ret = 0;
2357         bool unlocked = false;
2358
2359         if (sb_rdonly(sb) && !fs_devices->seeding)
2360                 return -EROFS;
2361
2362         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2363                                   fs_info->bdev_holder);
2364         if (IS_ERR(bdev))
2365                 return PTR_ERR(bdev);
2366
2367         if (fs_devices->seeding) {
2368                 seeding_dev = 1;
2369                 down_write(&sb->s_umount);
2370                 mutex_lock(&uuid_mutex);
2371         }
2372
2373         filemap_write_and_wait(bdev->bd_inode->i_mapping);
2374
2375         mutex_lock(&fs_devices->device_list_mutex);
2376         list_for_each_entry(device, &fs_devices->devices, dev_list) {
2377                 if (device->bdev == bdev) {
2378                         ret = -EEXIST;
2379                         mutex_unlock(&fs_devices->device_list_mutex);
2381                         goto error;
2382                 }
2383         }
2384         mutex_unlock(&fs_devices->device_list_mutex);
2385
2386         device = btrfs_alloc_device(fs_info, NULL, NULL);
2387         if (IS_ERR(device)) {
2388                 /* we can safely leave the fs_devices entry around */
2389                 ret = PTR_ERR(device);
2390                 goto error;
2391         }
2392
2393         name = rcu_string_strdup(device_path, GFP_KERNEL);
2394         if (!name) {
2395                 ret = -ENOMEM;
2396                 goto error_free_device;
2397         }
2398         rcu_assign_pointer(device->name, name);
2399
2400         trans = btrfs_start_transaction(root, 0);
2401         if (IS_ERR(trans)) {
2402                 ret = PTR_ERR(trans);
2403                 goto error_free_device;
2404         }
2405
2406         q = bdev_get_queue(bdev);
2407         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2408         device->generation = trans->transid;
2409         device->io_width = fs_info->sectorsize;
2410         device->io_align = fs_info->sectorsize;
2411         device->sector_size = fs_info->sectorsize;
2412         device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2413                                          fs_info->sectorsize);
2414         device->disk_total_bytes = device->total_bytes;
2415         device->commit_total_bytes = device->total_bytes;
2416         device->fs_info = fs_info;
2417         device->bdev = bdev;
2418         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2419         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2420         device->mode = FMODE_EXCL;
2421         device->dev_stats_valid = 1;
2422         set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2423
2424         if (seeding_dev) {
2425                 sb->s_flags &= ~SB_RDONLY;
2426                 ret = btrfs_prepare_sprout(fs_info);
2427                 if (ret) {
2428                         btrfs_abort_transaction(trans, ret);
2429                         goto error_trans;
2430                 }
2431         }
2432
2433         device->fs_devices = fs_devices;
2434
2435         mutex_lock(&fs_devices->device_list_mutex);
2436         mutex_lock(&fs_info->chunk_mutex);
2437         list_add_rcu(&device->dev_list, &fs_devices->devices);
2438         list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2439         fs_devices->num_devices++;
2440         fs_devices->open_devices++;
2441         fs_devices->rw_devices++;
2442         fs_devices->total_devices++;
2443         fs_devices->total_rw_bytes += device->total_bytes;
2444
2445         atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2446
2447         if (!blk_queue_nonrot(q))
2448                 fs_devices->rotating = 1;
2449
2450         orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2451         btrfs_set_super_total_bytes(fs_info->super_copy,
2452                 round_down(orig_super_total_bytes + device->total_bytes,
2453                            fs_info->sectorsize));
2454
2455         orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2456         btrfs_set_super_num_devices(fs_info->super_copy,
2457                                     orig_super_num_devices + 1);
2458
2459         /* add sysfs device entry */
2460         btrfs_sysfs_add_device_link(fs_devices, device);
2461
2462         /*
2463          * we've got more storage, clear any full flags on the space
2464          * infos
2465          */
2466         btrfs_clear_space_info_full(fs_info);
2467
2468         mutex_unlock(&fs_info->chunk_mutex);
2469         mutex_unlock(&fs_devices->device_list_mutex);
2470
2471         if (seeding_dev) {
2472                 mutex_lock(&fs_info->chunk_mutex);
2473                 ret = init_first_rw_device(trans);
2474                 mutex_unlock(&fs_info->chunk_mutex);
2475                 if (ret) {
2476                         btrfs_abort_transaction(trans, ret);
2477                         goto error_sysfs;
2478                 }
2479         }
2480
2481         ret = btrfs_add_dev_item(trans, device);
2482         if (ret) {
2483                 btrfs_abort_transaction(trans, ret);
2484                 goto error_sysfs;
2485         }
2486
2487         if (seeding_dev) {
2488                 ret = btrfs_finish_sprout(trans);
2489                 if (ret) {
2490                         btrfs_abort_transaction(trans, ret);
2491                         goto error_sysfs;
2492                 }
2493
2494                 btrfs_sysfs_update_sprout_fsid(fs_devices,
2495                                 fs_info->fs_devices->fsid);
2496         }
2497
2498         ret = btrfs_commit_transaction(trans);
2499
2500         if (seeding_dev) {
2501                 mutex_unlock(&uuid_mutex);
2502                 up_write(&sb->s_umount);
2503                 unlocked = true;
2504
2505                 if (ret) /* transaction commit */
2506                         return ret;
2507
2508                 ret = btrfs_relocate_sys_chunks(fs_info);
2509                 if (ret < 0)
2510                         btrfs_handle_fs_error(fs_info, ret,
2511                                     "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2512                 trans = btrfs_attach_transaction(root);
2513                 if (IS_ERR(trans)) {
2514                         if (PTR_ERR(trans) == -ENOENT)
2515                                 return 0;
2516                         ret = PTR_ERR(trans);
2517                         trans = NULL;
2518                         goto error_sysfs;
2519                 }
2520                 ret = btrfs_commit_transaction(trans);
2521         }
2522
2523         /* Update ctime/mtime for libblkid */
2524         update_dev_time(device_path);
2525         return ret;
2526
2527 error_sysfs:
2528         btrfs_sysfs_rm_device_link(fs_devices, device);
2529         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2530         mutex_lock(&fs_info->chunk_mutex);
2531         list_del_rcu(&device->dev_list);
2532         list_del(&device->dev_alloc_list);
2533         fs_info->fs_devices->num_devices--;
2534         fs_info->fs_devices->open_devices--;
2535         fs_info->fs_devices->rw_devices--;
2536         fs_info->fs_devices->total_devices--;
2537         fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2538         atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2539         btrfs_set_super_total_bytes(fs_info->super_copy,
2540                                     orig_super_total_bytes);
2541         btrfs_set_super_num_devices(fs_info->super_copy,
2542                                     orig_super_num_devices);
2543         mutex_unlock(&fs_info->chunk_mutex);
2544         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2545 error_trans:
2546         if (seeding_dev)
2547                 sb->s_flags |= SB_RDONLY;
2548         if (trans)
2549                 btrfs_end_transaction(trans);
2550 error_free_device:
2551         btrfs_free_device(device);
2552 error:
2553         blkdev_put(bdev, FMODE_EXCL);
2554         if (seeding_dev && !unlocked) {
2555                 mutex_unlock(&uuid_mutex);
2556                 up_write(&sb->s_umount);
2557         }
2558         return ret;
2559 }
2560
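/*
 * Write the current in-memory sizes and geometry of @device back to its
 * dev item in the chunk tree.
 */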
2561 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2562                                         struct btrfs_device *device)
2563 {
2564         int ret;
2565         struct btrfs_path *path;
2566         struct btrfs_root *root = device->fs_info->chunk_root;
2567         struct btrfs_dev_item *dev_item;
2568         struct extent_buffer *leaf;
2569         struct btrfs_key key;
2570
2571         path = btrfs_alloc_path();
2572         if (!path)
2573                 return -ENOMEM;
2574
2575         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2576         key.type = BTRFS_DEV_ITEM_KEY;
2577         key.offset = device->devid;
2578
2579         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2580         if (ret < 0)
2581                 goto out;
2582
2583         if (ret > 0) {
2584                 ret = -ENOENT;
2585                 goto out;
2586         }
2587
2588         leaf = path->nodes[0];
2589         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2590
2591         btrfs_set_device_id(leaf, dev_item, device->devid);
2592         btrfs_set_device_type(leaf, dev_item, device->type);
2593         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2594         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2595         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2596         btrfs_set_device_total_bytes(leaf, dev_item,
2597                                      btrfs_device_get_disk_total_bytes(device));
2598         btrfs_set_device_bytes_used(leaf, dev_item,
2599                                     btrfs_device_get_bytes_used(device));
2600         btrfs_mark_buffer_dirty(leaf);
2601
2602 out:
2603         btrfs_free_path(path);
2604         return ret;
2605 }
2606
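/*
 * Grow @device to @new_size (rounded down to the sector size) and update
 * the superblock total, the in-memory counters and the dev item.
 */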
2607 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2608                       struct btrfs_device *device, u64 new_size)
2609 {
2610         struct btrfs_fs_info *fs_info = device->fs_info;
2611         struct btrfs_super_block *super_copy = fs_info->super_copy;
2612         u64 old_total;
2613         u64 diff;
2614
2615         if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2616                 return -EACCES;
2617
2618         new_size = round_down(new_size, fs_info->sectorsize);
2619
2620         mutex_lock(&fs_info->chunk_mutex);
2621         old_total = btrfs_super_total_bytes(super_copy);
2622         diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2623
2624         if (new_size <= device->total_bytes ||
2625             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2626                 mutex_unlock(&fs_info->chunk_mutex);
2627                 return -EINVAL;
2628         }
2629
2630         btrfs_set_super_total_bytes(super_copy,
2631                         round_down(old_total + diff, fs_info->sectorsize));
2632         device->fs_devices->total_rw_bytes += diff;
2633
2634         btrfs_device_set_total_bytes(device, new_size);
2635         btrfs_device_set_disk_total_bytes(device, new_size);
2636         btrfs_clear_space_info_full(device->fs_info);
2637         if (list_empty(&device->post_commit_list))
2638                 list_add_tail(&device->post_commit_list,
2639                               &trans->transaction->dev_update_list);
2640         mutex_unlock(&fs_info->chunk_mutex);
2641
2642         return btrfs_update_device(trans, device);
2643 }
2644
2645 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2646 {
2647         struct btrfs_fs_info *fs_info = trans->fs_info;
2648         struct btrfs_root *root = fs_info->chunk_root;
2649         int ret;
2650         struct btrfs_path *path;
2651         struct btrfs_key key;
2652
2653         path = btrfs_alloc_path();
2654         if (!path)
2655                 return -ENOMEM;
2656
2657         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2658         key.offset = chunk_offset;
2659         key.type = BTRFS_CHUNK_ITEM_KEY;
2660
2661         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2662         if (ret < 0)
2663                 goto out;
2664         else if (ret > 0) { /* Logic error or corruption */
2665                 btrfs_handle_fs_error(fs_info, -ENOENT,
2666                                       "Failed lookup while freeing chunk.");
2667                 ret = -ENOENT;
2668                 goto out;
2669         }
2670
2671         ret = btrfs_del_item(trans, root, path);
2672         if (ret < 0)
2673                 btrfs_handle_fs_error(fs_info, ret,
2674                                       "Failed to delete chunk item.");
2675 out:
2676         btrfs_free_path(path);
2677         return ret;
2678 }
2679
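/*
 * Remove the chunk at @chunk_offset from the superblock's sys_chunk_array.
 * The array is a packed sequence of (struct btrfs_disk_key, struct
 * btrfs_chunk) pairs, so we walk it entry by entry and memmove() the tail
 * over the one to delete.
 */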
2680 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2681 {
2682         struct btrfs_super_block *super_copy = fs_info->super_copy;
2683         struct btrfs_disk_key *disk_key;
2684         struct btrfs_chunk *chunk;
2685         u8 *ptr;
2686         int ret = 0;
2687         u32 num_stripes;
2688         u32 array_size;
2689         u32 len = 0;
2690         u32 cur;
2691         struct btrfs_key key;
2692
2693         mutex_lock(&fs_info->chunk_mutex);
2694         array_size = btrfs_super_sys_array_size(super_copy);
2695
2696         ptr = super_copy->sys_chunk_array;
2697         cur = 0;
2698
2699         while (cur < array_size) {
2700                 disk_key = (struct btrfs_disk_key *)ptr;
2701                 btrfs_disk_key_to_cpu(&key, disk_key);
2702
2703                 len = sizeof(*disk_key);
2704
2705                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2706                         chunk = (struct btrfs_chunk *)(ptr + len);
2707                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2708                         len += btrfs_chunk_item_size(num_stripes);
2709                 } else {
2710                         ret = -EIO;
2711                         break;
2712                 }
2713                 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2714                     key.offset == chunk_offset) {
2715                         memmove(ptr, ptr + len, array_size - (cur + len));
2716                         array_size -= len;
2717                         btrfs_set_super_sys_array_size(super_copy, array_size);
2718                 } else {
2719                         ptr += len;
2720                         cur += len;
2721                 }
2722         }
2723         mutex_unlock(&fs_info->chunk_mutex);
2724         return ret;
2725 }
2726
2727 /*
2728  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2729  * @logical: Logical block offset in bytes.
2730  * @length: Length of extent in bytes.
2731  *
2732  * Return: Chunk mapping or ERR_PTR.
2733  */
2734 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2735                                        u64 logical, u64 length)
2736 {
2737         struct extent_map_tree *em_tree;
2738         struct extent_map *em;
2739
2740         em_tree = &fs_info->mapping_tree;
2741         read_lock(&em_tree->lock);
2742         em = lookup_extent_mapping(em_tree, logical, length);
2743         read_unlock(&em_tree->lock);
2744
2745         if (!em) {
2746                 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2747                            logical, length);
2748                 return ERR_PTR(-EINVAL);
2749         }
2750
2751         if (em->start > logical || em->start + em->len < logical) {
2752                 btrfs_crit(fs_info,
2753                            "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2754                            logical, length, em->start, em->start + em->len);
2755                 free_extent_map(em);
2756                 return ERR_PTR(-EINVAL);
2757         }
2758
2759         /* callers are responsible for dropping em's ref. */
2760         return em;
2761 }
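
/*
 * A sketch of the typical lookup pattern (illustrative only):
 *
 *	struct extent_map *em = btrfs_get_chunk_map(fs_info, logical, len);
 *
 *	if (!IS_ERR(em)) {
 *		struct map_lookup *map = em->map_lookup;
 *		// map->stripes[i].dev and .physical describe the copies
 *		free_extent_map(em);
 *	}
 */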
2762
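/*
 * Remove the chunk at @chunk_offset: free the dev extent of every stripe,
 * delete the chunk item (plus its sys_chunk_array copy for system chunks)
 * and finally remove the now unused block group.
 */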
2763 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2764 {
2765         struct btrfs_fs_info *fs_info = trans->fs_info;
2766         struct extent_map *em;
2767         struct map_lookup *map;
2768         u64 dev_extent_len = 0;
2769         int i, ret = 0;
2770         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2771
2772         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
2773         if (IS_ERR(em)) {
2774                 /*
2775                  * This is a logic error, but we don't want to just rely on the
2776                  * user having built with ASSERT enabled, so if ASSERT doesn't
2777                  * do anything we still error out.
2778                  */
2779                 ASSERT(0);
2780                 return PTR_ERR(em);
2781         }
2782         map = em->map_lookup;
2783         mutex_lock(&fs_info->chunk_mutex);
2784         check_system_chunk(trans, map->type);
2785         mutex_unlock(&fs_info->chunk_mutex);
2786
2787         /*
2788          * Take the device list mutex to prevent races with the final phase of
2789          * a device replace operation that replaces the device object associated
2790          * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2791          */
2792         mutex_lock(&fs_devices->device_list_mutex);
2793         for (i = 0; i < map->num_stripes; i++) {
2794                 struct btrfs_device *device = map->stripes[i].dev;
2795                 ret = btrfs_free_dev_extent(trans, device,
2796                                             map->stripes[i].physical,
2797                                             &dev_extent_len);
2798                 if (ret) {
2799                         mutex_unlock(&fs_devices->device_list_mutex);
2800                         btrfs_abort_transaction(trans, ret);
2801                         goto out;
2802                 }
2803
2804                 if (device->bytes_used > 0) {
2805                         mutex_lock(&fs_info->chunk_mutex);
2806                         btrfs_device_set_bytes_used(device,
2807                                         device->bytes_used - dev_extent_len);
2808                         atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
2809                         btrfs_clear_space_info_full(fs_info);
2810                         mutex_unlock(&fs_info->chunk_mutex);
2811                 }
2812
2813                 ret = btrfs_update_device(trans, device);
2814                 if (ret) {
2815                         mutex_unlock(&fs_devices->device_list_mutex);
2816                         btrfs_abort_transaction(trans, ret);
2817                         goto out;
2818                 }
2819         }
2820         mutex_unlock(&fs_devices->device_list_mutex);
2821
2822         ret = btrfs_free_chunk(trans, chunk_offset);
2823         if (ret) {
2824                 btrfs_abort_transaction(trans, ret);
2825                 goto out;
2826         }
2827
2828         trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
2829
2830         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2831                 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
2832                 if (ret) {
2833                         btrfs_abort_transaction(trans, ret);
2834                         goto out;
2835                 }
2836         }
2837
2838         ret = btrfs_remove_block_group(trans, chunk_offset, em);
2839         if (ret) {
2840                 btrfs_abort_transaction(trans, ret);
2841                 goto out;
2842         }
2843
2844 out:
2845         /* once for us */
2846         free_extent_map(em);
2847         return ret;
2848 }
2849
2850 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2851 {
2852         struct btrfs_root *root = fs_info->chunk_root;
2853         struct btrfs_trans_handle *trans;
2854         int ret;
2855
2856         /*
2857          * Prevent races with automatic removal of unused block groups.
2858          * After we relocate and before we remove the chunk with offset
2859          * chunk_offset, automatic removal of the block group can kick in,
2860          * resulting in a failure when calling btrfs_remove_chunk() below.
2861          *
2862          * Make sure to acquire this mutex before doing a tree search (dev
2863          * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2864          * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2865          * we release the path used to search the chunk/dev tree and before
2866          * the current task acquires this mutex and calls us.
2867          */
2868         lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
2869
2870         /* step one, relocate all the extents inside this chunk */
2871         btrfs_scrub_pause(fs_info);
2872         ret = btrfs_relocate_block_group(fs_info, chunk_offset);
2873         btrfs_scrub_continue(fs_info);
2874         if (ret)
2875                 return ret;
2876
2877         trans = btrfs_start_trans_remove_block_group(root->fs_info,
2878                                                      chunk_offset);
2879         if (IS_ERR(trans)) {
2880                 ret = PTR_ERR(trans);
2881                 btrfs_handle_fs_error(root->fs_info, ret, NULL);
2882                 return ret;
2883         }
2884
2885         /*
2886          * step two, delete the device extents and the
2887          * chunk tree entries
2888          */
2889         ret = btrfs_remove_chunk(trans, chunk_offset);
2890         btrfs_end_transaction(trans);
2891         return ret;
2892 }
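/*
 * The locking rule asserted above implies a fixed caller pattern; a minimal
 * sketch (assuming the caller locates chunk_offset via a tree search):
 *
 *	mutex_lock(&fs_info->delete_unused_bgs_mutex);
 *	ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
 *	...read the chunk item, btrfs_release_path(path)...
 *	ret = btrfs_relocate_chunk(fs_info, chunk_offset);
 *	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 *
 * Both btrfs_relocate_sys_chunks() and __btrfs_balance() below follow this
 * shape.
 */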
2893
2894 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
2895 {
2896         struct btrfs_root *chunk_root = fs_info->chunk_root;
2897         struct btrfs_path *path;
2898         struct extent_buffer *leaf;
2899         struct btrfs_chunk *chunk;
2900         struct btrfs_key key;
2901         struct btrfs_key found_key;
2902         u64 chunk_type;
2903         bool retried = false;
2904         int failed = 0;
2905         int ret;
2906
2907         path = btrfs_alloc_path();
2908         if (!path)
2909                 return -ENOMEM;
2910
2911 again:
2912         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2913         key.offset = (u64)-1;
2914         key.type = BTRFS_CHUNK_ITEM_KEY;
2915
2916         while (1) {
2917                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
2918                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2919                 if (ret < 0) {
2920                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2921                         goto error;
2922                 }
2923                 BUG_ON(ret == 0); /* Corruption */
2924
2925                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2926                                           key.type);
2927                 if (ret)
2928                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2929                 if (ret < 0)
2930                         goto error;
2931                 if (ret > 0)
2932                         break;
2933
2934                 leaf = path->nodes[0];
2935                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2936
2937                 chunk = btrfs_item_ptr(leaf, path->slots[0],
2938                                        struct btrfs_chunk);
2939                 chunk_type = btrfs_chunk_type(leaf, chunk);
2940                 btrfs_release_path(path);
2941
2942                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2943                         ret = btrfs_relocate_chunk(fs_info, found_key.offset);
2944                         if (ret == -ENOSPC)
2945                                 failed++;
2946                         else
2947                                 BUG_ON(ret);
2948                 }
2949                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2950
2951                 if (found_key.offset == 0)
2952                         break;
2953                 key.offset = found_key.offset - 1;
2954         }
2955         ret = 0;
2956         if (failed && !retried) {
2957                 failed = 0;
2958                 retried = true;
2959                 goto again;
2960         } else if (WARN_ON(failed && retried)) {
2961                 ret = -ENOSPC;
2962         }
2963 error:
2964         btrfs_free_path(path);
2965         return ret;
2966 }
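/*
 * The loop above uses the usual btrfs idiom for walking items of one key
 * type backwards: start the search at (objectid, type, (u64)-1), step to
 * the previous item, and resume the next iteration from
 * found_key.offset - 1.  A condensed sketch of the idiom:
 *
 *	key.offset = (u64)-1;
 *	while (1) {
 *		btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *		btrfs_previous_item(root, path, key.objectid, key.type);
 *		...process the item at found_key...
 *		if (found_key.offset == 0)
 *			break;
 *		key.offset = found_key.offset - 1;
 *	}
 */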
2967
2968 /*
2969  * return 1 : allocated a data chunk successfully,
2970  * return <0: error while allocating a data chunk,
2971  * return 0 : no need to allocate a data chunk.
2972  */
2973 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
2974                                       u64 chunk_offset)
2975 {
2976         struct btrfs_block_group_cache *cache;
2977         u64 bytes_used;
2978         u64 chunk_type;
2979
2980         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2981         ASSERT(cache);
2982         chunk_type = cache->flags;
2983         btrfs_put_block_group(cache);
2984
2985         if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
2986                 return 0;
2987
2988         spin_lock(&fs_info->data_sinfo->lock);
2989         bytes_used = fs_info->data_sinfo->bytes_used;
2990         spin_unlock(&fs_info->data_sinfo->lock);
2991
2992         if (!bytes_used) {
2993                 struct btrfs_trans_handle *trans;
2994                 int ret;
2995
2996                 trans = btrfs_join_transaction(fs_info->tree_root);
2997                 if (IS_ERR(trans))
2998                         return PTR_ERR(trans);
2999
3000                 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3001                 btrfs_end_transaction(trans);
3002                 if (ret < 0)
3003                         return ret;
3004                 return 1;
3005         }
3006
3007         return 0;
3008 }
3009
3010 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3011                                struct btrfs_balance_control *bctl)
3012 {
3013         struct btrfs_root *root = fs_info->tree_root;
3014         struct btrfs_trans_handle *trans;
3015         struct btrfs_balance_item *item;
3016         struct btrfs_disk_balance_args disk_bargs;
3017         struct btrfs_path *path;
3018         struct extent_buffer *leaf;
3019         struct btrfs_key key;
3020         int ret, err;
3021
3022         path = btrfs_alloc_path();
3023         if (!path)
3024                 return -ENOMEM;
3025
3026         trans = btrfs_start_transaction(root, 0);
3027         if (IS_ERR(trans)) {
3028                 btrfs_free_path(path);
3029                 return PTR_ERR(trans);
3030         }
3031
3032         key.objectid = BTRFS_BALANCE_OBJECTID;
3033         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3034         key.offset = 0;
3035
3036         ret = btrfs_insert_empty_item(trans, root, path, &key,
3037                                       sizeof(*item));
3038         if (ret)
3039                 goto out;
3040
3041         leaf = path->nodes[0];
3042         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3043
3044         memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3045
3046         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3047         btrfs_set_balance_data(leaf, item, &disk_bargs);
3048         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3049         btrfs_set_balance_meta(leaf, item, &disk_bargs);
3050         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3051         btrfs_set_balance_sys(leaf, item, &disk_bargs);
3052
3053         btrfs_set_balance_flags(leaf, item, bctl->flags);
3054
3055         btrfs_mark_buffer_dirty(leaf);
3056 out:
3057         btrfs_free_path(path);
3058         err = btrfs_commit_transaction(trans);
3059         if (err && !ret)
3060                 ret = err;
3061         return ret;
3062 }
3063
3064 static int del_balance_item(struct btrfs_fs_info *fs_info)
3065 {
3066         struct btrfs_root *root = fs_info->tree_root;
3067         struct btrfs_trans_handle *trans;
3068         struct btrfs_path *path;
3069         struct btrfs_key key;
3070         int ret, err;
3071
3072         path = btrfs_alloc_path();
3073         if (!path)
3074                 return -ENOMEM;
3075
3076         trans = btrfs_start_transaction(root, 0);
3077         if (IS_ERR(trans)) {
3078                 btrfs_free_path(path);
3079                 return PTR_ERR(trans);
3080         }
3081
3082         key.objectid = BTRFS_BALANCE_OBJECTID;
3083         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3084         key.offset = 0;
3085
3086         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3087         if (ret < 0)
3088                 goto out;
3089         if (ret > 0) {
3090                 ret = -ENOENT;
3091                 goto out;
3092         }
3093
3094         ret = btrfs_del_item(trans, root, path);
3095 out:
3096         btrfs_free_path(path);
3097         err = btrfs_commit_transaction(trans);
3098         if (err && !ret)
3099                 ret = err;
3100         return ret;
3101 }
3102
3103 /*
3104  * This is a heuristic used to reduce the number of chunks balanced on
3105  * resume after balance was interrupted.
3106  */
3107 static void update_balance_args(struct btrfs_balance_control *bctl)
3108 {
3109         /*
3110          * Turn on soft mode for chunk types that were being converted.
3111          */
3112         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3113                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3114         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3115                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3116         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3117                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3118
3119         /*
3120          * Turn on the usage filter if it is not already in use.  The idea is
3121          * that chunks that we have already balanced should be
3122          * reasonably full.  Don't do it for chunks that are being
3123          * converted - that will keep us from relocating unconverted
3124          * (albeit full) chunks.
3125          */
3126         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3127             !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3128             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3129                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3130                 bctl->data.usage = 90;
3131         }
3132         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3133             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3134             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3135                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3136                 bctl->sys.usage = 90;
3137         }
3138         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3139             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3140             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3141                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3142                 bctl->meta.usage = 90;
3143         }
3144 }
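/*
 * Worked example of the heuristic above: after an interrupted plain data
 * balance, resume behaves roughly like "-dusage=90", so a chunk that is
 * 95% used (presumably compacted by the earlier run) is skipped, while a
 * 40% used chunk is still relocated.
 */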
3145
3146 /*
3147  * Clear the balance status in fs_info and delete the balance item from disk.
3148  */
3149 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3150 {
3151         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3152         int ret;
3153
3154         BUG_ON(!fs_info->balance_ctl);
3155
3156         spin_lock(&fs_info->balance_lock);
3157         fs_info->balance_ctl = NULL;
3158         spin_unlock(&fs_info->balance_lock);
3159
3160         kfree(bctl);
3161         ret = del_balance_item(fs_info);
3162         if (ret)
3163                 btrfs_handle_fs_error(fs_info, ret, NULL);
3164 }
3165
3166 /*
3167  * Balance filters.  Return 1 if chunk should be filtered out
3168  * (should not be balanced).
3169  */
3170 static int chunk_profiles_filter(u64 chunk_type,
3171                                  struct btrfs_balance_args *bargs)
3172 {
3173         chunk_type = chunk_to_extended(chunk_type) &
3174                                 BTRFS_EXTENDED_PROFILE_MASK;
3175
3176         if (bargs->profiles & chunk_type)
3177                 return 0;
3178
3179         return 1;
3180 }
3181
3182 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3183                               struct btrfs_balance_args *bargs)
3184 {
3185         struct btrfs_block_group_cache *cache;
3186         u64 chunk_used;
3187         u64 user_thresh_min;
3188         u64 user_thresh_max;
3189         int ret = 1;
3190
3191         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3192         chunk_used = cache->used;
3193
3194         if (bargs->usage_min == 0)
3195                 user_thresh_min = 0;
3196         else
3197                 user_thresh_min = div_factor_fine(cache->length,
3198                                                   bargs->usage_min);
3199
3200         if (bargs->usage_max == 0)
3201                 user_thresh_max = 1;
3202         else if (bargs->usage_max > 100)
3203                 user_thresh_max = cache->length;
3204         else
3205                 user_thresh_max = div_factor_fine(cache->length,
3206                                                   bargs->usage_max);
3207
3208         if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3209                 ret = 0;
3210
3211         btrfs_put_block_group(cache);
3212         return ret;
3213 }
3214
3215 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3216                 u64 chunk_offset, struct btrfs_balance_args *bargs)
3217 {
3218         struct btrfs_block_group_cache *cache;
3219         u64 chunk_used, user_thresh;
3220         int ret = 1;
3221
3222         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3223         chunk_used = cache->used;
3224
3225         if (bargs->usage_min == 0)
3226                 user_thresh = 1;
3227         else if (bargs->usage > 100)
3228                 user_thresh = cache->length;
3229         else
3230                 user_thresh = div_factor_fine(cache->length, bargs->usage);
3231
3232         if (chunk_used < user_thresh)
3233                 ret = 0;
3234
3235         btrfs_put_block_group(cache);
3236         return ret;
3237 }
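/*
 * div_factor_fine(num, factor) computes num * factor / 100, so both usage
 * filters work in percent of the block group length.  A worked example for
 * a 1 GiB chunk (length = 1073741824):
 *
 *	usage=50 in chunk_usage_filter() gives user_thresh = 536870912,
 *	so the chunk is relocated only while it is less than 50% used;
 *	usage_min=10, usage_max=50 in the range variant above gives
 *	107374182 <= used < 536870912 as the "balance it" window.
 */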
3238
3239 static int chunk_devid_filter(struct extent_buffer *leaf,
3240                               struct btrfs_chunk *chunk,
3241                               struct btrfs_balance_args *bargs)
3242 {
3243         struct btrfs_stripe *stripe;
3244         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3245         int i;
3246
3247         for (i = 0; i < num_stripes; i++) {
3248                 stripe = btrfs_stripe_nr(chunk, i);
3249                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3250                         return 0;
3251         }
3252
3253         return 1;
3254 }
3255
3256 static u64 calc_data_stripes(u64 type, int num_stripes)
3257 {
3258         const int index = btrfs_bg_flags_to_raid_index(type);
3259         const int ncopies = btrfs_raid_array[index].ncopies;
3260         const int nparity = btrfs_raid_array[index].nparity;
3261
3262         if (nparity)
3263                 return num_stripes - nparity;
3264         else
3265                 return num_stripes / ncopies;
3266 }
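/*
 * Worked examples for calc_data_stripes(), using btrfs_raid_array values:
 * raid5 with 4 stripes has nparity = 1, giving 3 data stripes; raid6 with
 * 6 stripes has nparity = 2, giving 4; raid1 (ncopies = 2) with 2 stripes
 * gives 1; raid10 (ncopies = 2) with 8 stripes gives 4.
 */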
3267
3268 /* [pstart, pend) */
3269 static int chunk_drange_filter(struct extent_buffer *leaf,
3270                                struct btrfs_chunk *chunk,
3271                                struct btrfs_balance_args *bargs)
3272 {
3273         struct btrfs_stripe *stripe;
3274         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3275         u64 stripe_offset;
3276         u64 stripe_length;
3277         u64 type;
3278         int factor;
3279         int i;
3280
3281         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3282                 return 0;
3283
3284         type = btrfs_chunk_type(leaf, chunk);
3285         factor = calc_data_stripes(type, num_stripes);
3286
3287         for (i = 0; i < num_stripes; i++) {
3288                 stripe = btrfs_stripe_nr(chunk, i);
3289                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3290                         continue;
3291
3292                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3293                 stripe_length = btrfs_chunk_length(leaf, chunk);
3294                 stripe_length = div_u64(stripe_length, factor);
3295
3296                 if (stripe_offset < bargs->pend &&
3297                     stripe_offset + stripe_length > bargs->pstart)
3298                         return 0;
3299         }
3300
3301         return 1;
3302 }
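/*
 * Example of the drange overlap test above: a raid0 chunk of length 2 GiB
 * over two devices has factor = 2, so each stripe is assumed to cover
 * 1 GiB of its device.  With a hypothetical devid=1, pstart=0 and
 * pend=1073741824, the chunk matches (is not filtered out) iff its stripe
 * on device 1 starts below 1 GiB.
 */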
3303
3304 /* [vstart, vend) */
3305 static int chunk_vrange_filter(struct extent_buffer *leaf,
3306                                struct btrfs_chunk *chunk,
3307                                u64 chunk_offset,
3308                                struct btrfs_balance_args *bargs)
3309 {
3310         if (chunk_offset < bargs->vend &&
3311             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3312                 /* at least part of the chunk is inside this vrange */
3313                 return 0;
3314
3315         return 1;
3316 }
3317
3318 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3319                                struct btrfs_chunk *chunk,
3320                                struct btrfs_balance_args *bargs)
3321 {
3322         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3323
3324         if (bargs->stripes_min <= num_stripes
3325                         && num_stripes <= bargs->stripes_max)
3326                 return 0;
3327
3328         return 1;
3329 }
3330
3331 static int chunk_soft_convert_filter(u64 chunk_type,
3332                                      struct btrfs_balance_args *bargs)
3333 {
3334         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3335                 return 0;
3336
3337         chunk_type = chunk_to_extended(chunk_type) &
3338                                 BTRFS_EXTENDED_PROFILE_MASK;
3339
3340         if (bargs->target == chunk_type)
3341                 return 1;
3342
3343         return 0;
3344 }
3345
3346 static int should_balance_chunk(struct extent_buffer *leaf,
3347                                 struct btrfs_chunk *chunk, u64 chunk_offset)
3348 {
3349         struct btrfs_fs_info *fs_info = leaf->fs_info;
3350         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3351         struct btrfs_balance_args *bargs = NULL;
3352         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3353
3354         /* type filter */
3355         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3356               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3357                 return 0;
3358         }
3359
3360         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3361                 bargs = &bctl->data;
3362         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3363                 bargs = &bctl->sys;
3364         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3365                 bargs = &bctl->meta;
3366
3367         /* profiles filter */
3368         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3369             chunk_profiles_filter(chunk_type, bargs)) {
3370                 return 0;
3371         }
3372
3373         /* usage filter */
3374         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3375             chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3376                 return 0;
3377         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3378             chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3379                 return 0;
3380         }
3381
3382         /* devid filter */
3383         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3384             chunk_devid_filter(leaf, chunk, bargs)) {
3385                 return 0;
3386         }
3387
3388         /* drange filter, makes sense only with devid filter */
3389         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3390             chunk_drange_filter(leaf, chunk, bargs)) {
3391                 return 0;
3392         }
3393
3394         /* vrange filter */
3395         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3396             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3397                 return 0;
3398         }
3399
3400         /* stripes filter */
3401         if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3402             chunk_stripes_range_filter(leaf, chunk, bargs)) {
3403                 return 0;
3404         }
3405
3406         /* soft profile changing mode */
3407         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3408             chunk_soft_convert_filter(chunk_type, bargs)) {
3409                 return 0;
3410         }
3411
3412         /*
3413          * limited by count, must be the last filter
3414          */
3415         if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3416                 if (bargs->limit == 0)
3417                         return 0;
3418                 else
3419                         bargs->limit--;
3420         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3421                 /*
3422                  * Same logic as the 'limit' filter; the minimum cannot be
3423                  * determined here because we do not have the global information
3424                  * about the count of all chunks that satisfy the filters.
3425                  */
3426                 if (bargs->limit_max == 0)
3427                         return 0;
3428                 else
3429                         bargs->limit_max--;
3430         }
3431
3432         return 1;
3433 }
3434
3435 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3436 {
3437         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3438         struct btrfs_root *chunk_root = fs_info->chunk_root;
3439         u64 chunk_type;
3440         struct btrfs_chunk *chunk;
3441         struct btrfs_path *path = NULL;
3442         struct btrfs_key key;
3443         struct btrfs_key found_key;
3444         struct extent_buffer *leaf;
3445         int slot;
3446         int ret;
3447         int enospc_errors = 0;
3448         bool counting = true;
3449         /* The single value limit and min/max limits use the same bytes in the args union */
3450         u64 limit_data = bctl->data.limit;
3451         u64 limit_meta = bctl->meta.limit;
3452         u64 limit_sys = bctl->sys.limit;
3453         u32 count_data = 0;
3454         u32 count_meta = 0;
3455         u32 count_sys = 0;
3456         int chunk_reserved = 0;
3457
3458         path = btrfs_alloc_path();
3459         if (!path) {
3460                 ret = -ENOMEM;
3461                 goto error;
3462         }
3463
3464         /* zero out stat counters */
3465         spin_lock(&fs_info->balance_lock);
3466         memset(&bctl->stat, 0, sizeof(bctl->stat));
3467         spin_unlock(&fs_info->balance_lock);
3468 again:
3469         if (!counting) {
3470                 /*
3471                  * The single value limit and min/max limits use the same bytes
3472                  * in the args union, so restore the saved limits for this pass.
3473                  */
3474                 bctl->data.limit = limit_data;
3475                 bctl->meta.limit = limit_meta;
3476                 bctl->sys.limit = limit_sys;
3477         }
3478         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3479         key.offset = (u64)-1;
3480         key.type = BTRFS_CHUNK_ITEM_KEY;
3481
3482         while (1) {
3483                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3484                     atomic_read(&fs_info->balance_cancel_req)) {
3485                         ret = -ECANCELED;
3486                         goto error;
3487                 }
3488
3489                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3490                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3491                 if (ret < 0) {
3492                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3493                         goto error;
3494                 }
3495
3496                 /*
3497                  * this shouldn't happen, it means the last relocate
3498                  * failed
3499                  */
3500                 if (ret == 0)
3501                         BUG(); /* FIXME break ? */
3502
3503                 ret = btrfs_previous_item(chunk_root, path, 0,
3504                                           BTRFS_CHUNK_ITEM_KEY);
3505                 if (ret) {
3506                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3507                         ret = 0;
3508                         break;
3509                 }
3510
3511                 leaf = path->nodes[0];
3512                 slot = path->slots[0];
3513                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3514
3515                 if (found_key.objectid != key.objectid) {
3516                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3517                         break;
3518                 }
3519
3520                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3521                 chunk_type = btrfs_chunk_type(leaf, chunk);
3522
3523                 if (!counting) {
3524                         spin_lock(&fs_info->balance_lock);
3525                         bctl->stat.considered++;
3526                         spin_unlock(&fs_info->balance_lock);
3527                 }
3528
3529                 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3530
3531                 btrfs_release_path(path);
3532                 if (!ret) {
3533                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3534                         goto loop;
3535                 }
3536
3537                 if (counting) {
3538                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3539                         spin_lock(&fs_info->balance_lock);
3540                         bctl->stat.expected++;
3541                         spin_unlock(&fs_info->balance_lock);
3542
3543                         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3544                                 count_data++;
3545                         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3546                                 count_sys++;
3547                         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3548                                 count_meta++;
3549
3550                         goto loop;
3551                 }
3552
3553                 /*
3554                  * Apply limit_min filter, no need to check if the LIMITS
3555                  * filter is used, limit_min is 0 by default
3556                  */
3557                 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3558                                         count_data < bctl->data.limit_min)
3559                                 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3560                                         count_meta < bctl->meta.limit_min)
3561                                 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3562                                         count_sys < bctl->sys.limit_min)) {
3563                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3564                         goto loop;
3565                 }
3566
3567                 if (!chunk_reserved) {
3568                         /*
3569                          * We may be relocating the only data chunk we have,
3570                          * which could potentially end up with losing data's
3571                          * raid profile, so let's allocate an empty one in
3572                          * advance.
3573                          */
3574                         ret = btrfs_may_alloc_data_chunk(fs_info,
3575                                                          found_key.offset);
3576                         if (ret < 0) {
3577                                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3578                                 goto error;
3579                         } else if (ret == 1) {
3580                                 chunk_reserved = 1;
3581                         }
3582                 }
3583
3584                 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3585                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3586                 if (ret == -ENOSPC) {
3587                         enospc_errors++;
3588                 } else if (ret == -ETXTBSY) {
3589                         btrfs_info(fs_info,
3590            "skipping relocation of block group %llu due to active swapfile",
3591                                    found_key.offset);
3592                         ret = 0;
3593                 } else if (ret) {
3594                         goto error;
3595                 } else {
3596                         spin_lock(&fs_info->balance_lock);
3597                         bctl->stat.completed++;
3598                         spin_unlock(&fs_info->balance_lock);
3599                 }
3600 loop:
3601                 if (found_key.offset == 0)
3602                         break;
3603                 key.offset = found_key.offset - 1;
3604         }
3605
3606         if (counting) {
3607                 btrfs_release_path(path);
3608                 counting = false;
3609                 goto again;
3610         }
3611 error:
3612         btrfs_free_path(path);
3613         if (enospc_errors) {
3614                 btrfs_info(fs_info, "%d enospc errors during balance",
3615                            enospc_errors);
3616                 if (!ret)
3617                         ret = -ENOSPC;
3618         }
3619
3620         return ret;
3621 }
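/*
 * __btrfs_balance() is structured as two passes over the chunk tree with
 * the same walk and filters.  A condensed view of the control flow above:
 *
 *	counting = true;
 * again:
 *	walk chunks newest to oldest:
 *		if (!should_balance_chunk(...))
 *			continue;
 *		if (counting)
 *			bctl->stat.expected++, count_{data,meta,sys}++;
 *		else
 *			btrfs_relocate_chunk(...);
 *	if (counting) { counting = false; goto again; }
 *
 * The first pass only counts matching chunks, which is what makes the
 * limit_min checks in the second pass possible.
 */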
3622
3623 /**
3624  * alloc_profile_is_valid - see if a given profile is valid and reduced
3625  * @flags: profile to validate
3626  * @extended: if true @flags is treated as an extended profile
3627  */
3628 static int alloc_profile_is_valid(u64 flags, int extended)
3629 {
3630         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3631                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3632
3633         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3634
3635         /* 1) check that all other bits are zeroed */
3636         if (flags & ~mask)
3637                 return 0;
3638
3639         /* 2) see if profile is reduced */
3640         if (flags == 0)
3641                 return !extended; /* "0" is valid for usual profiles */
3642
3643         return has_single_bit_set(flags);
3644 }
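/*
 * Examples for alloc_profile_is_valid(): BTRFS_BLOCK_GROUP_RAID1 alone is
 * valid; RAID1 | RAID5 is not (two profile bits set, i.e. not reduced);
 * and 0 is valid only for non-extended profiles, where it means single.
 * In the extended form, single has its own bit
 * (BTRFS_AVAIL_ALLOC_BIT_SINGLE).
 */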
3645
3646 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3647 {
3648         /* cancel requested || normal exit path */
3649         return atomic_read(&fs_info->balance_cancel_req) ||
3650                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3651                  atomic_read(&fs_info->balance_cancel_req) == 0);
3652 }
3653
3654 /* Non-zero return value signifies invalidity */
3655 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3656                 u64 allowed)
3657 {
3658         return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3659                 (!alloc_profile_is_valid(bctl_arg->target, 1) ||
3660                  (bctl_arg->target & ~allowed)));
3661 }
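/*
 * Example: on a two-device filesystem the allowed mask computed in
 * btrfs_balance() below does not include BTRFS_BLOCK_GROUP_RAID10
 * (devs_min is 4), so a convert-to-raid10 request (e.g. "-dconvert=raid10"
 * from the CLI) fails this validation with -EINVAL before any chunk is
 * touched.
 */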
3662
3663 /*
3664  * Fill @buf with textual description of balance filter flags @bargs, up to
3665  * @size_buf including the terminating null. The output may be trimmed if it
3666  * does not fit into the provided buffer.
3667  */
3668 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3669                                  u32 size_buf)
3670 {
3671         int ret;
3672         u32 size_bp = size_buf;
3673         char *bp = buf;
3674         u64 flags = bargs->flags;
3675         char tmp_buf[128] = {'\0'};
3676
3677         if (!flags)
3678                 return;
3679
3680 #define CHECK_APPEND_NOARG(a)                                           \
3681         do {                                                            \
3682                 ret = snprintf(bp, size_bp, (a));                       \
3683                 if (ret < 0 || ret >= size_bp)                          \
3684                         goto out_overflow;                              \
3685                 size_bp -= ret;                                         \
3686                 bp += ret;                                              \
3687         } while (0)
3688
3689 #define CHECK_APPEND_1ARG(a, v1)                                        \
3690         do {                                                            \
3691                 ret = snprintf(bp, size_bp, (a), (v1));                 \
3692                 if (ret < 0 || ret >= size_bp)                          \
3693                         goto out_overflow;                              \
3694                 size_bp -= ret;                                         \
3695                 bp += ret;                                              \
3696         } while (0)
3697
3698 #define CHECK_APPEND_2ARG(a, v1, v2)                                    \
3699         do {                                                            \
3700                 ret = snprintf(bp, size_bp, (a), (v1), (v2));           \
3701                 if (ret < 0 || ret >= size_bp)                          \
3702                         goto out_overflow;                              \
3703                 size_bp -= ret;                                         \
3704                 bp += ret;                                              \
3705         } while (0)
3706
3707         if (flags & BTRFS_BALANCE_ARGS_CONVERT)
3708                 CHECK_APPEND_1ARG("convert=%s,",
3709                                   btrfs_bg_type_to_raid_name(bargs->target));
3710
3711         if (flags & BTRFS_BALANCE_ARGS_SOFT)
3712                 CHECK_APPEND_NOARG("soft,");
3713
3714         if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
3715                 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
3716                                             sizeof(tmp_buf));
3717                 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
3718         }
3719
3720         if (flags & BTRFS_BALANCE_ARGS_USAGE)
3721                 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
3722
3723         if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
3724                 CHECK_APPEND_2ARG("usage=%u..%u,",
3725                                   bargs->usage_min, bargs->usage_max);
3726
3727         if (flags & BTRFS_BALANCE_ARGS_DEVID)
3728                 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
3729
3730         if (flags & BTRFS_BALANCE_ARGS_DRANGE)
3731                 CHECK_APPEND_2ARG("drange=%llu..%llu,",
3732                                   bargs->pstart, bargs->pend);
3733
3734         if (flags & BTRFS_BALANCE_ARGS_VRANGE)
3735                 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
3736                                   bargs->vstart, bargs->vend);
3737
3738         if (flags & BTRFS_BALANCE_ARGS_LIMIT)
3739                 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
3740
3741         if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
3742                 CHECK_APPEND_2ARG("limit=%u..%u,",
3743                                 bargs->limit_min, bargs->limit_max);
3744
3745         if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
3746                 CHECK_APPEND_2ARG("stripes=%u..%u,",
3747                                   bargs->stripes_min, bargs->stripes_max);
3748
3749 #undef CHECK_APPEND_2ARG
3750 #undef CHECK_APPEND_1ARG
3751 #undef CHECK_APPEND_NOARG
3752
3753 out_overflow:
3754
3755         if (size_bp < size_buf)
3756                 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
3757         else
3758                 buf[0] = '\0';
3759 }
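/*
 * Example output of describe_balance_args() for a convert to raid1 with
 * soft mode and a usage filter: the appends produce
 * "convert=raid1,soft,usage=90," and the trailing comma is then trimmed
 * in out_overflow, yielding "convert=raid1,soft,usage=90".
 */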
3760
3761 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
3762 {
3763         u32 size_buf = 1024;
3764         char tmp_buf[192] = {'\0'};
3765         char *buf;
3766         char *bp;
3767         u32 size_bp = size_buf;
3768         int ret;
3769         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3770
3771         buf = kzalloc(size_buf, GFP_KERNEL);
3772         if (!buf)
3773                 return;
3774
3775         bp = buf;
3776
3777 #define CHECK_APPEND_1ARG(a, v1)                                        \
3778         do {                                                            \
3779                 ret = snprintf(bp, size_bp, (a), (v1));                 \
3780                 if (ret < 0 || ret >= size_bp)                          \
3781                         goto out_overflow;                              \
3782                 size_bp -= ret;                                         \
3783                 bp += ret;                                              \
3784         } while (0)
3785
3786         if (bctl->flags & BTRFS_BALANCE_FORCE)
3787                 CHECK_APPEND_1ARG("%s", "-f ");
3788
3789         if (bctl->flags & BTRFS_BALANCE_DATA) {
3790                 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
3791                 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
3792         }
3793
3794         if (bctl->flags & BTRFS_BALANCE_METADATA) {
3795                 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
3796                 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
3797         }
3798
3799         if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
3800                 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
3801                 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
3802         }
3803
3804 #undef CHECK_APPEND_1ARG
3805
3806 out_overflow:
3807
3808         if (size_bp < size_buf)
3809                 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
3810         btrfs_info(fs_info, "balance: %s %s",
3811                    (bctl->flags & BTRFS_BALANCE_RESUME) ?
3812                    "resume" : "start", buf);
3813
3814         kfree(buf);
3815 }
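/*
 * The resulting log line then looks like, for instance (with a
 * hypothetical device name):
 *
 *	BTRFS info (device sda): balance: start -dconvert=raid1,soft -mconvert=raid1
 *
 * where the device prefix comes from the generic btrfs_info() formatting.
 */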
3816
3817 /*
3818  * Should be called with the balance mutex held.
3819  */
3820 int btrfs_balance(struct btrfs_fs_info *fs_info,
3821                   struct btrfs_balance_control *bctl,
3822                   struct btrfs_ioctl_balance_args *bargs)
3823 {
3824         u64 meta_target, data_target;
3825         u64 allowed;
3826         int mixed = 0;
3827         int ret;
3828         u64 num_devices;
3829         unsigned seq;
3830         bool reducing_redundancy;
3831         int i;
3832
3833         if (btrfs_fs_closing(fs_info) ||
3834             atomic_read(&fs_info->balance_pause_req) ||
3835             atomic_read(&fs_info->balance_cancel_req)) {
3836                 ret = -EINVAL;
3837                 goto out;
3838         }
3839
3840         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3841         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3842                 mixed = 1;
3843
3844         /*
3845          * In case of mixed groups both data and meta should be picked,
3846          * and identical options should be given for both of them.
3847          */
3848         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3849         if (mixed && (bctl->flags & allowed)) {
3850                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3851                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3852                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3853                         btrfs_err(fs_info,
3854           "balance: mixed groups data and metadata options must be the same");
3855                         ret = -EINVAL;
3856                         goto out;
3857                 }
3858         }
3859
3860         num_devices = btrfs_num_devices(fs_info);
3861
3862         /*
3863          * SINGLE profile on-disk has no profile bit, but in-memory we have a
3864          * special bit for it, to make it easier to distinguish.  Thus we need
3865          * to set it manually, or balance would refuse the profile.
3866          */
3867         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3868         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
3869                 if (num_devices >= btrfs_raid_array[i].devs_min)
3870                         allowed |= btrfs_raid_array[i].bg_flag;
3871
3872         if (validate_convert_profile(&bctl->data, allowed)) {
3873                 btrfs_err(fs_info,
3874                           "balance: invalid convert data profile %s",
3875                           btrfs_bg_type_to_raid_name(bctl->data.target));
3876                 ret = -EINVAL;
3877                 goto out;
3878         }
3879         if (validate_convert_profile(&bctl->meta, allowed)) {
3880                 btrfs_err(fs_info,
3881                           "balance: invalid convert metadata profile %s",
3882                           btrfs_bg_type_to_raid_name(bctl->meta.target));
3883                 ret = -EINVAL;
3884                 goto out;
3885         }
3886         if (validate_convert_profile(&bctl->sys, allowed)) {
3887                 btrfs_err(fs_info,
3888                           "balance: invalid convert system profile %s",
3889                           btrfs_bg_type_to_raid_name(bctl->sys.target));
3890                 ret = -EINVAL;
3891                 goto out;
3892         }
3893
3894         /*
3895          * Allow to reduce metadata or system integrity only if force set for
3896          * profiles with redundancy (copies, parity)
3897          */
3898         allowed = 0;
3899         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
3900                 if (btrfs_raid_array[i].ncopies >= 2 ||
3901                     btrfs_raid_array[i].tolerated_failures >= 1)
3902                         allowed |= btrfs_raid_array[i].bg_flag;
3903         }
3904         do {
3905                 seq = read_seqbegin(&fs_info->profiles_lock);
3906
3907                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3908                      (fs_info->avail_system_alloc_bits & allowed) &&
3909                      !(bctl->sys.target & allowed)) ||
3910                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3911                      (fs_info->avail_metadata_alloc_bits & allowed) &&
3912                      !(bctl->meta.target & allowed)))
3913                         reducing_redundancy = true;
3914                 else
3915                         reducing_redundancy = false;
3916
3917                 /* if we're not converting, the target field is uninitialized */
3918                 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3919                         bctl->meta.target : fs_info->avail_metadata_alloc_bits;
3920                 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3921                         bctl->data.target : fs_info->avail_data_alloc_bits;
3922         } while (read_seqretry(&fs_info->profiles_lock, seq));
3923
3924         if (reducing_redundancy) {
3925                 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3926                         btrfs_info(fs_info,
3927                            "balance: force reducing metadata redundancy");
3928                 } else {
3929                         btrfs_err(fs_info,
3930         "balance: reduces metadata redundancy, use --force if you want this");
3931                         ret = -EINVAL;
3932                         goto out;
3933                 }
3934         }
3935
3936         if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
3937                 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
3938                 btrfs_warn(fs_info,
3939         "balance: metadata profile %s has lower redundancy than data profile %s",
3940                                 btrfs_bg_type_to_raid_name(meta_target),
3941                                 btrfs_bg_type_to_raid_name(data_target));
3942         }
3943
3944         if (fs_info->send_in_progress) {
3945                 btrfs_warn_rl(fs_info,
3946 "cannot run balance while send operations are in progress (%d in progress)",
3947                               fs_info->send_in_progress);
3948                 ret = -EAGAIN;
3949                 goto out;
3950         }
3951
3952         ret = insert_balance_item(fs_info, bctl);
3953         if (ret && ret != -EEXIST)
3954                 goto out;
3955
3956         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3957                 BUG_ON(ret == -EEXIST);
3958                 BUG_ON(fs_info->balance_ctl);
3959                 spin_lock(&fs_info->balance_lock);
3960                 fs_info->balance_ctl = bctl;
3961                 spin_unlock(&fs_info->balance_lock);
3962         } else {
3963                 BUG_ON(ret != -EEXIST);
3964                 spin_lock(&fs_info->balance_lock);
3965                 update_balance_args(bctl);
3966                 spin_unlock(&fs_info->balance_lock);
3967         }
3968
3969         ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
3970         set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
3971         describe_balance_start_or_resume(fs_info);
3972         mutex_unlock(&fs_info->balance_mutex);
3973
3974         ret = __btrfs_balance(fs_info);
3975
3976         mutex_lock(&fs_info->balance_mutex);
3977         if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
3978                 btrfs_info(fs_info, "balance: paused");
3979         else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
3980                 btrfs_info(fs_info, "balance: canceled");
3981         else
3982                 btrfs_info(fs_info, "balance: ended with status: %d", ret);
3983
3984         clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
3985
3986         if (bargs) {
3987                 memset(bargs, 0, sizeof(*bargs));
3988                 btrfs_update_ioctl_balance_args(fs_info, bargs);
3989         }
3990
3991         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3992             balance_need_close(fs_info)) {
3993                 reset_balance_state(fs_info);
3994                 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3995         }
3996
3997         wake_up(&fs_info->balance_wait_q);
3998
3999         return ret;
4000 out:
4001         if (bctl->flags & BTRFS_BALANCE_RESUME)
4002                 reset_balance_state(fs_info);
4003         else
4004                 kfree(bctl);
4005         clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4006
4007         return ret;
4008 }
4009
4010 static int balance_kthread(void *data)
4011 {
4012         struct btrfs_fs_info *fs_info = data;
4013         int ret = 0;
4014
4015         mutex_lock(&fs_info->balance_mutex);
4016         if (fs_info->balance_ctl)
4017                 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4018         mutex_unlock(&fs_info->balance_mutex);
4019
4020         return ret;
4021 }
4022
4023 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4024 {
4025         struct task_struct *tsk;
4026
4027         mutex_lock(&fs_info->balance_mutex);
4028         if (!fs_info->balance_ctl) {
4029                 mutex_unlock(&fs_info->balance_mutex);
4030                 return 0;
4031         }
4032         mutex_unlock(&fs_info->balance_mutex);
4033
4034         if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4035                 btrfs_info(fs_info, "balance: resume skipped");
4036                 return 0;
4037         }
4038
4039         /*
4040          * A ro->rw remount sequence should continue with the paused balance
4041          * regardless of who paused it, the system or the user, so set
4042          * the resume flag.
4043          */
4044         spin_lock(&fs_info->balance_lock);
4045         fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4046         spin_unlock(&fs_info->balance_lock);
4047
4048         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4049         return PTR_ERR_OR_ZERO(tsk);
4050 }
4051
4052 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4053 {
4054         struct btrfs_balance_control *bctl;
4055         struct btrfs_balance_item *item;
4056         struct btrfs_disk_balance_args disk_bargs;
4057         struct btrfs_path *path;
4058         struct extent_buffer *leaf;
4059         struct btrfs_key key;
4060         int ret;
4061
4062         path = btrfs_alloc_path();
4063         if (!path)
4064                 return -ENOMEM;
4065
4066         key.objectid = BTRFS_BALANCE_OBJECTID;
4067         key.type = BTRFS_TEMPORARY_ITEM_KEY;
4068         key.offset = 0;
4069
4070         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4071         if (ret < 0)
4072                 goto out;
4073         if (ret > 0) { /* ret = -ENOENT; */
4074                 ret = 0;
4075                 goto out;
4076         }
4077
4078         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4079         if (!bctl) {
4080                 ret = -ENOMEM;
4081                 goto out;
4082         }
4083
4084         leaf = path->nodes[0];
4085         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4086
4087         bctl->flags = btrfs_balance_flags(leaf, item);
4088         bctl->flags |= BTRFS_BALANCE_RESUME;
4089
4090         btrfs_balance_data(leaf, item, &disk_bargs);
4091         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4092         btrfs_balance_meta(leaf, item, &disk_bargs);
4093         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4094         btrfs_balance_sys(leaf, item, &disk_bargs);
4095         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4096
4097         /*
4098          * This should never happen, as the paused balance state is recovered
4099          * during mount without any chance for other exclusive ops to collide.
4100          *
4101          * This gives the exclusive op status to balance and keeps in paused
4102          * state until user intervention (cancel or umount). If the ownership
4103          * cannot be assigned, show a message but do not fail. The balance
4104          * is in a paused state and must have fs_info::balance_ctl properly
4105          * set up.
4106          */
4107         if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
4108                 btrfs_warn(fs_info,
4109         "balance: cannot set exclusive op status, resume manually");
4110
4111         mutex_lock(&fs_info->balance_mutex);
4112         BUG_ON(fs_info->balance_ctl);
4113         spin_lock(&fs_info->balance_lock);
4114         fs_info->balance_ctl = bctl;
4115         spin_unlock(&fs_info->balance_lock);
4116         mutex_unlock(&fs_info->balance_mutex);
4117 out:
4118         btrfs_free_path(path);
4119         return ret;
4120 }
4121
4122 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4123 {
4124         int ret = 0;
4125
4126         mutex_lock(&fs_info->balance_mutex);
4127         if (!fs_info->balance_ctl) {
4128                 mutex_unlock(&fs_info->balance_mutex);
4129                 return -ENOTCONN;
4130         }
4131
4132         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4133                 atomic_inc(&fs_info->balance_pause_req);
4134                 mutex_unlock(&fs_info->balance_mutex);
4135
4136                 wait_event(fs_info->balance_wait_q,
4137                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4138
4139                 mutex_lock(&fs_info->balance_mutex);
4140                 /* we are good with balance_ctl ripped off from under us */
4141                 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4142                 atomic_dec(&fs_info->balance_pause_req);
4143         } else {
4144                 ret = -ENOTCONN;
4145         }
4146
4147         mutex_unlock(&fs_info->balance_mutex);
4148         return ret;
4149 }
4150
4151 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4152 {
4153         mutex_lock(&fs_info->balance_mutex);
4154         if (!fs_info->balance_ctl) {
4155                 mutex_unlock(&fs_info->balance_mutex);
4156                 return -ENOTCONN;
4157         }
4158
4159         /*
4160          * A paused balance with the item stored on disk can be resumed at
4161          * mount time if the mount is read-write. Otherwise it's still paused
4162          * and we must not allow cancelling as it deletes the item.
4163          */
4164         if (sb_rdonly(fs_info->sb)) {
4165                 mutex_unlock(&fs_info->balance_mutex);
4166                 return -EROFS;
4167         }
4168
4169         atomic_inc(&fs_info->balance_cancel_req);
4170         /*
4171          * If balance is running, just wait and return; the balance item
4172          * is deleted in btrfs_balance() in this case.
4173          */
4174         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4175                 mutex_unlock(&fs_info->balance_mutex);
4176                 wait_event(fs_info->balance_wait_q,
4177                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4178                 mutex_lock(&fs_info->balance_mutex);
4179         } else {
4180                 mutex_unlock(&fs_info->balance_mutex);
4181                 /*
4182                  * Lock released to allow other waiters to continue; we'll
4183                  * reexamine the status after reacquiring it.
4184                  */
4185                 mutex_lock(&fs_info->balance_mutex);
4186
4187                 if (fs_info->balance_ctl) {
4188                         reset_balance_state(fs_info);
4189                         clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4190                         btrfs_info(fs_info, "balance: canceled");
4191                 }
4192         }
4193
4194         BUG_ON(fs_info->balance_ctl ||
4195                 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4196         atomic_dec(&fs_info->balance_cancel_req);
4197         mutex_unlock(&fs_info->balance_mutex);
4198         return 0;
4199 }
4200
4201 static int btrfs_uuid_scan_kthread(void *data)
4202 {
4203         struct btrfs_fs_info *fs_info = data;
4204         struct btrfs_root *root = fs_info->tree_root;
4205         struct btrfs_key key;
4206         struct btrfs_path *path = NULL;
4207         int ret = 0;
4208         struct extent_buffer *eb;
4209         int slot;
4210         struct btrfs_root_item root_item;
4211         u32 item_size;
4212         struct btrfs_trans_handle *trans = NULL;
4213
4214         path = btrfs_alloc_path();
4215         if (!path) {
4216                 ret = -ENOMEM;
4217                 goto out;
4218         }
4219
4220         key.objectid = 0;
4221         key.type = BTRFS_ROOT_ITEM_KEY;
4222         key.offset = 0;
4223
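        /*
         * Scan all root items in the tree root, starting from the lowest
         * possible key, and add any non-empty uuid/received_uuid found to
         * the UUID tree.
         */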
4224         while (1) {
4225                 ret = btrfs_search_forward(root, &key, path,
4226                                 BTRFS_OLDEST_GENERATION);
4227                 if (ret) {
4228                         if (ret > 0)
4229                                 ret = 0;
4230                         break;
4231                 }
4232
4233                 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4234                     (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4235                      key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4236                     key.objectid > BTRFS_LAST_FREE_OBJECTID)
4237                         goto skip;
4238
4239                 eb = path->nodes[0];
4240                 slot = path->slots[0];
4241                 item_size = btrfs_item_size_nr(eb, slot);
4242                 if (item_size < sizeof(root_item))
4243                         goto skip;
4244
4245                 read_extent_buffer(eb, &root_item,
4246                                    btrfs_item_ptr_offset(eb, slot),
4247                                    (int)sizeof(root_item));
4248                 if (btrfs_root_refs(&root_item) == 0)
4249                         goto skip;
4250
4251                 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4252                     !btrfs_is_empty_uuid(root_item.received_uuid)) {
4253                         if (trans)
4254                                 goto update_tree;
4255
4256                         btrfs_release_path(path);
4257                         /*
4258                          * 1 - subvol uuid item
4259                          * 1 - received_subvol uuid item
4260                          */
4261                         trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4262                         if (IS_ERR(trans)) {
4263                                 ret = PTR_ERR(trans);
4264                                 break;
4265                         }
4266                         continue;
4267                 } else {
4268                         goto skip;
4269                 }
4270 update_tree:
4271                 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4272                         ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4273                                                   BTRFS_UUID_KEY_SUBVOL,
4274                                                   key.objectid);
4275                         if (ret < 0) {
4276                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4277                                         ret);
4278                                 break;
4279                         }
4280                 }
4281
4282                 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4283                         ret = btrfs_uuid_tree_add(trans,
4284                                                   root_item.received_uuid,
4285                                                  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4286                                                   key.objectid);
4287                         if (ret < 0) {
4288                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4289                                         ret);
4290                                 break;
4291                         }
4292                 }
4293
4294 skip:
4295                 if (trans) {
4296                         ret = btrfs_end_transaction(trans);
4297                         trans = NULL;
4298                         if (ret)
4299                                 break;
4300                 }
4301
4302                 btrfs_release_path(path);
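                /*
                 * Advance to the next key in (objectid, type, offset) order:
                 * bump the offset first, then the type, then the objectid.
                 */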
4303                 if (key.offset < (u64)-1) {
4304                         key.offset++;
4305                 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4306                         key.offset = 0;
4307                         key.type = BTRFS_ROOT_ITEM_KEY;
4308                 } else if (key.objectid < (u64)-1) {
4309                         key.offset = 0;
4310                         key.type = BTRFS_ROOT_ITEM_KEY;
4311                         key.objectid++;
4312                 } else {
4313                         break;
4314                 }
4315                 cond_resched();
4316         }
4317
4318 out:
4319         btrfs_free_path(path);
4320         if (trans && !IS_ERR(trans))
4321                 btrfs_end_transaction(trans);
4322         if (ret)
4323                 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4324         else
4325                 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4326         up(&fs_info->uuid_tree_rescan_sem);
4327         return 0;
4328 }
4329
4330 /*
4331  * Callback for btrfs_uuid_tree_iterate().
4332  * returns:
4333  * 0    check succeeded, the entry is not outdated.
4334  * < 0  if an error occurred.
4335  * > 0  if the check failed, which means the caller shall remove the entry.
4336  */
4337 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4338                                        u8 *uuid, u8 type, u64 subid)
4339 {
4340         struct btrfs_key key;
4341         int ret = 0;
4342         struct btrfs_root *subvol_root;
4343
4344         if (type != BTRFS_UUID_KEY_SUBVOL &&
4345             type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4346                 goto out;
4347
4348         key.objectid = subid;
4349         key.type = BTRFS_ROOT_ITEM_KEY;
4350         key.offset = (u64)-1;
4351         subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4352         if (IS_ERR(subvol_root)) {
4353                 ret = PTR_ERR(subvol_root);
4354                 if (ret == -ENOENT)
4355                         ret = 1;
4356                 goto out;
4357         }
4358
4359         switch (type) {
4360         case BTRFS_UUID_KEY_SUBVOL:
4361                 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4362                         ret = 1;
4363                 break;
4364         case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4365                 if (memcmp(uuid, subvol_root->root_item.received_uuid,
4366                            BTRFS_UUID_SIZE))
4367                         ret = 1;
4368                 break;
4369         }
4370
4371 out:
4372         return ret;
4373 }
4374
4375 static int btrfs_uuid_rescan_kthread(void *data)
4376 {
4377         struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4378         int ret;
4379
4380         /*
4381          * 1st step is to iterate through the existing UUID tree and
4382          * to delete all entries that contain outdated data.
4383          * 2nd step is to add all missing entries to the UUID tree.
4384          */
4385         ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4386         if (ret < 0) {
4387                 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4388                 up(&fs_info->uuid_tree_rescan_sem);
4389                 return ret;
4390         }
4391         return btrfs_uuid_scan_kthread(data);
4392 }
4393
4394 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4395 {
4396         struct btrfs_trans_handle *trans;
4397         struct btrfs_root *tree_root = fs_info->tree_root;
4398         struct btrfs_root *uuid_root;
4399         struct task_struct *task;
4400         int ret;
4401
4402         /*
4403          * 1 - root node
4404          * 1 - root item
4405          */
4406         trans = btrfs_start_transaction(tree_root, 2);
4407         if (IS_ERR(trans))
4408                 return PTR_ERR(trans);
4409
4410         uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4411         if (IS_ERR(uuid_root)) {
4412                 ret = PTR_ERR(uuid_root);
4413                 btrfs_abort_transaction(trans, ret);
4414                 btrfs_end_transaction(trans);
4415                 return ret;
4416         }
4417
4418         fs_info->uuid_root = uuid_root;
4419
4420         ret = btrfs_commit_transaction(trans);
4421         if (ret)
4422                 return ret;
4423
4424         down(&fs_info->uuid_tree_rescan_sem);
4425         task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4426         if (IS_ERR(task)) {
4427                 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4428                 btrfs_warn(fs_info, "failed to start uuid_scan task");
4429                 up(&fs_info->uuid_tree_rescan_sem);
4430                 return PTR_ERR(task);
4431         }
4432
4433         return 0;
4434 }
4435
4436 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4437 {
4438         struct task_struct *task;
4439
4440         down(&fs_info->uuid_tree_rescan_sem);
4441         task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4442         if (IS_ERR(task)) {
4443                 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4444                 btrfs_warn(fs_info, "failed to start uuid_rescan task");
4445                 up(&fs_info->uuid_tree_rescan_sem);
4446                 return PTR_ERR(task);
4447         }
4448
4449         return 0;
4450 }
4451
4452 /*
4453  * Shrinking a device means finding all of the device extents past
4454  * the new size, and then following the back refs to the chunks.
4455  * The chunk relocation code actually frees the device extents.
4456  */
4457 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4458 {
4459         struct btrfs_fs_info *fs_info = device->fs_info;
4460         struct btrfs_root *root = fs_info->dev_root;
4461         struct btrfs_trans_handle *trans;
4462         struct btrfs_dev_extent *dev_extent = NULL;
4463         struct btrfs_path *path;
4464         u64 length;
4465         u64 chunk_offset;
4466         int ret;
4467         int slot;
4468         int failed = 0;
4469         bool retried = false;
4470         struct extent_buffer *l;
4471         struct btrfs_key key;
4472         struct btrfs_super_block *super_copy = fs_info->super_copy;
4473         u64 old_total = btrfs_super_total_bytes(super_copy);
4474         u64 old_size = btrfs_device_get_total_bytes(device);
4475         u64 diff;
4476         u64 start;
4477
4478         new_size = round_down(new_size, fs_info->sectorsize);
4479         start = new_size;
4480         diff = round_down(old_size - new_size, fs_info->sectorsize);
4481
4482         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4483                 return -EINVAL;
4484
4485         path = btrfs_alloc_path();
4486         if (!path)
4487                 return -ENOMEM;
4488
4489         path->reada = READA_BACK;
4490
4491         trans = btrfs_start_transaction(root, 0);
4492         if (IS_ERR(trans)) {
4493                 btrfs_free_path(path);
4494                 return PTR_ERR(trans);
4495         }
4496
4497         mutex_lock(&fs_info->chunk_mutex);
4498
4499         btrfs_device_set_total_bytes(device, new_size);
4500         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4501                 device->fs_devices->total_rw_bytes -= diff;
4502                 atomic64_sub(diff, &fs_info->free_chunk_space);
4503         }
4504
4505         /*
4506          * Once the device's size has been set to the new size, ensure all
4507          * in-memory chunks are synced to disk so that the loop below sees them
4508          * and relocates them accordingly.
4509          */
4510         if (contains_pending_extent(device, &start, diff)) {
4511                 mutex_unlock(&fs_info->chunk_mutex);
4512                 ret = btrfs_commit_transaction(trans);
4513                 if (ret)
4514                         goto done;
4515         } else {
4516                 mutex_unlock(&fs_info->chunk_mutex);
4517                 btrfs_end_transaction(trans);
4518         }
4519
4520 again:
4521         key.objectid = device->devid;
4522         key.offset = (u64)-1;
4523         key.type = BTRFS_DEV_EXTENT_KEY;
4524
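        /*
         * Walk the device extents from the end of the device backwards, by
         * searching with offset (u64)-1 and stepping to the previous item,
         * and relocate every chunk that lies past the new size.
         */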
4525         do {
4526                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
4527                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4528                 if (ret < 0) {
4529                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4530                         goto done;
4531                 }
4532
4533                 ret = btrfs_previous_item(root, path, 0, key.type);
4534                 if (ret)
4535                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4536                 if (ret < 0)
4537                         goto done;
4538                 if (ret) {
4539                         ret = 0;
4540                         btrfs_release_path(path);
4541                         break;
4542                 }
4543
4544                 l = path->nodes[0];
4545                 slot = path->slots[0];
4546                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4547
4548                 if (key.objectid != device->devid) {
4549                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4550                         btrfs_release_path(path);
4551                         break;
4552                 }
4553
4554                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4555                 length = btrfs_dev_extent_length(l, dev_extent);
4556
4557                 if (key.offset + length <= new_size) {
4558                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4559                         btrfs_release_path(path);
4560                         break;
4561                 }
4562
4563                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4564                 btrfs_release_path(path);
4565
4566                 /*
4567                  * We may be relocating the only data chunk we have,
4568                  * which could potentially end up losing the data's
4569                  * RAID profile, so let's allocate an empty one in
4570                  * advance.
4571                  */
4572                 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4573                 if (ret < 0) {
4574                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4575                         goto done;
4576                 }
4577
4578                 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4579                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4580                 if (ret == -ENOSPC) {
4581                         failed++;
4582                 } else if (ret) {
4583                         if (ret == -ETXTBSY) {
4584                                 btrfs_warn(fs_info,
4585                    "could not shrink block group %llu due to active swapfile",
4586                                            chunk_offset);
4587                         }
4588                         goto done;
4589                 }
4590         } while (key.offset-- > 0);
4591
4592         if (failed && !retried) {
4593                 failed = 0;
4594                 retried = true;
4595                 goto again;
4596         } else if (failed && retried) {
4597                 ret = -ENOSPC;
4598                 goto done;
4599         }
4600
4601         /* Shrinking succeeded, else we would be at "done". */
4602         trans = btrfs_start_transaction(root, 0);
4603         if (IS_ERR(trans)) {
4604                 ret = PTR_ERR(trans);
4605                 goto done;
4606         }
4607
4608         mutex_lock(&fs_info->chunk_mutex);
4609         btrfs_device_set_disk_total_bytes(device, new_size);
4610         if (list_empty(&device->post_commit_list))
4611                 list_add_tail(&device->post_commit_list,
4612                               &trans->transaction->dev_update_list);
4613
4614         WARN_ON(diff > old_total);
4615         btrfs_set_super_total_bytes(super_copy,
4616                         round_down(old_total - diff, fs_info->sectorsize));
4617         mutex_unlock(&fs_info->chunk_mutex);
4618
4619         /* Now btrfs_update_device() will change the on-disk size. */
4620         ret = btrfs_update_device(trans, device);
4621         if (ret < 0) {
4622                 btrfs_abort_transaction(trans, ret);
4623                 btrfs_end_transaction(trans);
4624         } else {
4625                 ret = btrfs_commit_transaction(trans);
4626         }
4627 done:
4628         btrfs_free_path(path);
4629         if (ret) {
4630                 mutex_lock(&fs_info->chunk_mutex);
4631                 btrfs_device_set_total_bytes(device, old_size);
4632                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4633                         device->fs_devices->total_rw_bytes += diff;
4634                 atomic64_add(diff, &fs_info->free_chunk_space);
4635                 mutex_unlock(&fs_info->chunk_mutex);
4636         }
4637         return ret;
4638 }
4639
4640 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4641                            struct btrfs_key *key,
4642                            struct btrfs_chunk *chunk, int item_size)
4643 {
4644         struct btrfs_super_block *super_copy = fs_info->super_copy;
4645         struct btrfs_disk_key disk_key;
4646         u32 array_size;
4647         u8 *ptr;
4648
4649         mutex_lock(&fs_info->chunk_mutex);
4650         array_size = btrfs_super_sys_array_size(super_copy);
4651         if (array_size + item_size + sizeof(disk_key)
4652                         > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4653                 mutex_unlock(&fs_info->chunk_mutex);
4654                 return -EFBIG;
4655         }
4656
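        /*
         * The sys_chunk_array is a packed sequence of (disk key, chunk item)
         * pairs; append the new pair at the current end of the array.
         */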
4657         ptr = super_copy->sys_chunk_array + array_size;
4658         btrfs_cpu_key_to_disk(&disk_key, key);
4659         memcpy(ptr, &disk_key, sizeof(disk_key));
4660         ptr += sizeof(disk_key);
4661         memcpy(ptr, chunk, item_size);
4662         item_size += sizeof(disk_key);
4663         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4664         mutex_unlock(&fs_info->chunk_mutex);
4665
4666         return 0;
4667 }
4668
4669 /*
4670  * sort the devices in descending order by max_avail, total_avail
4671  */
4672 static int btrfs_cmp_device_info(const void *a, const void *b)
4673 {
4674         const struct btrfs_device_info *di_a = a;
4675         const struct btrfs_device_info *di_b = b;
4676
4677         if (di_a->max_avail > di_b->max_avail)
4678                 return -1;
4679         if (di_a->max_avail < di_b->max_avail)
4680                 return 1;
4681         if (di_a->total_avail > di_b->total_avail)
4682                 return -1;
4683         if (di_a->total_avail < di_b->total_avail)
4684                 return 1;
4685         return 0;
4686 }
4687
4688 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4689 {
4690         if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4691                 return;
4692
4693         btrfs_set_fs_incompat(info, RAID56);
4694 }
4695
4696 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4697                                u64 start, u64 type)
4698 {
4699         struct btrfs_fs_info *info = trans->fs_info;
4700         struct btrfs_fs_devices *fs_devices = info->fs_devices;
4701         struct btrfs_device *device;
4702         struct map_lookup *map = NULL;
4703         struct extent_map_tree *em_tree;
4704         struct extent_map *em;
4705         struct btrfs_device_info *devices_info = NULL;
4706         u64 total_avail;
4707         int num_stripes;        /* total number of stripes to allocate */
4708         int data_stripes;       /* number of stripes that count for
4709                                    block group size */
4710         int sub_stripes;        /* sub_stripes info for map */
4711         int dev_stripes;        /* stripes per dev */
4712         int devs_max;           /* max devs to use */
4713         int devs_min;           /* min devs needed */
4714         int devs_increment;     /* ndevs has to be a multiple of this */
4715         int ncopies;            /* how many copies the data has */
4716         int nparity;            /* number of stripes worth of bytes to
4717                                    store parity information */
4718         int ret;
4719         u64 max_stripe_size;
4720         u64 max_chunk_size;
4721         u64 stripe_size;
4722         u64 chunk_size;
4723         int ndevs;
4724         int i;
4725         int j;
4726         int index;
4727
4728         BUG_ON(!alloc_profile_is_valid(type, 0));
4729
4730         if (list_empty(&fs_devices->alloc_list)) {
4731                 if (btrfs_test_opt(info, ENOSPC_DEBUG))
4732                         btrfs_debug(info, "%s: no writable device", __func__);
4733                 return -ENOSPC;
4734         }
4735
4736         index = btrfs_bg_flags_to_raid_index(type);
4737
4738         sub_stripes = btrfs_raid_array[index].sub_stripes;
4739         dev_stripes = btrfs_raid_array[index].dev_stripes;
4740         devs_max = btrfs_raid_array[index].devs_max;
4741         if (!devs_max)
4742                 devs_max = BTRFS_MAX_DEVS(info);
4743         devs_min = btrfs_raid_array[index].devs_min;
4744         devs_increment = btrfs_raid_array[index].devs_increment;
4745         ncopies = btrfs_raid_array[index].ncopies;
4746         nparity = btrfs_raid_array[index].nparity;
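        /*
         * For example, per the raid attribute table above, RAID10 has
         * sub_stripes = 2, ncopies = 2 and devs_increment = 2: devices are
         * used in pairs and each pair of adjacent stripes mirrors the same
         * data.
         */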
4747
4748         if (type & BTRFS_BLOCK_GROUP_DATA) {
4749                 max_stripe_size = SZ_1G;
4750                 max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4751         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4752                 /* for larger filesystems, use larger metadata chunks */
4753                 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4754                         max_stripe_size = SZ_1G;
4755                 else
4756                         max_stripe_size = SZ_256M;
4757                 max_chunk_size = max_stripe_size;
4758         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4759                 max_stripe_size = SZ_32M;
4760                 max_chunk_size = 2 * max_stripe_size;
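                /*
                 * Cap the device count so that the resulting chunk item
                 * still fits in the superblock's sys_chunk_array.
                 */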
4761                 devs_max = min_t(int, devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
4762         } else {
4763                 btrfs_err(info, "invalid chunk type 0x%llx requested",
4764                        type);
4765                 BUG();
4766         }
4767
4768         /* We don't want a chunk larger than 10% of writable space */
4769         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4770                              max_chunk_size);
4771
4772         devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4773                                GFP_NOFS);
4774         if (!devices_info)
4775                 return -ENOMEM;
4776
4777         /*
4778          * in the first pass through the devices list, we gather information
4779          * about the available holes on each device.
4780          */
4781         ndevs = 0;
4782         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
4783                 u64 max_avail;
4784                 u64 dev_offset;
4785
4786                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4787                         WARN(1, KERN_ERR
4788                                "BTRFS: read-only device in alloc_list\n");
4789                         continue;
4790                 }
4791
4792                 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
4793                                         &device->dev_state) ||
4794                     test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4795                         continue;
4796
4797                 if (device->total_bytes > device->bytes_used)
4798                         total_avail = device->total_bytes - device->bytes_used;
4799                 else
4800                         total_avail = 0;
4801
4802                 /* If there is no space on this device, skip it. */
4803                 if (total_avail == 0)
4804                         continue;
4805
4806                 ret = find_free_dev_extent(device,
4807                                            max_stripe_size * dev_stripes,
4808                                            &dev_offset, &max_avail);
4809                 if (ret && ret != -ENOSPC)
4810                         goto error;
4811
4812                 if (ret == 0)
4813                         max_avail = max_stripe_size * dev_stripes;
4814
4815                 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
4816                         if (btrfs_test_opt(info, ENOSPC_DEBUG))
4817                                 btrfs_debug(info,
4818                         "%s: devid %llu has no free space, have=%llu want=%u",
4819                                             __func__, device->devid, max_avail,
4820                                             BTRFS_STRIPE_LEN * dev_stripes);
4821                         continue;
4822                 }
4823
4824                 if (ndevs == fs_devices->rw_devices) {
4825                         WARN(1, "%s: found more than %llu devices\n",
4826                              __func__, fs_devices->rw_devices);
4827                         break;
4828                 }
4829                 devices_info[ndevs].dev_offset = dev_offset;
4830                 devices_info[ndevs].max_avail = max_avail;
4831                 devices_info[ndevs].total_avail = total_avail;
4832                 devices_info[ndevs].dev = device;
4833                 ++ndevs;
4834         }
4835
4836         /*
4837          * now sort the devices by hole size / available space
4838          */
4839         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4840              btrfs_cmp_device_info, NULL);
4841
4842         /* round down to number of usable stripes */
4843         ndevs = round_down(ndevs, devs_increment);
4844
4845         if (ndevs < devs_min) {
4846                 ret = -ENOSPC;
4847                 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
4848                         btrfs_debug(info,
4849         "%s: not enough devices with free space: have=%d minimum required=%d",
4850                                     __func__, ndevs, devs_min);
4851                 }
4852                 goto error;
4853         }
4854
4855         ndevs = min(ndevs, devs_max);
4856
4857         /*
4858          * The primary goal is to maximize the number of stripes, so use as
4859          * many devices as possible, even if the stripes are not maximum sized.
4860          *
4861          * The DUP profile stores more than one stripe per device; the
4862          * max_avail is the total size, so we have to adjust.
4863          */
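        /*
         * devices_info is sorted in descending order of max_avail, so the
         * last of the ndevs chosen devices has the smallest hole; it bounds
         * the stripe size for all of them.
         */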
4864         stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
4865         num_stripes = ndevs * dev_stripes;
4866
4867         /*
4868          * this will have to be fixed for RAID1 and RAID10 over
4869          * more drives
4870          */
4871         data_stripes = (num_stripes - nparity) / ncopies;
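        /*
         * For example, RAID10 with num_stripes = 4 and ncopies = 2 yields 2
         * data stripes, while RAID6 with num_stripes = 6 and nparity = 2
         * yields 4 data stripes.
         */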
4872
4873         /*
4874          * Use the number of data stripes to figure out how big this chunk
4875          * is really going to be in terms of logical address space,
4876          * and compare that answer with the max chunk size. If it's higher,
4877          * we try to reduce stripe_size.
4878          */
4879         if (stripe_size * data_stripes > max_chunk_size) {
4880                 /*
4881                  * Reduce stripe_size, round it up to a 16MB boundary again and
4882                  * then use it, unless it ends up being even bigger than the
4883                  * previous value we had already.
4884                  */
4885                 stripe_size = min(round_up(div_u64(max_chunk_size,
4886                                                    data_stripes), SZ_16M),
4887                                   stripe_size);
4888         }
4889
4890         /* align to BTRFS_STRIPE_LEN */
4891         stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);
4892
4893         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4894         if (!map) {
4895                 ret = -ENOMEM;
4896                 goto error;
4897         }
4898         map->num_stripes = num_stripes;
4899
4900         for (i = 0; i < ndevs; ++i) {
4901                 for (j = 0; j < dev_stripes; ++j) {
4902                         int s = i * dev_stripes + j;
4903                         map->stripes[s].dev = devices_info[i].dev;
4904                         map->stripes[s].physical = devices_info[i].dev_offset +
4905                                                    j * stripe_size;
4906                 }
4907         }
4908         map->stripe_len = BTRFS_STRIPE_LEN;
4909         map->io_align = BTRFS_STRIPE_LEN;
4910         map->io_width = BTRFS_STRIPE_LEN;
4911         map->type = type;
4912         map->sub_stripes = sub_stripes;
4913
4914         chunk_size = stripe_size * data_stripes;
4915
4916         trace_btrfs_chunk_alloc(info, map, start, chunk_size);
4917
4918         em = alloc_extent_map();
4919         if (!em) {
4920                 kfree(map);
4921                 ret = -ENOMEM;
4922                 goto error;
4923         }
4924         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4925         em->map_lookup = map;
4926         em->start = start;
4927         em->len = chunk_size;
4928         em->block_start = 0;
4929         em->block_len = em->len;
4930         em->orig_block_len = stripe_size;
4931
4932         em_tree = &info->mapping_tree;
4933         write_lock(&em_tree->lock);
4934         ret = add_extent_mapping(em_tree, em, 0);
4935         if (ret) {
4936                 write_unlock(&em_tree->lock);
4937                 free_extent_map(em);
4938                 goto error;
4939         }
4940         write_unlock(&em_tree->lock);
4941
4942         ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
4943         if (ret)
4944                 goto error_del_extent;
4945
4946         for (i = 0; i < map->num_stripes; i++) {
4947                 struct btrfs_device *dev = map->stripes[i].dev;
4948
4949                 btrfs_device_set_bytes_used(dev, dev->bytes_used + stripe_size);
4950                 if (list_empty(&dev->post_commit_list))
4951                         list_add_tail(&dev->post_commit_list,
4952                                       &trans->transaction->dev_update_list);
4953         }
4954
4955         atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
4956
4957         free_extent_map(em);
4958         check_raid56_incompat_flag(info, type);
4959
4960         kfree(devices_info);
4961         return 0;
4962
4963 error_del_extent:
4964         write_lock(&em_tree->lock);
4965         remove_extent_mapping(em_tree, em);
4966         write_unlock(&em_tree->lock);
4967
4968         /* One for our allocation */
4969         free_extent_map(em);
4970         /* One for the tree reference */
4971         free_extent_map(em);
4972 error:
4973         kfree(devices_info);
4974         return ret;
4975 }
4976
4977 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4978                              u64 chunk_offset, u64 chunk_size)
4979 {
4980         struct btrfs_fs_info *fs_info = trans->fs_info;
4981         struct btrfs_root *extent_root = fs_info->extent_root;
4982         struct btrfs_root *chunk_root = fs_info->chunk_root;
4983         struct btrfs_key key;
4984         struct btrfs_device *device;
4985         struct btrfs_chunk *chunk;
4986         struct btrfs_stripe *stripe;
4987         struct extent_map *em;
4988         struct map_lookup *map;
4989         size_t item_size;
4990         u64 dev_offset;
4991         u64 stripe_size;
4992         int i = 0;
4993         int ret = 0;
4994
4995         em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
4996         if (IS_ERR(em))
4997                 return PTR_ERR(em);
4998
4999         map = em->map_lookup;
5000         item_size = btrfs_chunk_item_size(map->num_stripes);
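        /* the chunk item embeds one btrfs_stripe entry per stripe in the map */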
5001         stripe_size = em->orig_block_len;
5002
5003         chunk = kzalloc(item_size, GFP_NOFS);
5004         if (!chunk) {
5005                 ret = -ENOMEM;
5006                 goto out;
5007         }
5008
5009         /*
5010          * Take the device list mutex to prevent races with the final phase of
5011          * a device replace operation that replaces the device object associated
5012          * with the map's stripes, because the device object's id can change
5013          * at any time during that final phase of the device replace operation
5014          * (dev-replace.c:btrfs_dev_replace_finishing()).
5015          */
5016         mutex_lock(&fs_info->fs_devices->device_list_mutex);
5017         for (i = 0; i < map->num_stripes; i++) {
5018                 device = map->stripes[i].dev;
5019                 dev_offset = map->stripes[i].physical;
5020
5021                 ret = btrfs_update_device(trans, device);
5022                 if (ret)
5023                         break;
5024                 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5025                                              dev_offset, stripe_size);
5026                 if (ret)
5027                         break;
5028         }
5029         if (ret) {
5030                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5031                 goto out;
5032         }
5033
5034         stripe = &chunk->stripe;
5035         for (i = 0; i < map->num_stripes; i++) {
5036                 device = map->stripes[i].dev;
5037                 dev_offset = map->stripes[i].physical;
5038
5039                 btrfs_set_stack_stripe_devid(stripe, device->devid);
5040                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5041                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5042                 stripe++;
5043         }
5044         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5045
5046         btrfs_set_stack_chunk_length(chunk, chunk_size);
5047         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5048         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5049         btrfs_set_stack_chunk_type(chunk, map->type);
5050         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5051         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5052         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5053         btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5054         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5055
5056         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5057         key.type = BTRFS_CHUNK_ITEM_KEY;
5058         key.offset = chunk_offset;
5059
5060         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5061         if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5062                 /*
5063                  * TODO: Cleanup of inserted chunk root in case of
5064                  * failure.
5065                  */
5066                 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5067         }
5068
5069 out:
5070         kfree(chunk);
5071         free_extent_map(em);
5072         return ret;
5073 }
5074
5075 /*
5076  * Chunk allocation falls into two parts. The first part does work
5077  * that makes the new allocated chunk usable, but does not do any operation
5078  * that modifies the chunk tree. The second part does the work that
5079  * requires modifying the chunk tree. This division is important for the
5080  * bootstrap process of adding storage to a seed btrfs.
5081  */
5082 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5083 {
5084         u64 chunk_offset;
5085
5086         lockdep_assert_held(&trans->fs_info->chunk_mutex);
5087         chunk_offset = find_next_chunk(trans->fs_info);
5088         return __btrfs_alloc_chunk(trans, chunk_offset, type);
5089 }
5090
5091 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5092 {
5093         struct btrfs_fs_info *fs_info = trans->fs_info;
5094         u64 chunk_offset;
5095         u64 sys_chunk_offset;
5096         u64 alloc_profile;
5097         int ret;
5098
5099         chunk_offset = find_next_chunk(fs_info);
5100         alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5101         ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
5102         if (ret)
5103                 return ret;
5104
5105         sys_chunk_offset = find_next_chunk(fs_info);
5106         alloc_profile = btrfs_system_alloc_profile(fs_info);
5107         ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
5108         return ret;
5109 }
5110
5111 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5112 {
5113         const int index = btrfs_bg_flags_to_raid_index(map->type);
5114
5115         return btrfs_raid_array[index].tolerated_failures;
5116 }
5117
5118 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5119 {
5120         struct extent_map *em;
5121         struct map_lookup *map;
5122         int readonly = 0;
5123         int miss_ndevs = 0;
5124         int i;
5125
5126         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5127         if (IS_ERR(em))
5128                 return 1;
5129
5130         map = em->map_lookup;
5131         for (i = 0; i < map->num_stripes; i++) {
5132                 if (test_bit(BTRFS_DEV_STATE_MISSING,
5133                                         &map->stripes[i].dev->dev_state)) {
5134                         miss_ndevs++;
5135                         continue;
5136                 }
5137                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5138                                         &map->stripes[i].dev->dev_state)) {
5139                         readonly = 1;
5140                         goto end;
5141                 }
5142         }
5143
5144         /*
5145          * If the number of missing devices is larger than max errors,
5146          * we cannot write the data into that chunk successfully, so
5147          * set it readonly.
5148          */
5149         if (miss_ndevs > btrfs_chunk_max_errors(map))
5150                 readonly = 1;
5151 end:
5152         free_extent_map(em);
5153         return readonly;
5154 }
5155
5156 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5157 {
5158         struct extent_map *em;
5159
5160         while (1) {
5161                 write_lock(&tree->lock);
5162                 em = lookup_extent_mapping(tree, 0, (u64)-1);
5163                 if (em)
5164                         remove_extent_mapping(tree, em);
5165                 write_unlock(&tree->lock);
5166                 if (!em)
5167                         break;
5168                 /* once for us */
5169                 free_extent_map(em);
5170                 /* once for the tree */
5171                 free_extent_map(em);
5172         }
5173 }
5174
5175 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5176 {
5177         struct extent_map *em;
5178         struct map_lookup *map;
5179         int ret;
5180
5181         em = btrfs_get_chunk_map(fs_info, logical, len);
5182         if (IS_ERR(em))
5183                 /*
5184                  * We could return errors for these cases, but that could get
5185                  * ugly, and we'd probably do the same thing anyway, which
5186                  * is simply to exit without doing anything else; so return
5187                  * 1 so the callers don't try to use other copies.
5188                  */
5189                 return 1;
5190
5191         map = em->map_lookup;
5192         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5193                 ret = map->num_stripes;
5194         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5195                 ret = map->sub_stripes;
5196         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
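                /* the data itself plus one copy rebuilt from parity */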
5197                 ret = 2;
5198         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5199                 /*
5200                  * There could be two corrupted data stripes, we need
5201                  * to loop retry in order to rebuild the correct data.
5202                  *
5203                  * Fail a stripe at a time on every retry except the
5204                  * stripe under reconstruction.
5205                  */
5206                 ret = map->num_stripes;
5207         else
5208                 ret = 1;
5209         free_extent_map(em);
5210
5211         down_read(&fs_info->dev_replace.rwsem);
5212         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5213             fs_info->dev_replace.tgtdev)
5214                 ret++;
5215         up_read(&fs_info->dev_replace.rwsem);
5216
5217         return ret;
5218 }
5219
5220 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5221                                     u64 logical)
5222 {
5223         struct extent_map *em;
5224         struct map_lookup *map;
5225         unsigned long len = fs_info->sectorsize;
5226
5227         em = btrfs_get_chunk_map(fs_info, logical, len);
5228
5229         if (!WARN_ON(IS_ERR(em))) {
5230                 map = em->map_lookup;
5231                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5232                         len = map->stripe_len * nr_data_stripes(map);
5233                 free_extent_map(em);
5234         }
5235         return len;
5236 }
5237
5238 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5239 {
5240         struct extent_map *em;
5241         struct map_lookup *map;
5242         int ret = 0;
5243
5244         em = btrfs_get_chunk_map(fs_info, logical, len);
5245
5246         if (!WARN_ON(IS_ERR(em))) {
5247                 map = em->map_lookup;
5248                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5249                         ret = 1;
5250                 free_extent_map(em);
5251         }
5252         return ret;
5253 }
5254
5255 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5256                             struct map_lookup *map, int first,
5257                             int dev_replace_is_ongoing)
5258 {
5259         int i;
5260         int num_stripes;
5261         int preferred_mirror;
5262         int tolerance;
5263         struct btrfs_device *srcdev;
5264
5265         ASSERT((map->type &
5266                  (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5267
5268         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5269                 num_stripes = map->sub_stripes;
5270         else
5271                 num_stripes = map->num_stripes;
5272
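        /* spread reads across the mirrors by hashing on the caller's pid */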
5273         preferred_mirror = first + current->pid % num_stripes;
5274
5275         if (dev_replace_is_ongoing &&
5276             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5277              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5278                 srcdev = fs_info->dev_replace.srcdev;
5279         else
5280                 srcdev = NULL;
5281
5282         /*
5283          * try to avoid the drive that is the source drive for a
5284          * dev-replace procedure, only choose it if no other non-missing
5285          * mirror is available
5286          */
5287         for (tolerance = 0; tolerance < 2; tolerance++) {
5288                 if (map->stripes[preferred_mirror].dev->bdev &&
5289                     (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5290                         return preferred_mirror;
5291                 for (i = first; i < first + num_stripes; i++) {
5292                         if (map->stripes[i].dev->bdev &&
5293                             (tolerance || map->stripes[i].dev != srcdev))
5294                                 return i;
5295                 }
5296         }
5297
5298         /* We couldn't find one that doesn't fail; just return something
5299          * and the I/O error handling code will clean up eventually.
5300          */
5301         return preferred_mirror;
5302 }
5303
5304 static inline int parity_smaller(u64 a, u64 b)
5305 {
5306         return a > b;
5307 }
5308
5309 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
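/*
 * The raid_map entries of the parity stripes hold the RAID5_P_STRIPE and
 * RAID6_Q_STRIPE markers, which compare greater than any logical address,
 * so an ascending sort moves them behind the data stripes.
 */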
5310 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5311 {
5312         struct btrfs_bio_stripe s;
5313         int i;
5314         u64 l;
5315         int again = 1;
5316
5317         while (again) {
5318                 again = 0;
5319                 for (i = 0; i < num_stripes - 1; i++) {
5320                         if (parity_smaller(bbio->raid_map[i],
5321                                            bbio->raid_map[i+1])) {
5322                                 s = bbio->stripes[i];
5323                                 l = bbio->raid_map[i];
5324                                 bbio->stripes[i] = bbio->stripes[i+1];
5325                                 bbio->raid_map[i] = bbio->raid_map[i+1];
5326                                 bbio->stripes[i+1] = s;
5327                                 bbio->raid_map[i+1] = l;
5328
5329                                 again = 1;
5330                         }
5331                 }
5332         }
5333 }
5334
5335 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5336 {
5337         struct btrfs_bio *bbio = kzalloc(
5338                  /* the size of the btrfs_bio */
5339                 sizeof(struct btrfs_bio) +
5340                 /* plus the variable array for the stripes */
5341                 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5342                 /* plus the variable array for the tgt dev */
5343                 sizeof(int) * (real_stripes) +
5344                 /*
5345                  * plus the raid_map, which includes both the tgt dev
5346                  * and the stripes
5347                  */
5348                 sizeof(u64) * (total_stripes),
5349                 GFP_NOFS|__GFP_NOFAIL);
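        /* __GFP_NOFAIL means this allocation cannot fail, so no NULL check */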
5350
5351         atomic_set(&bbio->error, 0);
5352         refcount_set(&bbio->refs, 1);
5353
5354         return bbio;
5355 }
5356
5357 void btrfs_get_bbio(struct btrfs_bio *bbio)
5358 {
5359         WARN_ON(!refcount_read(&bbio->refs));
5360         refcount_inc(&bbio->refs);
5361 }
5362
5363 void btrfs_put_bbio(struct btrfs_bio *bbio)
5364 {
5365         if (!bbio)
5366                 return;
5367         if (refcount_dec_and_test(&bbio->refs))
5368                 kfree(bbio);
5369 }
5370
5371 /*
5372  * Can REQ_OP_DISCARD be sent together with other REQ ops like REQ_OP_WRITE?
5373  * Note that a discard won't be sent to the target device of a device
5374  * replace operation.
5375  */
5376 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5377                                          u64 logical, u64 length,
5378                                          struct btrfs_bio **bbio_ret)
5379 {
5380         struct extent_map *em;
5381         struct map_lookup *map;
5382         struct btrfs_bio *bbio;
5383         u64 offset;
5384         u64 stripe_nr;
5385         u64 stripe_nr_end;
5386         u64 stripe_end_offset;
5387         u64 stripe_cnt;
5388         u64 stripe_len;
5389         u64 stripe_offset;
5390         u64 num_stripes;
5391         u32 stripe_index;
5392         u32 factor = 0;
5393         u32 sub_stripes = 0;
5394         u64 stripes_per_dev = 0;
5395         u32 remaining_stripes = 0;
5396         u32 last_stripe = 0;
5397         int ret = 0;
5398         int i;
5399
5400         /* discard always returns a bbio */
5401         ASSERT(bbio_ret);
5402
5403         em = btrfs_get_chunk_map(fs_info, logical, length);
5404         if (IS_ERR(em))
5405                 return PTR_ERR(em);
5406
5407         map = em->map_lookup;
5408         /* we don't discard raid56 yet */
5409         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5410                 ret = -EOPNOTSUPP;
5411                 goto out;
5412         }
5413
5414         offset = logical - em->start;
5415         length = min_t(u64, em->len - offset, length);
5416
5417         stripe_len = map->stripe_len;
5418         /*
5419          * stripe_nr counts the total number of stripes we have to stride
5420          * to get to this block
5421          */
5422         stripe_nr = div64_u64(offset, stripe_len);
5423
5424         /* stripe_offset is the offset of this block in its stripe */
5425         stripe_offset = offset - stripe_nr * stripe_len;
5426
5427         stripe_nr_end = round_up(offset + length, map->stripe_len);
5428         stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5429         stripe_cnt = stripe_nr_end - stripe_nr;
5430         stripe_end_offset = stripe_nr_end * map->stripe_len -
5431                             (offset + length);
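        /*
         * For example, assuming a 64K stripe_len: offset 96K and length 192K
         * give stripe_nr 1, stripe_offset 32K, stripe_nr_end 5, stripe_cnt 4
         * and stripe_end_offset 32K.
         */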
5432         /*
5433          * after this, stripe_nr is the number of stripes on this
5434          * device we have to walk to find the data, and stripe_index is
5435          * the number of our device in the stripe array
5436          */
5437         num_stripes = 1;
5438         stripe_index = 0;
5439         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5440                          BTRFS_BLOCK_GROUP_RAID10)) {
5441                 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5442                         sub_stripes = 1;
5443                 else
5444                         sub_stripes = map->sub_stripes;
5445
5446                 factor = map->num_stripes / sub_stripes;
5447                 num_stripes = min_t(u64, map->num_stripes,
5448                                     sub_stripes * stripe_cnt);
5449                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5450                 stripe_index *= sub_stripes;
5451                 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5452                                               &remaining_stripes);
5453                 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5454                 last_stripe *= sub_stripes;
5455         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5456                                 BTRFS_BLOCK_GROUP_DUP)) {
5457                 num_stripes = map->num_stripes;
5458         } else {
5459                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5460                                         &stripe_index);
5461         }
5462
5463         bbio = alloc_btrfs_bio(num_stripes, 0);
5464         if (!bbio) {
5465                 ret = -ENOMEM;
5466                 goto out;
5467         }
5468
5469         for (i = 0; i < num_stripes; i++) {
5470                 bbio->stripes[i].physical =
5471                         map->stripes[stripe_index].physical +
5472                         stripe_offset + stripe_nr * map->stripe_len;
5473                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5474
5475                 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5476                                  BTRFS_BLOCK_GROUP_RAID10)) {
5477                         bbio->stripes[i].length = stripes_per_dev *
5478                                 map->stripe_len;
5479
5480                         if (i / sub_stripes < remaining_stripes)
5481                                 bbio->stripes[i].length +=
5482                                         map->stripe_len;
5483
5484                         /*
5485                          * Special for the first stripe and
5486                          * the last stripe:
5487                          *
5488                          * |-------|...|-------|
5489                          *     |----------|
5490                          *    off     end_off
5491                          */
5492                         if (i < sub_stripes)
5493                                 bbio->stripes[i].length -=
5494                                         stripe_offset;
5495
5496                         if (stripe_index >= last_stripe &&
5497                             stripe_index <= (last_stripe +
5498                                              sub_stripes - 1))
5499                                 bbio->stripes[i].length -=
5500                                         stripe_end_offset;
5501
5502                         if (i == sub_stripes - 1)
5503                                 stripe_offset = 0;
5504                 } else {
5505                         bbio->stripes[i].length = length;
5506                 }
5507
5508                 stripe_index++;
5509                 if (stripe_index == map->num_stripes) {
5510                         stripe_index = 0;
5511                         stripe_nr++;
5512                 }
5513         }
5514
5515         *bbio_ret = bbio;
5516         bbio->map_type = map->type;
5517         bbio->num_stripes = num_stripes;
5518 out:
5519         free_extent_map(em);
5520         return ret;
5521 }
5522
5523 /*
5524  * In dev-replace case, for repair case (that's the only case where the mirror
5525  * is selected explicitly when calling btrfs_map_block), blocks left of the
5526  * left cursor can also be read from the target drive.
5527  *
5528  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5529  * array of stripes.
5530  * For READ, it also needs to be supported using the same mirror number.
5531  *
5532  * If the requested block is not left of the left cursor, EIO is returned. This
5533  * can happen because btrfs_num_copies() returns one more in the dev-replace
5534  * case.
5535  */
5536 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5537                                          u64 logical, u64 length,
5538                                          u64 srcdev_devid, int *mirror_num,
5539                                          u64 *physical)
5540 {
5541         struct btrfs_bio *bbio = NULL;
5542         int num_stripes;
5543         int index_srcdev = 0;
5544         int found = 0;
5545         u64 physical_of_found = 0;
5546         int i;
5547         int ret = 0;
5548
5549         ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5550                                 logical, &length, &bbio, 0, 0);
5551         if (ret) {
5552                 ASSERT(bbio == NULL);
5553                 return ret;
5554         }
5555
5556         num_stripes = bbio->num_stripes;
5557         if (*mirror_num > num_stripes) {
5558                 /*
5559                  * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5560                  * that means that the requested area is not left of the left
5561                  * cursor
5562                  */
5563                 btrfs_put_bbio(bbio);
5564                 return -EIO;
5565         }
5566
5567         /*
5568          * Process the rest of the function using the mirror_num of the
5569          * source drive; therefore look it up first. At the end, patch the
5570          * device pointer to that of the target drive.
5571          */
5572         for (i = 0; i < num_stripes; i++) {
5573                 if (bbio->stripes[i].dev->devid != srcdev_devid)
5574                         continue;
5575
5576                 /*
5577                  * In case of DUP, in order to keep it simple, only add the
5578                  * mirror with the lowest physical address
5579                  */
5580                 if (found &&
5581                     physical_of_found <= bbio->stripes[i].physical)
5582                         continue;
5583
5584                 index_srcdev = i;
5585                 found = 1;
5586                 physical_of_found = bbio->stripes[i].physical;
5587         }
5588
5589         btrfs_put_bbio(bbio);
5590
5591         ASSERT(found);
5592         if (!found)
5593                 return -EIO;
5594
5595         *mirror_num = index_srcdev + 1;
5596         *physical = physical_of_found;
5597         return ret;
5598 }
5599
5600 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5601                                       struct btrfs_bio **bbio_ret,
5602                                       struct btrfs_dev_replace *dev_replace,
5603                                       int *num_stripes_ret, int *max_errors_ret)
5604 {
5605         struct btrfs_bio *bbio = *bbio_ret;
5606         u64 srcdev_devid = dev_replace->srcdev->devid;
5607         int tgtdev_indexes = 0;
5608         int num_stripes = *num_stripes_ret;
5609         int max_errors = *max_errors_ret;
5610         int i;
5611
5612         if (op == BTRFS_MAP_WRITE) {
5613                 int index_where_to_add;
5614
5615                 /*
5616                  * duplicate the write operations while the dev replace
5617                  * procedure is running. Since the copying of the old disk to
5618                  * the new disk takes place at run time while the filesystem is
5619                  * mounted writable, the regular write operations to the old
5620                  * disk have to be duplicated to go to the new disk as well.
5621                  *
5622                  * Note that device->missing is handled by the caller, and that
5623                  * the write to the old disk is already set up in the stripes
5624                  * array.
5625                  */
5626                 index_where_to_add = num_stripes;
5627                 for (i = 0; i < num_stripes; i++) {
5628                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5629                                 /* write to new disk, too */
5630                                 struct btrfs_bio_stripe *new =
5631                                         bbio->stripes + index_where_to_add;
5632                                 struct btrfs_bio_stripe *old =
5633                                         bbio->stripes + i;
5634
5635                                 new->physical = old->physical;
5636                                 new->length = old->length;
5637                                 new->dev = dev_replace->tgtdev;
5638                                 bbio->tgtdev_map[i] = index_where_to_add;
5639                                 index_where_to_add++;
5640                                 max_errors++;
5641                                 tgtdev_indexes++;
5642                         }
5643                 }
5644                 num_stripes = index_where_to_add;
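                /*
                 * Example (illustrative): for a RAID1 write with one stripe
                 * on the replace source device and one on another device,
                 * a third stripe pointing at the target device is appended
                 * above, so num_stripes goes from 2 to 3 and max_errors
                 * from 1 to 2.
                 */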
5645         } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5646                 int index_srcdev = 0;
5647                 int found = 0;
5648                 u64 physical_of_found = 0;
5649
5650                 /*
5651                  * During the dev-replace procedure, the target drive can also
5652                  * be used to read data in case it is needed to repair a corrupt
5653                  * block elsewhere. This is possible if the requested area is
5654                  * left of the left cursor. In this area, the target drive is a
5655                  * full copy of the source drive.
5656                  */
5657                 for (i = 0; i < num_stripes; i++) {
5658                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5659                                 /*
5660                                  * In case of DUP, in order to keep it simple,
5661                                  * only add the mirror with the lowest physical
5662                                  * address
5663                                  */
5664                                 if (found &&
5665                                     physical_of_found <=
5666                                      bbio->stripes[i].physical)
5667                                         continue;
5668                                 index_srcdev = i;
5669                                 found = 1;
5670                                 physical_of_found = bbio->stripes[i].physical;
5671                         }
5672                 }
5673                 if (found) {
5674                         struct btrfs_bio_stripe *tgtdev_stripe =
5675                                 bbio->stripes + num_stripes;
5676
5677                         tgtdev_stripe->physical = physical_of_found;
5678                         tgtdev_stripe->length =
5679                                 bbio->stripes[index_srcdev].length;
5680                         tgtdev_stripe->dev = dev_replace->tgtdev;
5681                         bbio->tgtdev_map[index_srcdev] = num_stripes;
5682
5683                         tgtdev_indexes++;
5684                         num_stripes++;
5685                 }
5686         }
5687
5688         *num_stripes_ret = num_stripes;
5689         *max_errors_ret = max_errors;
5690         bbio->num_tgtdevs = tgtdev_indexes;
5691         *bbio_ret = bbio;
5692 }
5693
5694 static bool need_full_stripe(enum btrfs_map_op op)
5695 {
5696         return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5697 }
5698
5699 /*
5700  * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
5701  *                     tuple. This information is used to calculate how big a
5702  *                     particular bio can get before it straddles a stripe.
5703  *
5704  * @fs_info - the filesystem
5705  * @logical - address that we want to figure out the geometry of
5706  * @len     - the length of IO we are going to perform, starting at @logical
5707  * @op      - type of operation - write or read
5708  * @io_geom - pointer used to return values
5709  *
5710  * Returns < 0 in case a chunk for the given logical address cannot be found
5711  * (which usually shouldn't happen unless @logical is corrupted), 0 otherwise.
5712  */
5713 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5714                         u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
5715 {
5716         struct extent_map *em;
5717         struct map_lookup *map;
5718         u64 offset;
5719         u64 stripe_offset;
5720         u64 stripe_nr;
5721         u64 stripe_len;
5722         u64 raid56_full_stripe_start = (u64)-1;
5723         int data_stripes;
5724         int ret = 0;
5725
5726         ASSERT(op != BTRFS_MAP_DISCARD);
5727
5728         em = btrfs_get_chunk_map(fs_info, logical, len);
5729         if (IS_ERR(em))
5730                 return PTR_ERR(em);
5731
5732         map = em->map_lookup;
5733         /* Offset of this logical address in the chunk */
5734         offset = logical - em->start;
5735         /* Length of a stripe in a chunk */
5736         stripe_len = map->stripe_len;
5737         /* Stripe where this block falls in */
5738         stripe_nr = div64_u64(offset, stripe_len);
5739         /* Offset of stripe in the chunk */
5740         stripe_offset = stripe_nr * stripe_len;
5741         if (offset < stripe_offset) {
5742                 btrfs_crit(fs_info,
5743 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
5744                         stripe_offset, offset, em->start, logical, stripe_len);
5745                 ret = -EINVAL;
5746                 goto out;
5747         }
5748
5749         /* stripe_offset is the offset of this block in its stripe */
5750         stripe_offset = offset - stripe_offset;
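        /*
         * Worked example (illustrative values): with stripe_len == 64K and
         * offset == 200K, stripe_nr == 3, the stripe starts at 192K within
         * the chunk, and stripe_offset == 8K into that stripe.
         */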
5751         data_stripes = nr_data_stripes(map);
5752
5753         if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5754                 u64 max_len = stripe_len - stripe_offset;
5755
5756                 /*
5757                  * In case of raid56, we need to know the stripe aligned start
5758                  */
5759                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5760                         unsigned long full_stripe_len = stripe_len * data_stripes;
5761                         raid56_full_stripe_start = offset;
5762
5763                         /*
5764                          * Allow a write of a full stripe, but make sure we
5765                          * don't allow straddling of stripes
5766                          */
5767                         raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5768                                         full_stripe_len);
5769                         raid56_full_stripe_start *= full_stripe_len;
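                        /*
                         * E.g. (illustrative): RAID5 on 3 devices has 2 data
                         * stripes; with stripe_len == 64K the full stripe is
                         * 128K, so offset 200K rounds down to a full stripe
                         * start of 128K.
                         */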
5770
5771                         /*
5772                          * For writes to RAID[56], allow a full stripeset across
5773                          * all disks. For other RAID types and for RAID[56]
5774                          * reads, just allow a single stripe (on a single disk).
5775                          */
5776                         if (op == BTRFS_MAP_WRITE) {
5777                                 max_len = stripe_len * data_stripes -
5778                                           (offset - raid56_full_stripe_start);
5779                         }
5780                 }
5781                 len = min_t(u64, em->len - offset, max_len);
5782         } else {
5783                 len = em->len - offset;
5784         }
5785
5786         io_geom->len = len;
5787         io_geom->offset = offset;
5788         io_geom->stripe_len = stripe_len;
5789         io_geom->stripe_nr = stripe_nr;
5790         io_geom->stripe_offset = stripe_offset;
5791         io_geom->raid56_stripe_offset = raid56_full_stripe_start;
5792
5793 out:
5794         /* once for us */
5795         free_extent_map(em);
5796         return ret;
5797 }
5798
5799 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
5800                              enum btrfs_map_op op,
5801                              u64 logical, u64 *length,
5802                              struct btrfs_bio **bbio_ret,
5803                              int mirror_num, int need_raid_map)
5804 {
5805         struct extent_map *em;
5806         struct map_lookup *map;
5807         u64 stripe_offset;
5808         u64 stripe_nr;
5809         u64 stripe_len;
5810         u32 stripe_index;
5811         int data_stripes;
5812         int i;
5813         int ret = 0;
5814         int num_stripes;
5815         int max_errors = 0;
5816         int tgtdev_indexes = 0;
5817         struct btrfs_bio *bbio = NULL;
5818         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5819         int dev_replace_is_ongoing = 0;
5820         int num_alloc_stripes;
5821         int patch_the_first_stripe_for_dev_replace = 0;
5822         u64 physical_to_patch_in_first_stripe = 0;
5823         u64 raid56_full_stripe_start = (u64)-1;
5824         struct btrfs_io_geometry geom;
5825
5826         ASSERT(bbio_ret);
5827
5828         if (op == BTRFS_MAP_DISCARD)
5829                 return __btrfs_map_block_for_discard(fs_info, logical,
5830                                                      *length, bbio_ret);
5831
5832         ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
5833         if (ret < 0)
5834                 return ret;
5835
5836         em = btrfs_get_chunk_map(fs_info, logical, *length);
5837         ASSERT(!IS_ERR(em));
5838         map = em->map_lookup;
5839
5840         *length = geom.len;
5841         stripe_len = geom.stripe_len;
5842         stripe_nr = geom.stripe_nr;
5843         stripe_offset = geom.stripe_offset;
5844         raid56_full_stripe_start = geom.raid56_stripe_offset;
5845         data_stripes = nr_data_stripes(map);
5846
5847         down_read(&dev_replace->rwsem);
5848         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5849         /*
5850          * Hold the semaphore for read during the whole operation; write is
5851          * requested at commit time but must wait.
5852          */
5853         if (!dev_replace_is_ongoing)
5854                 up_read(&dev_replace->rwsem);
5855
5856         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5857             !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
5858                 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
5859                                                     dev_replace->srcdev->devid,
5860                                                     &mirror_num,
5861                                             &physical_to_patch_in_first_stripe);
5862                 if (ret)
5863                         goto out;
5864                 else
5865                         patch_the_first_stripe_for_dev_replace = 1;
5866         } else if (mirror_num > map->num_stripes) {
5867                 mirror_num = 0;
5868         }
5869
5870         num_stripes = 1;
5871         stripe_index = 0;
5872         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5873                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5874                                 &stripe_index);
5875                 if (!need_full_stripe(op))
5876                         mirror_num = 1;
5877         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
5878                 if (need_full_stripe(op))
5879                         num_stripes = map->num_stripes;
5880                 else if (mirror_num)
5881                         stripe_index = mirror_num - 1;
5882                 else {
5883                         stripe_index = find_live_mirror(fs_info, map, 0,
5884                                             dev_replace_is_ongoing);
5885                         mirror_num = stripe_index + 1;
5886                 }
5887
5888         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5889                 if (need_full_stripe(op)) {
5890                         num_stripes = map->num_stripes;
5891                 } else if (mirror_num) {
5892                         stripe_index = mirror_num - 1;
5893                 } else {
5894                         mirror_num = 1;
5895                 }
5896
5897         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5898                 u32 factor = map->num_stripes / map->sub_stripes;
5899
5900                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5901                 stripe_index *= map->sub_stripes;
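                /*
                 * E.g. (illustrative): 4 devices with sub_stripes == 2 give
                 * factor == 2; logical stripe_nr 5 becomes on-device
                 * stripe_nr 2 with stripe_index 2, i.e. the second pair of
                 * mirrored devices.
                 */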
5902
5903                 if (need_full_stripe(op))
5904                         num_stripes = map->sub_stripes;
5905                 else if (mirror_num)
5906                         stripe_index += mirror_num - 1;
5907                 else {
5908                         int old_stripe_index = stripe_index;
5909                         stripe_index = find_live_mirror(fs_info, map,
5910                                               stripe_index,
5911                                               dev_replace_is_ongoing);
5912                         mirror_num = stripe_index - old_stripe_index + 1;
5913                 }
5914
5915         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5916                 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
5917                         /* push stripe_nr back to the start of the full stripe */
5918                         stripe_nr = div64_u64(raid56_full_stripe_start,
5919                                         stripe_len * data_stripes);
5920
5921                         /* RAID[56] write or recovery. Return all stripes */
5922                         num_stripes = map->num_stripes;
5923                         max_errors = nr_parity_stripes(map);
5924
5925                         *length = map->stripe_len;
5926                         stripe_index = 0;
5927                         stripe_offset = 0;
5928                 } else {
5929                         /*
5930                          * Mirror #0 or #1 means the original data block.
5931                          * Mirror #2 is RAID5 parity block.
5932                          * Mirror #3 is RAID6 Q block.
5933                          */
5934                         stripe_nr = div_u64_rem(stripe_nr,
5935                                         data_stripes, &stripe_index);
5936                         if (mirror_num > 1)
5937                                 stripe_index = data_stripes + mirror_num - 2;
5938
5939                         /* We distribute the parity blocks across stripes */
5940                         div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5941                                         &stripe_index);
5942                         if (!need_full_stripe(op) && mirror_num <= 1)
5943                                 mirror_num = 1;
5944                 }
5945         } else {
5946                 /*
5947                  * after this, stripe_nr is the number of stripes on this
5948                  * device we have to walk to find the data, and stripe_index is
5949                  * the number of our device in the stripe array
5950                  */
5951                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5952                                 &stripe_index);
5953                 mirror_num = stripe_index + 1;
5954         }
5955         if (stripe_index >= map->num_stripes) {
5956                 btrfs_crit(fs_info,
5957                            "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
5958                            stripe_index, map->num_stripes);
5959                 ret = -EINVAL;
5960                 goto out;
5961         }
5962
5963         num_alloc_stripes = num_stripes;
5964         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
5965                 if (op == BTRFS_MAP_WRITE)
5966                         num_alloc_stripes <<= 1;
5967                 if (op == BTRFS_MAP_GET_READ_MIRRORS)
5968                         num_alloc_stripes++;
5969                 tgtdev_indexes = num_stripes;
5970         }
5971
5972         bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5973         if (!bbio) {
5974                 ret = -ENOMEM;
5975                 goto out;
5976         }
5977         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
5978                 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5979
5980         /* build raid_map */
5981         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
5982             (need_full_stripe(op) || mirror_num > 1)) {
5983                 u64 tmp;
5984                 unsigned rot;
5985
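                /*
                 * The raid_map lives in the same allocation as the bbio,
                 * right behind the stripes array and the tgtdev_map (see
                 * alloc_btrfs_bio()), hence the pointer arithmetic below.
                 */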
5986                 bbio->raid_map = (u64 *)((void *)bbio->stripes +
5987                                  sizeof(struct btrfs_bio_stripe) *
5988                                  num_alloc_stripes +
5989                                  sizeof(int) * tgtdev_indexes);
5990
5991                 /* Work out the disk rotation on this stripe-set */
5992                 div_u64_rem(stripe_nr, num_stripes, &rot);
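                /*
                 * E.g. (illustrative): on a 3-device RAID5, full stripe 0
                 * (rot == 0) puts the parity at index 2, full stripe 1
                 * (rot == 1) puts it at index 0, rotating the parity by one
                 * position per full stripe.
                 */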
5993
5994                 /* Fill in the logical address of each stripe */
5995                 tmp = stripe_nr * data_stripes;
5996                 for (i = 0; i < data_stripes; i++)
5997                         bbio->raid_map[(i+rot) % num_stripes] =
5998                                 em->start + (tmp + i) * map->stripe_len;
5999
6000                 bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
6001                 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6002                         bbio->raid_map[(i+rot+1) % num_stripes] =
6003                                 RAID6_Q_STRIPE;
6004         }
6005
6006
6007         for (i = 0; i < num_stripes; i++) {
6008                 bbio->stripes[i].physical =
6009                         map->stripes[stripe_index].physical +
6010                         stripe_offset +
6011                         stripe_nr * map->stripe_len;
6012                 bbio->stripes[i].dev =
6013                         map->stripes[stripe_index].dev;
6014                 stripe_index++;
6015         }
6016
6017         if (need_full_stripe(op))
6018                 max_errors = btrfs_chunk_max_errors(map);
6019
6020         if (bbio->raid_map)
6021                 sort_parity_stripes(bbio, num_stripes);
6022
6023         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6024             need_full_stripe(op)) {
6025                 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6026                                           &max_errors);
6027         }
6028
6029         *bbio_ret = bbio;
6030         bbio->map_type = map->type;
6031         bbio->num_stripes = num_stripes;
6032         bbio->max_errors = max_errors;
6033         bbio->mirror_num = mirror_num;
6034
6035         /*
6036          * This is the case where REQ_READ && dev_replace_is_ongoing &&
6037          * mirror_num == num_stripes + 1 && dev_replace target drive is
6038          * available as a mirror.
6039          */
6040         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6041                 WARN_ON(num_stripes > 1);
6042                 bbio->stripes[0].dev = dev_replace->tgtdev;
6043                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6044                 bbio->mirror_num = map->num_stripes + 1;
6045         }
6046 out:
6047         if (dev_replace_is_ongoing) {
6048                 lockdep_assert_held(&dev_replace->rwsem);
6049                 /* Unlock and let waiting writers proceed */
6050                 up_read(&dev_replace->rwsem);
6051         }
6052         free_extent_map(em);
6053         return ret;
6054 }
6055
6056 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6057                       u64 logical, u64 *length,
6058                       struct btrfs_bio **bbio_ret, int mirror_num)
6059 {
6060         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6061                                  mirror_num, 0);
6062 }
6063
6064 /* For Scrub/replace */
6065 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6066                      u64 logical, u64 *length,
6067                      struct btrfs_bio **bbio_ret)
6068 {
6069         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6070 }
6071
6072 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
6073                      u64 physical, u64 **logical, int *naddrs, int *stripe_len)
6074 {
6075         struct extent_map *em;
6076         struct map_lookup *map;
6077         u64 *buf;
6078         u64 bytenr;
6079         u64 length;
6080         u64 stripe_nr;
6081         u64 rmap_len;
6082         int i, j, nr = 0;
6083
6084         em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
6085         if (IS_ERR(em))
6086                 return -EIO;
6087
6088         map = em->map_lookup;
6089         length = em->len;
6090         rmap_len = map->stripe_len;
6091
6092         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
6093                 length = div_u64(length, map->num_stripes / map->sub_stripes);
6094         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6095                 length = div_u64(length, map->num_stripes);
6096         else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6097                 length = div_u64(length, nr_data_stripes(map));
6098                 rmap_len = map->stripe_len * nr_data_stripes(map);
6099         }
6100
6101         buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
6102         BUG_ON(!buf); /* -ENOMEM */
6103
6104         for (i = 0; i < map->num_stripes; i++) {
6105                 if (map->stripes[i].physical > physical ||
6106                     map->stripes[i].physical + length <= physical)
6107                         continue;
6108
6109                 stripe_nr = physical - map->stripes[i].physical;
6110                 stripe_nr = div64_u64(stripe_nr, map->stripe_len);
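                /*
                 * E.g. (illustrative): for RAID0 on 2 devices with
                 * stripe_len == 64K, a physical address 128K into stripe
                 * i == 1 yields stripe_nr == 2 here, which maps back to
                 * logical chunk_start + (2 * 2 + 1) * 64K below.
                 */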
6111
6112                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6113                         stripe_nr = stripe_nr * map->num_stripes + i;
6114                         stripe_nr = div_u64(stripe_nr, map->sub_stripes);
6115                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6116                         stripe_nr = stripe_nr * map->num_stripes + i;
6117                 }
6118                 /* else if RAID[56], multiply by nr_data_stripes(). Alternatively,
6119                  * just use rmap_len below instead of map->stripe_len. */
6120
6121                 bytenr = chunk_start + stripe_nr * rmap_len;
6122                 WARN_ON(nr >= map->num_stripes);
6123                 for (j = 0; j < nr; j++) {
6124                         if (buf[j] == bytenr)
6125                                 break;
6126                 }
6127                 if (j == nr) {
6128                         WARN_ON(nr >= map->num_stripes);
6129                         buf[nr++] = bytenr;
6130                 }
6131         }
6132
6133         *logical = buf;
6134         *naddrs = nr;
6135         *stripe_len = rmap_len;
6136
6137         free_extent_map(em);
6138         return 0;
6139 }
6140
6141 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6142 {
6143         bio->bi_private = bbio->private;
6144         bio->bi_end_io = bbio->end_io;
6145         bio_endio(bio);
6146
6147         btrfs_put_bbio(bbio);
6148 }
6149
6150 static void btrfs_end_bio(struct bio *bio)
6151 {
6152         struct btrfs_bio *bbio = bio->bi_private;
6153         int is_orig_bio = 0;
6154
6155         if (bio->bi_status) {
6156                 atomic_inc(&bbio->error);
6157                 if (bio->bi_status == BLK_STS_IOERR ||
6158                     bio->bi_status == BLK_STS_TARGET) {
6159                         unsigned int stripe_index =
6160                                 btrfs_io_bio(bio)->stripe_index;
6161                         struct btrfs_device *dev;
6162
6163                         BUG_ON(stripe_index >= bbio->num_stripes);
6164                         dev = bbio->stripes[stripe_index].dev;
6165                         if (dev->bdev) {
6166                                 if (bio_op(bio) == REQ_OP_WRITE)
6167                                         btrfs_dev_stat_inc_and_print(dev,
6168                                                 BTRFS_DEV_STAT_WRITE_ERRS);
6169                                 else if (!(bio->bi_opf & REQ_RAHEAD))
6170                                         btrfs_dev_stat_inc_and_print(dev,
6171                                                 BTRFS_DEV_STAT_READ_ERRS);
6172                                 if (bio->bi_opf & REQ_PREFLUSH)
6173                                         btrfs_dev_stat_inc_and_print(dev,
6174                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
6175                         }
6176                 }
6177         }
6178
6179         if (bio == bbio->orig_bio)
6180                 is_orig_bio = 1;
6181
6182         btrfs_bio_counter_dec(bbio->fs_info);
6183
6184         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6185                 if (!is_orig_bio) {
6186                         bio_put(bio);
6187                         bio = bbio->orig_bio;
6188                 }
6189
6190                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6191                 /* only send an error to the higher layers if it is
6192                  * beyond the tolerance of the btrfs bio
6193                  */
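                /*
                 * E.g. a RAID1 write has max_errors == 1, so losing a
                 * single mirror still completes the bio successfully.
                 */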
6194                 if (atomic_read(&bbio->error) > bbio->max_errors) {
6195                         bio->bi_status = BLK_STS_IOERR;
6196                 } else {
6197                         /*
6198                          * this bio is actually up to date, we didn't
6199                          * go over the max number of errors
6200                          */
6201                         bio->bi_status = BLK_STS_OK;
6202                 }
6203
6204                 btrfs_end_bbio(bbio, bio);
6205         } else if (!is_orig_bio) {
6206                 bio_put(bio);
6207         }
6208 }
6209
6210 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6211                               u64 physical, int dev_nr)
6212 {
6213         struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6214         struct btrfs_fs_info *fs_info = bbio->fs_info;
6215
6216         bio->bi_private = bbio;
6217         btrfs_io_bio(bio)->stripe_index = dev_nr;
6218         bio->bi_end_io = btrfs_end_bio;
6219         bio->bi_iter.bi_sector = physical >> 9;
6220         btrfs_debug_in_rcu(fs_info,
6221         "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6222                 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6223                 (u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
6224                 bio->bi_iter.bi_size);
6225         bio_set_dev(bio, dev->bdev);
6226
6227         btrfs_bio_counter_inc_noblocked(fs_info);
6228
6229         btrfsic_submit_bio(bio);
6230 }
6231
6232 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6233 {
6234         atomic_inc(&bbio->error);
6235         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6236                 /* Should be the original bio. */
6237                 WARN_ON(bio != bbio->orig_bio);
6238
6239                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6240                 bio->bi_iter.bi_sector = logical >> 9;
6241                 if (atomic_read(&bbio->error) > bbio->max_errors)
6242                         bio->bi_status = BLK_STS_IOERR;
6243                 else
6244                         bio->bi_status = BLK_STS_OK;
6245                 btrfs_end_bbio(bbio, bio);
6246         }
6247 }
6248
6249 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6250                            int mirror_num)
6251 {
6252         struct btrfs_device *dev;
6253         struct bio *first_bio = bio;
6254         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6255         u64 length = 0;
6256         u64 map_length;
6257         int ret;
6258         int dev_nr;
6259         int total_devs;
6260         struct btrfs_bio *bbio = NULL;
6261
6262         length = bio->bi_iter.bi_size;
6263         map_length = length;
6264
6265         btrfs_bio_counter_inc_blocked(fs_info);
6266         ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6267                                 &map_length, &bbio, mirror_num, 1);
6268         if (ret) {
6269                 btrfs_bio_counter_dec(fs_info);
6270                 return errno_to_blk_status(ret);
6271         }
6272
6273         total_devs = bbio->num_stripes;
6274         bbio->orig_bio = first_bio;
6275         bbio->private = first_bio->bi_private;
6276         bbio->end_io = first_bio->bi_end_io;
6277         bbio->fs_info = fs_info;
6278         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6279
6280         if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6281             ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6282                 /* In this case, map_length has been set to the length of
6283                  * a single stripe, not the whole write. */
6284                 if (bio_op(bio) == REQ_OP_WRITE) {
6285                         ret = raid56_parity_write(fs_info, bio, bbio,
6286                                                   map_length);
6287                 } else {
6288                         ret = raid56_parity_recover(fs_info, bio, bbio,
6289                                                     map_length, mirror_num, 1);
6290                 }
6291
6292                 btrfs_bio_counter_dec(fs_info);
6293                 return errno_to_blk_status(ret);
6294         }
6295
6296         if (map_length < length) {
6297                 btrfs_crit(fs_info,
6298                            "mapping failed logical %llu bio len %llu len %llu",
6299                            logical, length, map_length);
6300                 BUG();
6301         }
6302
6303         for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6304                 dev = bbio->stripes[dev_nr].dev;
6305                 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6306                                                    &dev->dev_state) ||
6307                     (bio_op(first_bio) == REQ_OP_WRITE &&
6308                     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6309                         bbio_error(bbio, first_bio, logical);
6310                         continue;
6311                 }
6312
6313                 if (dev_nr < total_devs - 1)
6314                         bio = btrfs_bio_clone(first_bio);
6315                 else
6316                         bio = first_bio;
6317
6318                 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
6319                                   dev_nr);
6320         }
6321         btrfs_bio_counter_dec(fs_info);
6322         return BLK_STS_OK;
6323 }
6324
6325 /*
6326  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6327  * return NULL.
6328  *
6329  * If devid and uuid are both specified, the match must be exact, otherwise
6330  * only devid is used.
6331  *
6332  * If @seed is true, traverse through the seed devices.
6333  */
6334 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6335                                        u64 devid, u8 *uuid, u8 *fsid,
6336                                        bool seed)
6337 {
6338         struct btrfs_device *device;
6339
6340         while (fs_devices) {
6341                 if (!fsid ||
6342                     !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6343                         list_for_each_entry(device, &fs_devices->devices,
6344                                             dev_list) {
6345                                 if (device->devid == devid &&
6346                                     (!uuid || memcmp(device->uuid, uuid,
6347                                                      BTRFS_UUID_SIZE) == 0))
6348                                         return device;
6349                         }
6350                 }
6351                 if (seed)
6352                         fs_devices = fs_devices->seed;
6353                 else
6354                         return NULL;
6355         }
6356         return NULL;
6357 }
6358
6359 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6360                                             u64 devid, u8 *dev_uuid)
6361 {
6362         struct btrfs_device *device;
6363
6364         device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6365         if (IS_ERR(device))
6366                 return device;
6367
6368         list_add(&device->dev_list, &fs_devices->devices);
6369         device->fs_devices = fs_devices;
6370         fs_devices->num_devices++;
6371
6372         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6373         fs_devices->missing_devices++;
6374
6375         return device;
6376 }
6377
6378 /**
6379  * btrfs_alloc_device - allocate struct btrfs_device
6380  * @fs_info:    used only for generating a new devid, can be NULL if
6381  *              devid is provided (i.e. @devid != NULL).
6382  * @devid:      a pointer to devid for this device.  If NULL a new devid
6383  *              is generated.
6384  * @uuid:       a pointer to UUID for this device.  If NULL a new UUID
6385  *              is generated.
6386  *
6387  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6388  * on error.  Returned struct is not linked onto any lists and must be
6389  * destroyed with btrfs_free_device.
6390  */
6391 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6392                                         const u64 *devid,
6393                                         const u8 *uuid)
6394 {
6395         struct btrfs_device *dev;
6396         u64 tmp;
6397
6398         if (WARN_ON(!devid && !fs_info))
6399                 return ERR_PTR(-EINVAL);
6400
6401         dev = __alloc_device();
6402         if (IS_ERR(dev))
6403                 return dev;
6404
6405         if (devid)
6406                 tmp = *devid;
6407         else {
6408                 int ret;
6409
6410                 ret = find_next_devid(fs_info, &tmp);
6411                 if (ret) {
6412                         btrfs_free_device(dev);
6413                         return ERR_PTR(ret);
6414                 }
6415         }
6416         dev->devid = tmp;
6417
6418         if (uuid)
6419                 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6420         else
6421                 generate_random_uuid(dev->uuid);
6422
6423         return dev;
6424 }
6425
6426 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6427                                         u64 devid, u8 *uuid, bool error)
6428 {
6429         if (error)
6430                 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6431                               devid, uuid);
6432         else
6433                 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6434                               devid, uuid);
6435 }
6436
6437 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6438 {
6439         int index = btrfs_bg_flags_to_raid_index(type);
6440         int ncopies = btrfs_raid_array[index].ncopies;
6441         int data_stripes;
6442
6443         switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6444         case BTRFS_BLOCK_GROUP_RAID5:
6445                 data_stripes = num_stripes - 1;
6446                 break;
6447         case BTRFS_BLOCK_GROUP_RAID6:
6448                 data_stripes = num_stripes - 2;
6449                 break;
6450         default:
6451                 data_stripes = num_stripes / ncopies;
6452                 break;
6453         }
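        /*
         * E.g. (illustrative): a 1G RAID1 chunk (num_stripes == 2,
         * ncopies == 2) has one data stripe and occupies 1G on each device;
         * a 3G RAID5 chunk over 4 devices has 3 data stripes and likewise
         * occupies 1G per device.
         */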
6454         return div_u64(chunk_len, data_stripes);
6455 }
6456
6457 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6458                           struct btrfs_chunk *chunk)
6459 {
6460         struct btrfs_fs_info *fs_info = leaf->fs_info;
6461         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6462         struct map_lookup *map;
6463         struct extent_map *em;
6464         u64 logical;
6465         u64 length;
6466         u64 devid;
6467         u8 uuid[BTRFS_UUID_SIZE];
6468         int num_stripes;
6469         int ret;
6470         int i;
6471
6472         logical = key->offset;
6473         length = btrfs_chunk_length(leaf, chunk);
6474         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6475
6476         /*
6477          * Only need to verify chunk item if we're reading from sys chunk array,
6478          * as chunk item in tree block is already verified by tree-checker.
6479          */
6480         if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6481                 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6482                 if (ret)
6483                         return ret;
6484         }
6485
6486         read_lock(&map_tree->lock);
6487         em = lookup_extent_mapping(map_tree, logical, 1);
6488         read_unlock(&map_tree->lock);
6489
6490         /* already mapped? */
6491         if (em && em->start <= logical && em->start + em->len > logical) {
6492                 free_extent_map(em);
6493                 return 0;
6494         } else if (em) {
6495                 free_extent_map(em);
6496         }
6497
6498         em = alloc_extent_map();
6499         if (!em)
6500                 return -ENOMEM;
6501         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6502         if (!map) {
6503                 free_extent_map(em);
6504                 return -ENOMEM;
6505         }
6506
6507         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6508         em->map_lookup = map;
6509         em->start = logical;
6510         em->len = length;
6511         em->orig_start = 0;
6512         em->block_start = 0;
6513         em->block_len = em->len;
6514
6515         map->num_stripes = num_stripes;
6516         map->io_width = btrfs_chunk_io_width(leaf, chunk);
6517         map->io_align = btrfs_chunk_io_align(leaf, chunk);
6518         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6519         map->type = btrfs_chunk_type(leaf, chunk);
6520         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6521         map->verified_stripes = 0;
6522         em->orig_block_len = calc_stripe_length(map->type, em->len,
6523                                                 map->num_stripes);
6524         for (i = 0; i < num_stripes; i++) {
6525                 map->stripes[i].physical =
6526                         btrfs_stripe_offset_nr(leaf, chunk, i);
6527                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6528                 read_extent_buffer(leaf, uuid, (unsigned long)
6529                                    btrfs_stripe_dev_uuid_nr(chunk, i),
6530                                    BTRFS_UUID_SIZE);
6531                 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6532                                                         devid, uuid, NULL, true);
6533                 if (!map->stripes[i].dev &&
6534                     !btrfs_test_opt(fs_info, DEGRADED)) {
6535                         free_extent_map(em);
6536                         btrfs_report_missing_device(fs_info, devid, uuid, true);
6537                         return -ENOENT;
6538                 }
6539                 if (!map->stripes[i].dev) {
6540                         map->stripes[i].dev =
6541                                 add_missing_dev(fs_info->fs_devices, devid,
6542                                                 uuid);
6543                         if (IS_ERR(map->stripes[i].dev)) {
6544                                 free_extent_map(em);
6545                                 btrfs_err(fs_info,
6546                                         "failed to init missing dev %llu: %ld",
6547                                         devid, PTR_ERR(map->stripes[i].dev));
6548                                 return PTR_ERR(map->stripes[i].dev);
6549                         }
6550                         btrfs_report_missing_device(fs_info, devid, uuid, false);
6551                 }
6552                 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6553                                 &(map->stripes[i].dev->dev_state));
6554
6555         }
6556
6557         write_lock(&map_tree->lock);
6558         ret = add_extent_mapping(map_tree, em, 0);
6559         write_unlock(&map_tree->lock);
6560         if (ret < 0) {
6561                 btrfs_err(fs_info,
6562                           "failed to add chunk map, start=%llu len=%llu: %d",
6563                           em->start, em->len, ret);
6564         }
6565         free_extent_map(em);
6566
6567         return ret;
6568 }
6569
6570 static void fill_device_from_item(struct extent_buffer *leaf,
6571                                  struct btrfs_dev_item *dev_item,
6572                                  struct btrfs_device *device)
6573 {
6574         unsigned long ptr;
6575
6576         device->devid = btrfs_device_id(leaf, dev_item);
6577         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6578         device->total_bytes = device->disk_total_bytes;
6579         device->commit_total_bytes = device->disk_total_bytes;
6580         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6581         device->commit_bytes_used = device->bytes_used;
6582         device->type = btrfs_device_type(leaf, dev_item);
6583         device->io_align = btrfs_device_io_align(leaf, dev_item);
6584         device->io_width = btrfs_device_io_width(leaf, dev_item);
6585         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6586         WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6587         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6588
6589         ptr = btrfs_device_uuid(dev_item);
6590         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6591 }
6592
6593 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6594                                                   u8 *fsid)
6595 {
6596         struct btrfs_fs_devices *fs_devices;
6597         int ret;
6598
6599         lockdep_assert_held(&uuid_mutex);
6600         ASSERT(fsid);
6601
6602         fs_devices = fs_info->fs_devices->seed;
6603         while (fs_devices) {
6604                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6605                         return fs_devices;
6606
6607                 fs_devices = fs_devices->seed;
6608         }
6609
6610         fs_devices = find_fsid(fsid, NULL);
6611         if (!fs_devices) {
6612                 if (!btrfs_test_opt(fs_info, DEGRADED))
6613                         return ERR_PTR(-ENOENT);
6614
6615                 fs_devices = alloc_fs_devices(fsid, NULL);
6616                 if (IS_ERR(fs_devices))
6617                         return fs_devices;
6618
6619                 fs_devices->seeding = 1;
6620                 fs_devices->opened = 1;
6621                 return fs_devices;
6622         }
6623
6624         fs_devices = clone_fs_devices(fs_devices);
6625         if (IS_ERR(fs_devices))
6626                 return fs_devices;
6627
6628         ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6629         if (ret) {
6630                 free_fs_devices(fs_devices);
6631                 fs_devices = ERR_PTR(ret);
6632                 goto out;
6633         }
6634
6635         if (!fs_devices->seeding) {
6636                 close_fs_devices(fs_devices);
6637                 free_fs_devices(fs_devices);
6638                 fs_devices = ERR_PTR(-EINVAL);
6639                 goto out;
6640         }
6641
6642         fs_devices->seed = fs_info->fs_devices->seed;
6643         fs_info->fs_devices->seed = fs_devices;
6644 out:
6645         return fs_devices;
6646 }
6647
6648 static int read_one_dev(struct extent_buffer *leaf,
6649                         struct btrfs_dev_item *dev_item)
6650 {
6651         struct btrfs_fs_info *fs_info = leaf->fs_info;
6652         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6653         struct btrfs_device *device;
6654         u64 devid;
6655         int ret;
6656         u8 fs_uuid[BTRFS_FSID_SIZE];
6657         u8 dev_uuid[BTRFS_UUID_SIZE];
6658
6659         devid = btrfs_device_id(leaf, dev_item);
6660         read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6661                            BTRFS_UUID_SIZE);
6662         read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6663                            BTRFS_FSID_SIZE);
6664
6665         if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6666                 fs_devices = open_seed_devices(fs_info, fs_uuid);
6667                 if (IS_ERR(fs_devices))
6668                         return PTR_ERR(fs_devices);
6669         }
6670
6671         device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6672                                    fs_uuid, true);
6673         if (!device) {
6674                 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6675                         btrfs_report_missing_device(fs_info, devid,
6676                                                         dev_uuid, true);
6677                         return -ENOENT;
6678                 }
6679
6680                 device = add_missing_dev(fs_devices, devid, dev_uuid);
6681                 if (IS_ERR(device)) {
6682                         btrfs_err(fs_info,
6683                                 "failed to add missing dev %llu: %ld",
6684                                 devid, PTR_ERR(device));
6685                         return PTR_ERR(device);
6686                 }
6687                 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6688         } else {
6689                 if (!device->bdev) {
6690                         if (!btrfs_test_opt(fs_info, DEGRADED)) {
6691                                 btrfs_report_missing_device(fs_info,
6692                                                 devid, dev_uuid, true);
6693                                 return -ENOENT;
6694                         }
6695                         btrfs_report_missing_device(fs_info, devid,
6696                                                         dev_uuid, false);
6697                 }
6698
6699                 if (!device->bdev &&
6700                     !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6701                         /*
6702                          * This happens when a device that was properly set up
6703                          * in the device info lists suddenly goes bad.
6704                          * device->bdev is NULL, and so we have to set the
6705                          * device as missing here.
6706                          */
6707                         device->fs_devices->missing_devices++;
6708                         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6709                 }
6710
6711                 /* Move the device to its own fs_devices */
6712                 if (device->fs_devices != fs_devices) {
6713                         ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6714                                                         &device->dev_state));
6715
6716                         list_move(&device->dev_list, &fs_devices->devices);
6717                         device->fs_devices->num_devices--;
6718                         fs_devices->num_devices++;
6719
6720                         device->fs_devices->missing_devices--;
6721                         fs_devices->missing_devices++;
6722
6723                         device->fs_devices = fs_devices;
6724                 }
6725         }
6726
6727         if (device->fs_devices != fs_info->fs_devices) {
6728                 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6729                 if (device->generation !=
6730                     btrfs_device_generation(leaf, dev_item))
6731                         return -EINVAL;
6732         }
6733
6734         fill_device_from_item(leaf, dev_item, device);
6735         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6736         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6737            !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6738                 device->fs_devices->total_rw_bytes += device->total_bytes;
6739                 atomic64_add(device->total_bytes - device->bytes_used,
6740                                 &fs_info->free_chunk_space);
6741         }
6742         ret = 0;
6743         return ret;
6744 }
6745
6746 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6747 {
6748         struct btrfs_root *root = fs_info->tree_root;
6749         struct btrfs_super_block *super_copy = fs_info->super_copy;
6750         struct extent_buffer *sb;
6751         struct btrfs_disk_key *disk_key;
6752         struct btrfs_chunk *chunk;
6753         u8 *array_ptr;
6754         unsigned long sb_array_offset;
6755         int ret = 0;
6756         u32 num_stripes;
6757         u32 array_size;
6758         u32 len = 0;
6759         u32 cur_offset;
6760         u64 type;
6761         struct btrfs_key key;
6762
6763         ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6764         /*
6765          * This will create an extent buffer of nodesize; superblock size is
6766          * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6767          * overallocate, but we can keep it as-is since only the first page is used.
6768          */
6769         sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6770         if (IS_ERR(sb))
6771                 return PTR_ERR(sb);
6772         set_extent_buffer_uptodate(sb);
6773         btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6774         /*
6775          * The sb extent buffer is artificial and just used to read the system array.
6776          * set_extent_buffer_uptodate() call does not properly mark all its
6777          * pages up-to-date when the page is larger: extent does not cover the
6778          * whole page and consequently check_page_uptodate does not find all
6779          * the page's extents up-to-date (the hole beyond sb),
6780          * write_extent_buffer then triggers a WARN_ON.
6781          *
6782          * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6783          * but sb spans only this function. Add an explicit SetPageUptodate call
6784          * to silence the warning e.g. on PowerPC 64.
6785          */
6786         if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6787                 SetPageUptodate(sb->pages[0]);
6788
6789         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6790         array_size = btrfs_super_sys_array_size(super_copy);
6791
6792         array_ptr = super_copy->sys_chunk_array;
6793         sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6794         cur_offset = 0;
6795
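        /*
         * The sys_chunk_array is a packed sequence of
         * (struct btrfs_disk_key, struct btrfs_chunk) pairs; each loop
         * iteration below consumes one key and the chunk item that
         * follows it, whose size depends on the stripe count.
         */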
6796         while (cur_offset < array_size) {
6797                 disk_key = (struct btrfs_disk_key *)array_ptr;
6798                 len = sizeof(*disk_key);
6799                 if (cur_offset + len > array_size)
6800                         goto out_short_read;
6801
6802                 btrfs_disk_key_to_cpu(&key, disk_key);
6803
6804                 array_ptr += len;
6805                 sb_array_offset += len;
6806                 cur_offset += len;
6807
6808                 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
6809                         btrfs_err(fs_info,
6810                             "unexpected item type %u in sys_array at offset %u",
6811                                   (u32)key.type, cur_offset);
6812                         ret = -EIO;
6813                         break;
6814                 }
6815
6816                 chunk = (struct btrfs_chunk *)sb_array_offset;
6817                 /*
6818                  * At least one btrfs_chunk with one stripe must be present,
6819                  * exact stripe count check comes afterwards
6820                  */
6821                 len = btrfs_chunk_item_size(1);
6822                 if (cur_offset + len > array_size)
6823                         goto out_short_read;
6824
6825                 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6826                 if (!num_stripes) {
6827                         btrfs_err(fs_info,
6828                         "invalid number of stripes %u in sys_array at offset %u",
6829                                   num_stripes, cur_offset);
6830                         ret = -EIO;
6831                         break;
6832                 }
6833
6834                 type = btrfs_chunk_type(sb, chunk);
6835                 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6836                         btrfs_err(fs_info,
6837                         "invalid chunk type %llu in sys_array at offset %u",
6838                                   type, cur_offset);
6839                         ret = -EIO;
6840                         break;
6841                 }
6842
6843                 len = btrfs_chunk_item_size(num_stripes);
6844                 if (cur_offset + len > array_size)
6845                         goto out_short_read;
6846
6847                 ret = read_one_chunk(&key, sb, chunk);
6848                 if (ret)
6849                         break;
6850
6851                 array_ptr += len;
6852                 sb_array_offset += len;
6853                 cur_offset += len;
6854         }
6855         clear_extent_buffer_uptodate(sb);
6856         free_extent_buffer_stale(sb);
6857         return ret;
6858
6859 out_short_read:
6860         btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
6861                         len, cur_offset);
6862         clear_extent_buffer_uptodate(sb);
6863         free_extent_buffer_stale(sb);
6864         return -EIO;
6865 }
6866
6867 /*
6868  * Check if all chunks in the fs are OK for read-write degraded mount
6869  *
6870  * If the @failing_dev is specified, it's accounted as missing.
6871  *
6872  * Return true if all chunks meet the minimal RW mount requirements.
6873  * Return false if any chunk doesn't meet the minimal RW mount requirements.
6874  */
6875 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
6876                                         struct btrfs_device *failing_dev)
6877 {
6878         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6879         struct extent_map *em;
6880         u64 next_start = 0;
6881         bool ret = true;
6882
6883         read_lock(&map_tree->lock);
6884         em = lookup_extent_mapping(map_tree, 0, (u64)-1);
6885         read_unlock(&map_tree->lock);
6886         /* No chunk at all? Return false anyway */
6887         if (!em) {
6888                 ret = false;
6889                 goto out;
6890         }
6891         while (em) {
6892                 struct map_lookup *map;
6893                 int missing = 0;
6894                 int max_tolerated;
6895                 int i;
6896
6897                 map = em->map_lookup;
6898                 max_tolerated =
6899                         btrfs_get_num_tolerated_disk_barrier_failures(
6900                                         map->type);
6901                 for (i = 0; i < map->num_stripes; i++) {
6902                         struct btrfs_device *dev = map->stripes[i].dev;
6903
6904                         if (!dev || !dev->bdev ||
6905                             test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
6906                             dev->last_flush_error)
6907                                 missing++;
6908                         else if (failing_dev && failing_dev == dev)
6909                                 missing++;
6910                 }
6911                 if (missing > max_tolerated) {
6912                         if (!failing_dev)
6913                                 btrfs_warn(fs_info,
6914         "chunk %llu missing %d devices, max tolerance is %d for writable mount",
6915                                    em->start, missing, max_tolerated);
6916                         free_extent_map(em);
6917                         ret = false;
6918                         goto out;
6919                 }
6920                 next_start = extent_map_end(em);
6921                 free_extent_map(em);
6922
6923                 read_lock(&map_tree->lock);
6924                 em = lookup_extent_mapping(map_tree, next_start,
6925                                            (u64)(-1) - next_start);
6926                 read_unlock(&map_tree->lock);
6927         }
6928 out:
6929         return ret;
6930 }
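
/*
 * A sketch of the policy the loop above implements, using the
 * tolerated_failures values from btrfs_raid_array (illustrative):
 *
 *	RAID1 chunk, 2 stripes, both devices gone:  missing = 2 > 1 -> not rw
 *	RAID1 chunk, 2 stripes, one device gone:    missing = 1 <= 1 -> rw ok
 *	RAID0/SINGLE chunk, any device gone:        missing = 1 > 0  -> not rw
 *
 * A single chunk failing the check makes the whole fs unsafe to write, so
 * the scan stops at the first offender.
 */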
6931
6932 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
6933 {
6934         struct btrfs_root *root = fs_info->chunk_root;
6935         struct btrfs_path *path;
6936         struct extent_buffer *leaf;
6937         struct btrfs_key key;
6938         struct btrfs_key found_key;
6939         int ret;
6940         int slot;
6941         u64 total_dev = 0;
6942
6943         path = btrfs_alloc_path();
6944         if (!path)
6945                 return -ENOMEM;
6946
6947         /*
6948          * The uuid_mutex is needed only if we are mounting a sprout FS;
6949          * otherwise it is not required.
6950          */
6951         mutex_lock(&uuid_mutex);
6952         mutex_lock(&fs_info->chunk_mutex);
6953
6954         /*
6955          * Read all device items, and then all the chunk items. All
6956          * device items are found before any chunk item (their object id
6957          * is smaller than the lowest possible object id for a chunk
6958          * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6959          */
6960         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6961         key.offset = 0;
6962         key.type = 0;
6963         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6964         if (ret < 0)
6965                 goto error;
6966         while (1) {
6967                 leaf = path->nodes[0];
6968                 slot = path->slots[0];
6969                 if (slot >= btrfs_header_nritems(leaf)) {
6970                         ret = btrfs_next_leaf(root, path);
6971                         if (ret == 0)
6972                                 continue;
6973                         if (ret < 0)
6974                                 goto error;
6975                         break;
6976                 }
6977                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6978                 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6979                         struct btrfs_dev_item *dev_item;
6980                         dev_item = btrfs_item_ptr(leaf, slot,
6981                                                   struct btrfs_dev_item);
6982                         ret = read_one_dev(leaf, dev_item);
6983                         if (ret)
6984                                 goto error;
6985                         total_dev++;
6986                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6987                         struct btrfs_chunk *chunk;
6988                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6989                         ret = read_one_chunk(&found_key, leaf, chunk);
6990                         if (ret)
6991                                 goto error;
6992                 }
6993                 path->slots[0]++;
6994         }
6995
6996         /*
6997          * After loading the chunk tree we have all device information, so
6998          * do another round of validation checks.
6999          */
7000         if (total_dev != fs_info->fs_devices->total_devices) {
7001                 btrfs_err(fs_info,
7002            "super_num_devices %llu mismatch with num_devices %llu found here",
7003                           btrfs_super_num_devices(fs_info->super_copy),
7004                           total_dev);
7005                 ret = -EINVAL;
7006                 goto error;
7007         }
7008         if (btrfs_super_total_bytes(fs_info->super_copy) <
7009             fs_info->fs_devices->total_rw_bytes) {
7010                 btrfs_err(fs_info,
7011         "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7012                           btrfs_super_total_bytes(fs_info->super_copy),
7013                           fs_info->fs_devices->total_rw_bytes);
7014                 ret = -EINVAL;
7015                 goto error;
7016         }
7017         ret = 0;
7018 error:
7019         mutex_unlock(&fs_info->chunk_mutex);
7020         mutex_unlock(&uuid_mutex);
7021
7022         btrfs_free_path(path);
7023         return ret;
7024 }
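
/*
 * Why the single search above sees devices before chunks (a sketch of the
 * key space, from the on-disk format definitions): device items live at
 * objectid BTRFS_DEV_ITEMS_OBJECTID (1), while every chunk item lives at
 * objectid BTRFS_FIRST_CHUNK_TREE_OBJECTID (256) keyed by the chunk start:
 *
 *	(1,   BTRFS_DEV_ITEM_KEY,   devid)        all device items first
 *	(256, BTRFS_CHUNK_ITEM_KEY, chunk_start)  then all chunk items
 *
 * so read_one_dev() has populated every device before read_one_chunk()
 * needs to resolve its stripe->dev pointers.
 */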
7025
7026 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7027 {
7028         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7029         struct btrfs_device *device;
7030
7031         while (fs_devices) {
7032                 mutex_lock(&fs_devices->device_list_mutex);
7033                 list_for_each_entry(device, &fs_devices->devices, dev_list)
7034                         device->fs_info = fs_info;
7035                 mutex_unlock(&fs_devices->device_list_mutex);
7036
7037                 fs_devices = fs_devices->seed;
7038         }
7039 }
7040
7041 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7042                                  const struct btrfs_dev_stats_item *ptr,
7043                                  int index)
7044 {
7045         u64 val;
7046
7047         read_extent_buffer(eb, &val,
7048                            offsetof(struct btrfs_dev_stats_item, values) +
7049                             ((unsigned long)ptr) + (index * sizeof(u64)),
7050                            sizeof(val));
7051         return val;
7052 }
7053
7054 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7055                                       struct btrfs_dev_stats_item *ptr,
7056                                       int index, u64 val)
7057 {
7058         write_extent_buffer(eb, &val,
7059                             offsetof(struct btrfs_dev_stats_item, values) +
7060                              ((unsigned long)ptr) + (index * sizeof(u64)),
7061                             sizeof(val));
7062 }
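
/*
 * The two helpers above hand-roll accessors for the on-disk item, which is
 * just a fixed array of little-endian counters (sketch of the definition
 * in ctree.h):
 *
 *	struct btrfs_dev_stats_item {
 *		__le64 values[BTRFS_DEV_STAT_VALUES_MAX];
 *	} __attribute__ ((__packed__));
 *
 * The byte offset of counter @index inside the extent buffer is therefore
 * the item start plus offsetof(struct btrfs_dev_stats_item, values) plus
 * index * sizeof(u64), exactly the arithmetic passed to
 * read_extent_buffer() and write_extent_buffer().
 */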
7063
7064 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7065 {
7066         struct btrfs_key key;
7067         struct btrfs_root *dev_root = fs_info->dev_root;
7068         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7069         struct extent_buffer *eb;
7070         int slot;
7071         int ret = 0;
7072         struct btrfs_device *device;
7073         struct btrfs_path *path = NULL;
7074         int i;
7075
7076         path = btrfs_alloc_path();
7077         if (!path)
7078                 return -ENOMEM;
7079
7080         mutex_lock(&fs_devices->device_list_mutex);
7081         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7082                 int item_size;
7083                 struct btrfs_dev_stats_item *ptr;
7084
7085                 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7086                 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7087                 key.offset = device->devid;
7088                 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
7089                 if (ret) {
7090                         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7091                                 btrfs_dev_stat_set(device, i, 0);
7092                         device->dev_stats_valid = 1;
7093                         btrfs_release_path(path);
7094                         continue;
7095                 }
7096                 slot = path->slots[0];
7097                 eb = path->nodes[0];
7098                 item_size = btrfs_item_size_nr(eb, slot);
7099
7100                 ptr = btrfs_item_ptr(eb, slot,
7101                                      struct btrfs_dev_stats_item);
7102
7103                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7104                         if (item_size >= (1 + i) * sizeof(__le64))
7105                                 btrfs_dev_stat_set(device, i,
7106                                         btrfs_dev_stats_value(eb, ptr, i));
7107                         else
7108                                 btrfs_dev_stat_set(device, i, 0);
7109                 }
7110
7111                 device->dev_stats_valid = 1;
7112                 btrfs_dev_stat_print_on_load(device);
7113                 btrfs_release_path(path);
7114         }
7115         mutex_unlock(&fs_devices->device_list_mutex);
7116
7117         btrfs_free_path(path);
7118         return ret < 0 ? ret : 0;
7119 }
7120
7121 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7122                                 struct btrfs_device *device)
7123 {
7124         struct btrfs_fs_info *fs_info = trans->fs_info;
7125         struct btrfs_root *dev_root = fs_info->dev_root;
7126         struct btrfs_path *path;
7127         struct btrfs_key key;
7128         struct extent_buffer *eb;
7129         struct btrfs_dev_stats_item *ptr;
7130         int ret;
7131         int i;
7132
7133         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7134         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7135         key.offset = device->devid;
7136
7137         path = btrfs_alloc_path();
7138         if (!path)
7139                 return -ENOMEM;
7140         ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7141         if (ret < 0) {
7142                 btrfs_warn_in_rcu(fs_info,
7143                         "error %d while searching for dev_stats item for device %s",
7144                               ret, rcu_str_deref(device->name));
7145                 goto out;
7146         }
7147
7148         if (ret == 0 &&
7149             btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7150                 /* need to delete old one and insert a new one */
7151                 ret = btrfs_del_item(trans, dev_root, path);
7152                 if (ret != 0) {
7153                         btrfs_warn_in_rcu(fs_info,
7154                                 "delete too small dev_stats item for device %s failed %d",
7155                                       rcu_str_deref(device->name), ret);
7156                         goto out;
7157                 }
7158                 ret = 1;
7159         }
7160
7161         if (ret == 1) {
7162                 /* need to insert a new item */
7163                 btrfs_release_path(path);
7164                 ret = btrfs_insert_empty_item(trans, dev_root, path,
7165                                               &key, sizeof(*ptr));
7166                 if (ret < 0) {
7167                         btrfs_warn_in_rcu(fs_info,
7168                                 "insert dev_stats item for device %s failed %d",
7169                                 rcu_str_deref(device->name), ret);
7170                         goto out;
7171                 }
7172         }
7173
7174         eb = path->nodes[0];
7175         ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7176         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7177                 btrfs_set_dev_stats_value(eb, ptr, i,
7178                                           btrfs_dev_stat_read(device, i));
7179         btrfs_mark_buffer_dirty(eb);
7180
7181 out:
7182         btrfs_free_path(path);
7183         return ret;
7184 }
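
/*
 * Note on the return-value dance above: btrfs_search_slot() returns 0 when
 * the key exists and 1 when it does not (with the path pointing at the
 * insertion position), so the function covers three cases: update in place
 * (ret == 0 and the item is large enough), replace a stale short item from
 * an older format (delete it, then force ret to 1), or insert a brand new
 * item (ret == 1).
 */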
7185
7186 /*
7187  * Called from commit_transaction().  Writes all changed device stats to disk.
7188  */
7189 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7190 {
7191         struct btrfs_fs_info *fs_info = trans->fs_info;
7192         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7193         struct btrfs_device *device;
7194         int stats_cnt;
7195         int ret = 0;
7196
7197         mutex_lock(&fs_devices->device_list_mutex);
7198         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7199                 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7200                 if (!device->dev_stats_valid || stats_cnt == 0)
7201                         continue;
7202
7203
7204                 /*
7205                  * There is a LOAD-LOAD control dependency between the value of
7206                  * dev_stats_ccnt and updating the on-disk values which requires
7207                  * reading the in-memory counters. Such control dependencies
7208                  * require explicit read memory barriers.
7209                  *
7210                  * This memory barrier pairs with smp_mb__before_atomic in
7211                  * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7212                  * barrier implied by atomic_xchg in
7213                  * btrfs_dev_stats_read_and_reset().
7214                  */
7215                 smp_rmb();
7216
7217                 ret = update_dev_stat_item(trans, device);
7218                 if (!ret)
7219                         atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7220         }
7221         mutex_unlock(&fs_devices->device_list_mutex);
7222
7223         return ret;
7224 }
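
/*
 * For reference, the writer side that the smp_rmb() above pairs with looks
 * roughly like this (a sketch of the inline helper in volumes.h):
 *
 *	static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
 *					      int index)
 *	{
 *		atomic_inc(dev->dev_stat_values + index);
 *		smp_mb__before_atomic();
 *		atomic_inc(&dev->dev_stats_ccnt);
 *	}
 *
 * The counter update must be visible before the dirty-count bump, so a
 * reader that sees a non-zero dev_stats_ccnt and then issues its read
 * barrier is guaranteed to also see the updated counter values.
 */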
7225
7226 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7227 {
7228         btrfs_dev_stat_inc(dev, index);
7229         btrfs_dev_stat_print_on_error(dev);
7230 }
7231
7232 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7233 {
7234         if (!dev->dev_stats_valid)
7235                 return;
7236         btrfs_err_rl_in_rcu(dev->fs_info,
7237                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7238                            rcu_str_deref(dev->name),
7239                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7240                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7241                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7242                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7243                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7244 }
7245
7246 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7247 {
7248         int i;
7249
7250         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7251                 if (btrfs_dev_stat_read(dev, i) != 0)
7252                         break;
7253         if (i == BTRFS_DEV_STAT_VALUES_MAX)
7254                 return; /* all values == 0, suppress message */
7255
7256         btrfs_info_in_rcu(dev->fs_info,
7257                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7258                rcu_str_deref(dev->name),
7259                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7260                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7261                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7262                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7263                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7264 }
7265
7266 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7267                         struct btrfs_ioctl_get_dev_stats *stats)
7268 {
7269         struct btrfs_device *dev;
7270         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7271         int i;
7272
7273         mutex_lock(&fs_devices->device_list_mutex);
7274         dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
7275                                 true);
7276         mutex_unlock(&fs_devices->device_list_mutex);
7277
7278         if (!dev) {
7279                 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7280                 return -ENODEV;
7281         } else if (!dev->dev_stats_valid) {
7282                 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7283                 return -ENODEV;
7284         } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7285                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7286                         if (stats->nr_items > i)
7287                                 stats->values[i] =
7288                                         btrfs_dev_stat_read_and_reset(dev, i);
7289                         else
7290                                 btrfs_dev_stat_set(dev, i, 0);
7291                 }
7292         } else {
7293                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7294                         if (stats->nr_items > i)
7295                                 stats->values[i] = btrfs_dev_stat_read(dev, i);
7296         }
7297         if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7298                 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7299         return 0;
7300 }
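
/*
 * For context, the handler above is what backs "btrfs device stats".  A
 * minimal userspace caller might look like this (illustrative sketch using
 * the uapi definitions from linux/btrfs.h; error handling omitted):
 *
 *	struct btrfs_ioctl_get_dev_stats args = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) == 0)
 *		printf("write errs: %llu\n", (unsigned long long)
 *		       args.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 *
 * The BTRFS_DEV_STATS_RESET branch above is reachable via the companion
 * BTRFS_IOC_GET_AND_RESET_DEV_STATS ioctl.
 */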
7301
7302 void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
7303 {
7304         struct buffer_head *bh;
7305         struct btrfs_super_block *disk_super;
7306         int copy_num;
7307
7308         if (!bdev)
7309                 return;
7310
7311         for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
7312                 copy_num++) {
7313
7314                 if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
7315                         continue;
7316
7317                 disk_super = (struct btrfs_super_block *)bh->b_data;
7318
7319                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
7320                 set_buffer_dirty(bh);
7321                 sync_dirty_buffer(bh);
7322                 brelse(bh);
7323         }
7324
7325         /* Notify udev that device has changed */
7326         btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
7327
7328         /* Update ctime/mtime for device path for libblkid */
7329         update_dev_time(device_path);
7330 }
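
/*
 * The copies wiped above live at the fixed offsets returned by
 * btrfs_sb_offset() (sketch of the values implied by
 * BTRFS_SUPER_INFO_OFFSET and BTRFS_SUPER_MIRROR_SHIFT):
 *
 *	copy 0: 64KiB   (the primary superblock)
 *	copy 1: 64MiB
 *	copy 2: 256GiB  (skipped on smaller devices)
 *
 * Only the magic is zeroed rather than the whole block; that is enough to
 * make every copy fail validation, so the device no longer scans as btrfs.
 */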
7331
7332 /*
7333  * Update the size and bytes used for each device where it changed.  This is
7334  * delayed since we would otherwise get errors while writing out the
7335  * superblocks.
7336  *
7337  * Must be invoked during transaction commit.
7338  */
7339 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7340 {
7341         struct btrfs_device *curr, *next;
7342
7343         ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7344
7345         if (list_empty(&trans->dev_update_list))
7346                 return;
7347
7348         /*
7349          * We don't need the device_list_mutex here.  This list is owned by the
7350          * transaction and the transaction must complete before the device is
7351          * released.
7352          */
7353         mutex_lock(&trans->fs_info->chunk_mutex);
7354         list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7355                                  post_commit_list) {
7356                 list_del_init(&curr->post_commit_list);
7357                 curr->commit_total_bytes = curr->disk_total_bytes;
7358                 curr->commit_bytes_used = curr->bytes_used;
7359         }
7360         mutex_unlock(&trans->fs_info->chunk_mutex);
7361 }
7362
7363 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
7364 {
7365         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7366         while (fs_devices) {
7367                 fs_devices->fs_info = fs_info;
7368                 fs_devices = fs_devices->seed;
7369         }
7370 }
7371
7372 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7373 {
7374         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7375         while (fs_devices) {
7376                 fs_devices->fs_info = NULL;
7377                 fs_devices = fs_devices->seed;
7378         }
7379 }
7380
7381 /*
7382  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7383  */
7384 int btrfs_bg_type_to_factor(u64 flags)
7385 {
7386         const int index = btrfs_bg_flags_to_raid_index(flags);
7387
7388         return btrfs_raid_array[index].ncopies;
7389 }
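
/*
 * Illustrative values from btrfs_raid_array: SINGLE and RAID0 have
 * ncopies == 1, while DUP, RAID1 and RAID10 have ncopies == 2, so e.g.
 * 1GiB of logical RAID1 space accounts for 2GiB of raw device space.
 * Parity profiles are not "simple" in this sense: their raw footprint
 * depends on the stripe count, not on a constant per-profile factor.
 */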
7390
7391
7392
7393 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7394                                  u64 chunk_offset, u64 devid,
7395                                  u64 physical_offset, u64 physical_len)
7396 {
7397         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7398         struct extent_map *em;
7399         struct map_lookup *map;
7400         struct btrfs_device *dev;
7401         u64 stripe_len;
7402         bool found = false;
7403         int ret = 0;
7404         int i;
7405
7406         read_lock(&em_tree->lock);
7407         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7408         read_unlock(&em_tree->lock);
7409
7410         if (!em) {
7411                 btrfs_err(fs_info,
7412 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7413                           physical_offset, devid);
7414                 ret = -EUCLEAN;
7415                 goto out;
7416         }
7417
7418         map = em->map_lookup;
7419         stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7420         if (physical_len != stripe_len) {
7421                 btrfs_err(fs_info,
7422 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7423                           physical_offset, devid, em->start, physical_len,
7424                           stripe_len);
7425                 ret = -EUCLEAN;
7426                 goto out;
7427         }
7428
7429         for (i = 0; i < map->num_stripes; i++) {
7430                 if (map->stripes[i].dev->devid == devid &&
7431                     map->stripes[i].physical == physical_offset) {
7432                         found = true;
7433                         if (map->verified_stripes >= map->num_stripes) {
7434                                 btrfs_err(fs_info,
7435                                 "too many dev extents for chunk %llu found",
7436                                           em->start);
7437                                 ret = -EUCLEAN;
7438                                 goto out;
7439                         }
7440                         map->verified_stripes++;
7441                         break;
7442                 }
7443         }
7444         if (!found) {
7445                 btrfs_err(fs_info,
7446         "dev extent physical offset %llu devid %llu has no corresponding chunk",
7447                         physical_offset, devid);
7448                 ret = -EUCLEAN;
7449         }
7450
7451         /* Make sure no dev extent is beyond the device boundary */
7452         dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7453         if (!dev) {
7454                 btrfs_err(fs_info, "failed to find devid %llu", devid);
7455                 ret = -EUCLEAN;
7456                 goto out;
7457         }
7458
7459         /* It's possible this device is a dummy for a seed device */
7460         if (dev->disk_total_bytes == 0) {
7461                 dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
7462                                         NULL, false);
7463                 if (!dev) {
7464                         btrfs_err(fs_info, "failed to find seed devid %llu",
7465                                   devid);
7466                         ret = -EUCLEAN;
7467                         goto out;
7468                 }
7469         }
7470
7471         if (physical_offset + physical_len > dev->disk_total_bytes) {
7472                 btrfs_err(fs_info,
7473 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7474                           devid, physical_offset, physical_len,
7475                           dev->disk_total_bytes);
7476                 ret = -EUCLEAN;
7477                 goto out;
7478         }
7479 out:
7480         free_extent_map(em);
7481         return ret;
7482 }
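
/*
 * Illustrative numbers for the stripe length check above (assuming
 * calc_stripe_length() divides the chunk length by the number of data
 * stripes): a 1GiB RAID1 chunk with two stripes expects two 1GiB dev
 * extents, while a 1GiB RAID0 chunk with two stripes expects two 512MiB
 * dev extents.  Any other dev extent length is reported as -EUCLEAN.
 */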
7483
7484 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7485 {
7486         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7487         struct extent_map *em;
7488         struct rb_node *node;
7489         int ret = 0;
7490
7491         read_lock(&em_tree->lock);
7492         for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7493                 em = rb_entry(node, struct extent_map, rb_node);
7494                 if (em->map_lookup->num_stripes !=
7495                     em->map_lookup->verified_stripes) {
7496                         btrfs_err(fs_info,
7497                         "chunk %llu has missing dev extent, have %d expect %d",
7498                                   em->start, em->map_lookup->verified_stripes,
7499                                   em->map_lookup->num_stripes);
7500                         ret = -EUCLEAN;
7501                         goto out;
7502                 }
7503         }
7504 out:
7505         read_unlock(&em_tree->lock);
7506         return ret;
7507 }
7508
7509 /*
7510  * Ensure that all dev extents are mapped to the correct chunk, otherwise
7511  * later chunk allocation/free would cause unexpected behavior.
7512  *
7513  * NOTE: This will iterate through the whole device tree, which should be
7514  * about the same size as the chunk tree.  This slightly increases mount time.
7515  */
7516 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7517 {
7518         struct btrfs_path *path;
7519         struct btrfs_root *root = fs_info->dev_root;
7520         struct btrfs_key key;
7521         u64 prev_devid = 0;
7522         u64 prev_dev_ext_end = 0;
7523         int ret = 0;
7524
7525         key.objectid = 1;
7526         key.type = BTRFS_DEV_EXTENT_KEY;
7527         key.offset = 0;
7528
7529         path = btrfs_alloc_path();
7530         if (!path)
7531                 return -ENOMEM;
7532
7533         path->reada = READA_FORWARD;
7534         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7535         if (ret < 0)
7536                 goto out;
7537
7538         if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7539                 ret = btrfs_next_item(root, path);
7540                 if (ret < 0)
7541                         goto out;
7542                 /* No dev extents at all? Not good */
7543                 if (ret > 0) {
7544                         ret = -EUCLEAN;
7545                         goto out;
7546                 }
7547         }
7548         while (1) {
7549                 struct extent_buffer *leaf = path->nodes[0];
7550                 struct btrfs_dev_extent *dext;
7551                 int slot = path->slots[0];
7552                 u64 chunk_offset;
7553                 u64 physical_offset;
7554                 u64 physical_len;
7555                 u64 devid;
7556
7557                 btrfs_item_key_to_cpu(leaf, &key, slot);
7558                 if (key.type != BTRFS_DEV_EXTENT_KEY)
7559                         break;
7560                 devid = key.objectid;
7561                 physical_offset = key.offset;
7562
7563                 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7564                 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7565                 physical_len = btrfs_dev_extent_length(leaf, dext);
7566
7567                 /* Check if this dev extent overlaps with the previous one */
7568                 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7569                         btrfs_err(fs_info,
7570 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7571                                   devid, physical_offset, prev_dev_ext_end);
7572                         ret = -EUCLEAN;
7573                         goto out;
7574                 }
7575
7576                 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7577                                             physical_offset, physical_len);
7578                 if (ret < 0)
7579                         goto out;
7580                 prev_devid = devid;
7581                 prev_dev_ext_end = physical_offset + physical_len;
7582
7583                 ret = btrfs_next_item(root, path);
7584                 if (ret < 0)
7585                         goto out;
7586                 if (ret > 0) {
7587                         ret = 0;
7588                         break;
7589                 }
7590         }
7591
7592         /* Ensure all chunks have corresponding dev extents */
7593         ret = verify_chunk_dev_extent_mapping(fs_info);
7594 out:
7595         btrfs_free_path(path);
7596         return ret;
7597 }
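
/*
 * For reference, dev extent keys are laid out as (sketch, per the on-disk
 * format and the decoding in the loop above):
 *
 *	key.objectid = devid
 *	key.type     = BTRFS_DEV_EXTENT_KEY
 *	key.offset   = physical start of the extent on that device
 *
 * which is why starting the search at (1, BTRFS_DEV_EXTENT_KEY, 0) finds
 * the first extent of the lowest devid, and why the overlap check only has
 * to compare neighbouring items that share a devid.
 */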
7598
7599 /*
7600  * Check whether the given block group or device is pinned by any inode being
7601  * used as a swapfile.
7602  */
7603 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
7604 {
7605         struct btrfs_swapfile_pin *sp;
7606         struct rb_node *node;
7607
7608         spin_lock(&fs_info->swapfile_pins_lock);
7609         node = fs_info->swapfile_pins.rb_node;
7610         while (node) {
7611                 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
7612                 if (ptr < sp->ptr)
7613                         node = node->rb_left;
7614                 else if (ptr > sp->ptr)
7615                         node = node->rb_right;
7616                 else
7617                         break;
7618         }
7619         spin_unlock(&fs_info->swapfile_pins_lock);
7620         return node != NULL;
7621 }
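
/*
 * The swapfile pin tree is keyed by the raw pointer value: sp->ptr holds
 * either a block group or a device pointer (a sketch of the scheme used by
 * the swapfile code), so callers pass in the same pointer they want
 * protected and a plain address comparison gives the rb-tree a valid total
 * order.
 */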