2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
20 #include <sys/types.h>
22 #include <uuid/uuid.h>
27 #include "transaction.h"
28 #include "print-tree.h"
31 #include "kernel-lib/raid56.h"
34 struct btrfs_device *dev;
38 static inline int nr_parity_stripes(struct map_lookup *map)
40 if (map->type & BTRFS_BLOCK_GROUP_RAID5)
42 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
48 static inline int nr_data_stripes(struct map_lookup *map)
50 return map->num_stripes - nr_parity_stripes(map);
53 #define is_parity_stripe(x) ( ((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE) )
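/*
 * All filesystems seen by device scanning so far, one btrfs_fs_devices entry
 * per fsid, each holding the list of its member devices.
 */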
55 static LIST_HEAD(fs_uuids);
57 static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
60 struct btrfs_device *dev;
62 list_for_each_entry(dev, head, dev_list) {
63 if (dev->devid == devid &&
64 !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
71 static struct btrfs_fs_devices *find_fsid(u8 *fsid)
73 struct btrfs_fs_devices *fs_devices;
75 list_for_each_entry(fs_devices, &fs_uuids, list) {
76 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
82 static int device_list_add(const char *path,
83 struct btrfs_super_block *disk_super,
84 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
86 struct btrfs_device *device;
87 struct btrfs_fs_devices *fs_devices;
88 u64 found_transid = btrfs_super_generation(disk_super);
90 fs_devices = find_fsid(disk_super->fsid);
92 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
95 INIT_LIST_HEAD(&fs_devices->devices);
96 list_add(&fs_devices->list, &fs_uuids);
97 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
98 fs_devices->latest_devid = devid;
99 fs_devices->latest_trans = found_transid;
100 fs_devices->lowest_devid = (u64)-1;
103 device = __find_device(&fs_devices->devices, devid,
104 disk_super->dev_item.uuid);
107 device = kzalloc(sizeof(*device), GFP_NOFS);
109 /* we can safely leave the fs_devices entry around */
113 device->devid = devid;
114 device->generation = found_transid;
115 memcpy(device->uuid, disk_super->dev_item.uuid,
117 device->name = kstrdup(path, GFP_NOFS);
122 device->label = kstrdup(disk_super->label, GFP_NOFS);
123 if (!device->label) {
128 device->total_devs = btrfs_super_num_devices(disk_super);
129 device->super_bytes_used = btrfs_super_bytes_used(disk_super);
130 device->total_bytes =
131 btrfs_stack_device_total_bytes(&disk_super->dev_item);
133 btrfs_stack_device_bytes_used(&disk_super->dev_item);
134 list_add(&device->dev_list, &fs_devices->devices);
135 device->fs_devices = fs_devices;
136 } else if (!device->name || strcmp(device->name, path)) {
 * The existing device has a newer generation, so this one could
 * be stale; don't add it.
143 if (found_transid < device->generation) {
145 "adding device %s gen %llu but found an existing device %s gen %llu",
146 path, found_transid, device->name,
159 if (found_transid > fs_devices->latest_trans) {
160 fs_devices->latest_devid = devid;
161 fs_devices->latest_trans = found_transid;
163 if (fs_devices->lowest_devid > devid) {
164 fs_devices->lowest_devid = devid;
166 *fs_devices_ret = fs_devices;
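/*
 * Close all open file descriptors of @fs_devices (and of any seed filesystems
 * chained behind it), free the device entries and remove the fs_devices from
 * the list of scanned filesystems.
 */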
170 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
172 struct btrfs_fs_devices *seed_devices;
173 struct btrfs_device *device;
179 while (!list_empty(&fs_devices->devices)) {
180 device = list_entry(fs_devices->devices.next,
181 struct btrfs_device, dev_list);
182 if (device->fd != -1) {
183 if (fsync(device->fd) == -1) {
184 warning("fsync on device %llu failed: %s",
185 device->devid, strerror(errno));
188 if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
189 fprintf(stderr, "Warning, could not drop caches\n");
193 device->writeable = 0;
194 list_del(&device->dev_list);
195 /* free the memory */
201 seed_devices = fs_devices->seed;
202 fs_devices->seed = NULL;
204 struct btrfs_fs_devices *orig;
207 fs_devices = seed_devices;
208 list_del(&orig->list);
212 list_del(&fs_devices->list);
219 void btrfs_close_all_devices(void)
221 struct btrfs_fs_devices *fs_devices;
223 while (!list_empty(&fs_uuids)) {
224 fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
226 btrfs_close_devices(fs_devices);
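/*
 * Open every device of @fs_devices with the given flags and remember the file
 * descriptors of the members carrying the latest and lowest devid.
 */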
230 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
233 struct btrfs_device *device;
236 list_for_each_entry(device, &fs_devices->devices, dev_list) {
printk("no name for device %llu, skipping it\n", device->devid);
242 fd = open(device->name, flags);
245 error("cannot open device '%s': %s", device->name,
250 if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
251 fprintf(stderr, "Warning, could not drop caches\n");
253 if (device->devid == fs_devices->latest_devid)
254 fs_devices->latest_bdev = fd;
255 if (device->devid == fs_devices->lowest_devid)
256 fs_devices->lowest_bdev = fd;
259 device->writeable = 1;
263 btrfs_close_devices(fs_devices);
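/*
 * Read the super block at @super_offset from @fd and register the device in
 * the list of scanned filesystems via device_list_add().
 */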
267 int btrfs_scan_one_device(int fd, const char *path,
268 struct btrfs_fs_devices **fs_devices_ret,
269 u64 *total_devs, u64 super_offset, unsigned sbflags)
271 struct btrfs_super_block *disk_super;
272 char buf[BTRFS_SUPER_INFO_SIZE];
276 disk_super = (struct btrfs_super_block *)buf;
277 ret = btrfs_read_dev_super(fd, disk_super, super_offset, sbflags);
280 devid = btrfs_stack_device_id(&disk_super->dev_item);
281 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
284 *total_devs = btrfs_super_num_devices(disk_super);
286 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
 * find_free_dev_extent_start - find free space in the specified device
 * @device: the device in which we search for free space
 * @num_bytes: the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start: store the start of the free space
 * @len: the size of the free space that we find, or the size
 * of the max free space if we don't find suitable free space
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
312 static int find_free_dev_extent_start(struct btrfs_device *device,
313 u64 num_bytes, u64 search_start,
314 u64 *start, u64 *len)
316 struct btrfs_key key;
317 struct btrfs_root *root = device->dev_root;
318 struct btrfs_dev_extent *dev_extent;
319 struct btrfs_path *path;
324 u64 search_end = device->total_bytes;
327 struct extent_buffer *l;
328 u64 min_search_start;
331 * We don't want to overwrite the superblock on the drive nor any area
332 * used by the boot loader (grub for example), so we make sure to start
333 * at an offset of at least 1MB.
335 min_search_start = max(root->fs_info->alloc_start, (u64)SZ_1M);
336 search_start = max(search_start, min_search_start);
338 path = btrfs_alloc_path();
342 max_hole_start = search_start;
345 if (search_start >= search_end) {
352 key.objectid = device->devid;
353 key.offset = search_start;
354 key.type = BTRFS_DEV_EXTENT_KEY;
356 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
360 ret = btrfs_previous_item(root, path, key.objectid, key.type);
367 slot = path->slots[0];
368 if (slot >= btrfs_header_nritems(l)) {
369 ret = btrfs_next_leaf(root, path);
377 btrfs_item_key_to_cpu(l, &key, slot);
379 if (key.objectid < device->devid)
382 if (key.objectid > device->devid)
385 if (key.type != BTRFS_DEV_EXTENT_KEY)
388 if (key.offset > search_start) {
389 hole_size = key.offset - search_start;
392 * Have to check before we set max_hole_start, otherwise
393 * we could end up sending back this offset anyway.
395 if (hole_size > max_hole_size) {
396 max_hole_start = search_start;
397 max_hole_size = hole_size;
 * If this free space is greater than what we need,
 * it must be the max free space that we have found
 * until now, so max_hole_start must point to the start
 * of this free space and the length of this free space
 * is stored in max_hole_size. Thus, we return
 * max_hole_start and max_hole_size and go back to the
409 if (hole_size >= num_bytes) {
415 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
416 extent_end = key.offset + btrfs_dev_extent_length(l,
418 if (extent_end > search_start)
419 search_start = extent_end;
426 * At this point, search_start should be the end of
427 * allocated dev extents, and when shrinking the device,
428 * search_end may be smaller than search_start.
430 if (search_end > search_start) {
431 hole_size = search_end - search_start;
433 if (hole_size > max_hole_size) {
434 max_hole_start = search_start;
435 max_hole_size = hole_size;
440 if (max_hole_size < num_bytes)
446 btrfs_free_path(path);
447 *start = max_hole_start;
449 *len = max_hole_size;
453 static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
456 /* FIXME use last free of some kind */
457 return find_free_dev_extent_start(device, num_bytes, 0, start, NULL);
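/*
 * Reserve @num_bytes on @device for the chunk at @chunk_offset and insert the
 * matching DEV_EXTENT item into the device tree; the physical offset of the
 * reservation is returned in @start.
 */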
460 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
461 struct btrfs_device *device,
462 u64 chunk_offset, u64 num_bytes, u64 *start,
466 struct btrfs_path *path;
467 struct btrfs_root *root = device->dev_root;
468 struct btrfs_dev_extent *extent;
469 struct extent_buffer *leaf;
470 struct btrfs_key key;
472 path = btrfs_alloc_path();
 * For the convert case, just skip searching for a free dev_extent, as the
 * caller is responsible for making sure it's free.
481 ret = find_free_dev_extent(device, num_bytes, start);
486 key.objectid = device->devid;
488 key.type = BTRFS_DEV_EXTENT_KEY;
489 ret = btrfs_insert_empty_item(trans, root, path, &key,
493 leaf = path->nodes[0];
494 extent = btrfs_item_ptr(leaf, path->slots[0],
495 struct btrfs_dev_extent);
496 btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID);
497 btrfs_set_dev_extent_chunk_objectid(leaf, extent,
498 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
499 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
501 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
502 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
505 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
506 btrfs_mark_buffer_dirty(leaf);
508 btrfs_free_path(path);
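/*
 * Return in @offset the logical address just past the highest existing chunk,
 * which is where the next chunk will be placed.
 */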
512 static int find_next_chunk(struct btrfs_fs_info *fs_info, u64 *offset)
514 struct btrfs_root *root = fs_info->chunk_root;
515 struct btrfs_path *path;
517 struct btrfs_key key;
518 struct btrfs_chunk *chunk;
519 struct btrfs_key found_key;
521 path = btrfs_alloc_path();
525 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
526 key.offset = (u64)-1;
527 key.type = BTRFS_CHUNK_ITEM_KEY;
529 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
535 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
539 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
541 if (found_key.objectid != BTRFS_FIRST_CHUNK_TREE_OBJECTID)
544 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
546 *offset = found_key.offset +
547 btrfs_chunk_length(path->nodes[0], chunk);
552 btrfs_free_path(path);
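/* Return the next unused device id: one past the highest devid in the tree. */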
556 static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
560 struct btrfs_key key;
561 struct btrfs_key found_key;
563 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
564 key.type = BTRFS_DEV_ITEM_KEY;
565 key.offset = (u64)-1;
567 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
573 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
578 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
580 *objectid = found_key.offset + 1;
584 btrfs_release_path(path);
589 * the device information is stored in the chunk root
590 * the btrfs_device struct should be fully filled in
592 int btrfs_add_device(struct btrfs_trans_handle *trans,
593 struct btrfs_fs_info *fs_info,
594 struct btrfs_device *device)
597 struct btrfs_path *path;
598 struct btrfs_dev_item *dev_item;
599 struct extent_buffer *leaf;
600 struct btrfs_key key;
601 struct btrfs_root *root = fs_info->chunk_root;
605 path = btrfs_alloc_path();
609 ret = find_next_devid(root, path, &free_devid);
613 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
614 key.type = BTRFS_DEV_ITEM_KEY;
615 key.offset = free_devid;
617 ret = btrfs_insert_empty_item(trans, root, path, &key,
622 leaf = path->nodes[0];
623 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
625 device->devid = free_devid;
626 btrfs_set_device_id(leaf, dev_item, device->devid);
627 btrfs_set_device_generation(leaf, dev_item, 0);
628 btrfs_set_device_type(leaf, dev_item, device->type);
629 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
630 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
631 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
632 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
633 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
634 btrfs_set_device_group(leaf, dev_item, 0);
635 btrfs_set_device_seek_speed(leaf, dev_item, 0);
636 btrfs_set_device_bandwidth(leaf, dev_item, 0);
637 btrfs_set_device_start_offset(leaf, dev_item, 0);
639 ptr = (unsigned long)btrfs_device_uuid(dev_item);
640 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
641 ptr = (unsigned long)btrfs_device_fsid(dev_item);
642 write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_UUID_SIZE);
643 btrfs_mark_buffer_dirty(leaf);
647 btrfs_free_path(path);
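/*
 * Write the in-memory fields of @device back into its DEV_ITEM in the chunk
 * tree.
 */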
651 int btrfs_update_device(struct btrfs_trans_handle *trans,
652 struct btrfs_device *device)
655 struct btrfs_path *path;
656 struct btrfs_root *root;
657 struct btrfs_dev_item *dev_item;
658 struct extent_buffer *leaf;
659 struct btrfs_key key;
661 root = device->dev_root->fs_info->chunk_root;
663 path = btrfs_alloc_path();
667 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
668 key.type = BTRFS_DEV_ITEM_KEY;
669 key.offset = device->devid;
671 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
680 leaf = path->nodes[0];
681 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
683 btrfs_set_device_id(leaf, dev_item, device->devid);
684 btrfs_set_device_type(leaf, dev_item, device->type);
685 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
686 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
687 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
688 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
689 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
690 btrfs_mark_buffer_dirty(leaf);
693 btrfs_free_path(path);
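/*
 * Append @key and @chunk to the super block's sys_chunk_array so that SYSTEM
 * chunks can be mapped before the chunk tree itself is readable.
 */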
697 int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
698 struct btrfs_chunk *chunk, int item_size)
700 struct btrfs_super_block *super_copy = fs_info->super_copy;
701 struct btrfs_disk_key disk_key;
705 array_size = btrfs_super_sys_array_size(super_copy);
706 if (array_size + item_size + sizeof(disk_key)
707 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
710 ptr = super_copy->sys_chunk_array + array_size;
711 btrfs_cpu_key_to_disk(&disk_key, key);
712 memcpy(ptr, &disk_key, sizeof(disk_key));
713 ptr += sizeof(disk_key);
714 memcpy(ptr, chunk, item_size);
715 item_size += sizeof(disk_key);
716 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
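/*
 * Given the per-device stripe size @calc_size, return the logical size of the
 * chunk for the given profile: mirrored profiles (RAID1/DUP) provide one
 * copy's worth, RAID5/6 lose one or two stripes to parity.
 */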
720 static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
723 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
725 else if (type & BTRFS_BLOCK_GROUP_RAID10)
726 return calc_size * (num_stripes / sub_stripes);
727 else if (type & BTRFS_BLOCK_GROUP_RAID5)
728 return calc_size * (num_stripes - 1);
729 else if (type & BTRFS_BLOCK_GROUP_RAID6)
730 return calc_size * (num_stripes - 2);
732 return calc_size * num_stripes;
736 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
738 /* TODO, add a way to store the preferred stripe size */
739 return BTRFS_STRIPE_LEN;
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in the first 1M of the device, and are not
 * allowed to allocate any chunk before alloc_start if it is specified.
 * So we search for holes from max(1M, alloc_start) to device->total_bytes.
750 static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
751 struct btrfs_device *device,
754 struct btrfs_path *path;
755 struct btrfs_root *root = device->dev_root;
756 struct btrfs_key key;
757 struct btrfs_dev_extent *dev_extent = NULL;
758 struct extent_buffer *l;
759 u64 search_start = root->fs_info->alloc_start;
760 u64 search_end = device->total_bytes;
766 search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);
768 path = btrfs_alloc_path();
772 key.objectid = device->devid;
773 key.offset = root->fs_info->alloc_start;
774 key.type = BTRFS_DEV_EXTENT_KEY;
777 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
780 ret = btrfs_previous_item(root, path, 0, key.type);
786 slot = path->slots[0];
787 if (slot >= btrfs_header_nritems(l)) {
788 ret = btrfs_next_leaf(root, path);
795 btrfs_item_key_to_cpu(l, &key, slot);
797 if (key.objectid < device->devid)
799 if (key.objectid > device->devid)
801 if (key.type != BTRFS_DEV_EXTENT_KEY)
803 if (key.offset > search_end)
805 if (key.offset > search_start)
806 free_bytes += key.offset - search_start;
808 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
809 extent_end = key.offset + btrfs_dev_extent_length(l,
811 if (extent_end > search_start)
812 search_start = extent_end;
813 if (search_start > search_end)
820 if (search_start < search_end)
821 free_bytes += search_end - search_start;
823 *avail_bytes = free_bytes;
826 btrfs_free_path(path);
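/*
 * Upper bounds on the number of stripes in a chunk: the chunk item with all
 * of its stripe entries must fit into a single leaf, and a SYSTEM chunk must
 * additionally fit into the super block's sys_chunk_array.
 */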
830 #define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r) \
831 - sizeof(struct btrfs_item) \
832 - sizeof(struct btrfs_chunk)) \
833 / sizeof(struct btrfs_stripe) + 1)
835 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
836 - 2 * sizeof(struct btrfs_disk_key) \
837 - 2 * sizeof(struct btrfs_chunk)) \
838 / sizeof(struct btrfs_stripe) + 1)
840 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
841 struct btrfs_fs_info *info, u64 *start,
842 u64 *num_bytes, u64 type)
845 struct btrfs_root *extent_root = info->extent_root;
846 struct btrfs_root *chunk_root = info->chunk_root;
847 struct btrfs_stripe *stripes;
848 struct btrfs_device *device = NULL;
849 struct btrfs_chunk *chunk;
850 struct list_head private_devs;
851 struct list_head *dev_list = &info->fs_devices->devices;
852 struct list_head *cur;
853 struct map_lookup *map;
854 int min_stripe_size = SZ_1M;
855 u64 calc_size = SZ_8M;
857 u64 max_chunk_size = 4 * calc_size;
868 int stripe_len = BTRFS_STRIPE_LEN;
869 struct btrfs_key key;
872 if (list_empty(dev_list)) {
876 if (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
877 if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
879 max_chunk_size = calc_size * 2;
880 min_stripe_size = SZ_1M;
881 max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
882 } else if (type & BTRFS_BLOCK_GROUP_DATA) {
884 max_chunk_size = 10 * calc_size;
885 min_stripe_size = SZ_64M;
886 max_stripes = BTRFS_MAX_DEVS(chunk_root);
887 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
889 max_chunk_size = 4 * calc_size;
890 min_stripe_size = SZ_32M;
891 max_stripes = BTRFS_MAX_DEVS(chunk_root);
894 if (type & BTRFS_BLOCK_GROUP_RAID1) {
895 num_stripes = min_t(u64, 2,
896 btrfs_super_num_devices(info->super_copy));
901 if (type & BTRFS_BLOCK_GROUP_DUP) {
905 if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
906 num_stripes = btrfs_super_num_devices(info->super_copy);
907 if (num_stripes > max_stripes)
908 num_stripes = max_stripes;
911 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
912 num_stripes = btrfs_super_num_devices(info->super_copy);
913 if (num_stripes > max_stripes)
914 num_stripes = max_stripes;
917 num_stripes &= ~(u32)1;
921 if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
922 num_stripes = btrfs_super_num_devices(info->super_copy);
923 if (num_stripes > max_stripes)
924 num_stripes = max_stripes;
928 stripe_len = find_raid56_stripe_len(num_stripes - 1,
929 btrfs_super_stripesize(info->super_copy));
931 if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
932 num_stripes = btrfs_super_num_devices(info->super_copy);
933 if (num_stripes > max_stripes)
934 num_stripes = max_stripes;
938 stripe_len = find_raid56_stripe_len(num_stripes - 2,
939 btrfs_super_stripesize(info->super_copy));
942 /* we don't want a chunk larger than 10% of the FS */
943 percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
944 max_chunk_size = min(percent_max, max_chunk_size);
947 if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
949 calc_size = max_chunk_size;
950 calc_size /= num_stripes;
951 calc_size /= stripe_len;
952 calc_size *= stripe_len;
954 /* we don't want tiny stripes */
955 calc_size = max_t(u64, calc_size, min_stripe_size);
957 calc_size /= stripe_len;
958 calc_size *= stripe_len;
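/* calc_size is now the per-device stripe size, rounded down to stripe_len */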
959 INIT_LIST_HEAD(&private_devs);
960 cur = dev_list->next;
963 if (type & BTRFS_BLOCK_GROUP_DUP)
964 min_free = calc_size * 2;
966 min_free = calc_size;
968 /* build a private list of devices we will allocate from */
969 while(index < num_stripes) {
970 device = list_entry(cur, struct btrfs_device, dev_list);
971 ret = btrfs_device_avail_bytes(trans, device, &avail);
975 if (avail >= min_free) {
976 list_move_tail(&device->dev_list, &private_devs);
978 if (type & BTRFS_BLOCK_GROUP_DUP)
980 } else if (avail > max_avail)
985 if (index < num_stripes) {
986 list_splice(&private_devs, dev_list);
987 if (index >= min_stripes) {
989 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
990 num_stripes /= sub_stripes;
991 num_stripes *= sub_stripes;
996 if (!looped && max_avail > 0) {
998 calc_size = max_avail;
1003 ret = find_next_chunk(info, &offset);
1006 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1007 key.type = BTRFS_CHUNK_ITEM_KEY;
1008 key.offset = offset;
1010 chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
1014 map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
1020 stripes = &chunk->stripe;
1021 *num_bytes = chunk_bytes_by_type(type, calc_size,
1022 num_stripes, sub_stripes);
1024 while(index < num_stripes) {
1025 struct btrfs_stripe *stripe;
1026 BUG_ON(list_empty(&private_devs));
1027 cur = private_devs.next;
1028 device = list_entry(cur, struct btrfs_device, dev_list);
1030 /* loop over this device again if we're doing a dup group */
1031 if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
1032 (index == num_stripes - 1))
1033 list_move_tail(&device->dev_list, dev_list);
1035 ret = btrfs_alloc_dev_extent(trans, device, key.offset,
1036 calc_size, &dev_offset, 0);
1040 device->bytes_used += calc_size;
1041 ret = btrfs_update_device(trans, device);
1045 map->stripes[index].dev = device;
1046 map->stripes[index].physical = dev_offset;
1047 stripe = stripes + index;
1048 btrfs_set_stack_stripe_devid(stripe, device->devid);
1049 btrfs_set_stack_stripe_offset(stripe, dev_offset);
1050 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
1053 BUG_ON(!list_empty(&private_devs));
1055 /* key was set above */
1056 btrfs_set_stack_chunk_length(chunk, *num_bytes);
1057 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
1058 btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
1059 btrfs_set_stack_chunk_type(chunk, type);
1060 btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
1061 btrfs_set_stack_chunk_io_align(chunk, stripe_len);
1062 btrfs_set_stack_chunk_io_width(chunk, stripe_len);
1063 btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
1064 btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
1065 map->sector_size = info->sectorsize;
1066 map->stripe_len = stripe_len;
1067 map->io_align = stripe_len;
1068 map->io_width = stripe_len;
1070 map->num_stripes = num_stripes;
1071 map->sub_stripes = sub_stripes;
1073 ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
1074 btrfs_chunk_item_size(num_stripes));
*start = key.offset;
1078 map->ce.start = key.offset;
1079 map->ce.size = *num_bytes;
1081 ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
1085 if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1086 ret = btrfs_add_system_chunk(info, &key,
1087 chunk, btrfs_chunk_item_size(num_stripes));
 * Alloc a DATA chunk with SINGLE profile.
 * If 'convert' is set, it will alloc a chunk with 1:1 mapping
 * (btrfs logical bytenr == on-disk bytenr).
 * For that case, the caller must make sure the chunk and dev_extent ranges
 * are not already occupied.
1110 int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
1111 struct btrfs_fs_info *info, u64 *start,
1112 u64 num_bytes, u64 type, int convert)
1115 struct btrfs_root *extent_root = info->extent_root;
1116 struct btrfs_root *chunk_root = info->chunk_root;
1117 struct btrfs_stripe *stripes;
1118 struct btrfs_device *device = NULL;
1119 struct btrfs_chunk *chunk;
1120 struct list_head *dev_list = &info->fs_devices->devices;
1121 struct list_head *cur;
1122 struct map_lookup *map;
1123 u64 calc_size = SZ_8M;
1124 int num_stripes = 1;
1125 int sub_stripes = 0;
1128 int stripe_len = BTRFS_STRIPE_LEN;
1129 struct btrfs_key key;
1131 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1132 key.type = BTRFS_CHUNK_ITEM_KEY;
1134 if (*start != round_down(*start, info->sectorsize)) {
1135 error("DATA chunk start not sectorsize aligned: %llu",
1136 (unsigned long long)*start);
1139 key.offset = *start;
1140 dev_offset = *start;
1144 ret = find_next_chunk(info, &tmp);
1150 chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
1154 map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
1160 stripes = &chunk->stripe;
1161 calc_size = num_bytes;
1164 cur = dev_list->next;
1165 device = list_entry(cur, struct btrfs_device, dev_list);
1167 while (index < num_stripes) {
1168 struct btrfs_stripe *stripe;
1170 ret = btrfs_alloc_dev_extent(trans, device, key.offset,
1171 calc_size, &dev_offset, convert);
1174 device->bytes_used += calc_size;
1175 ret = btrfs_update_device(trans, device);
1178 map->stripes[index].dev = device;
1179 map->stripes[index].physical = dev_offset;
1180 stripe = stripes + index;
1181 btrfs_set_stack_stripe_devid(stripe, device->devid);
1182 btrfs_set_stack_stripe_offset(stripe, dev_offset);
1183 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
1187 /* key was set above */
1188 btrfs_set_stack_chunk_length(chunk, num_bytes);
1189 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
1190 btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
1191 btrfs_set_stack_chunk_type(chunk, type);
1192 btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
1193 btrfs_set_stack_chunk_io_align(chunk, stripe_len);
1194 btrfs_set_stack_chunk_io_width(chunk, stripe_len);
1195 btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
1196 btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
1197 map->sector_size = info->sectorsize;
1198 map->stripe_len = stripe_len;
1199 map->io_align = stripe_len;
1200 map->io_width = stripe_len;
1202 map->num_stripes = num_stripes;
1203 map->sub_stripes = sub_stripes;
1205 ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
1206 btrfs_chunk_item_size(num_stripes));
1209 *start = key.offset;
1211 map->ce.start = key.offset;
1212 map->ce.size = num_bytes;
1214 ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
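/*
 * Return how many copies of the data at @logical exist, based on the profile
 * of the chunk that contains it (the stripe count for RAID1/DUP, sub_stripes
 * for RAID10).
 */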
1221 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
1223 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
1224 struct cache_extent *ce;
1225 struct map_lookup *map;
1228 ce = search_cache_extent(&map_tree->cache_tree, logical);
1230 fprintf(stderr, "No mapping for %llu-%llu\n",
1231 (unsigned long long)logical,
1232 (unsigned long long)logical+len);
1235 if (ce->start > logical || ce->start + ce->size < logical) {
1236 fprintf(stderr, "Invalid mapping for %llu-%llu, got "
1237 "%llu-%llu\n", (unsigned long long)logical,
1238 (unsigned long long)logical+len,
1239 (unsigned long long)ce->start,
1240 (unsigned long long)ce->start + ce->size);
1243 map = container_of(ce, struct map_lookup, ce);
1245 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
1246 ret = map->num_stripes;
1247 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
1248 ret = map->sub_stripes;
1249 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
1251 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
1258 int btrfs_next_bg(struct btrfs_fs_info *fs_info, u64 *logical,
1259 u64 *size, u64 type)
1261 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
1262 struct cache_extent *ce;
1263 struct map_lookup *map;
1266 ce = search_cache_extent(&map_tree->cache_tree, cur);
 * Only jump to the next bg if our cur is not 0.
 * As the initial logical for btrfs_next_bg() is 0, jumping to the next
 * bg here would skip a valid bg (the one starting at logical 0).
1275 ce = next_cache_extent(ce);
1281 map = container_of(ce, struct map_lookup, ce);
1282 if (map->type & type) {
1283 *logical = ce->start;
1288 ce = next_cache_extent(ce);
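/*
 * Reverse mapping: given a physical offset on @devid inside the chunk at
 * @chunk_start, return the logical address(es) that map to it.
 */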
1294 int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
1295 u64 chunk_start, u64 physical, u64 devid,
1296 u64 **logical, int *naddrs, int *stripe_len)
1298 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
1299 struct cache_extent *ce;
1300 struct map_lookup *map;
1308 ce = search_cache_extent(&map_tree->cache_tree, chunk_start);
1310 map = container_of(ce, struct map_lookup, ce);
1313 rmap_len = map->stripe_len;
1314 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
1315 length = ce->size / (map->num_stripes / map->sub_stripes);
1316 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
1317 length = ce->size / map->num_stripes;
1318 else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
1319 BTRFS_BLOCK_GROUP_RAID6)) {
1320 length = ce->size / nr_data_stripes(map);
1321 rmap_len = map->stripe_len * nr_data_stripes(map);
1324 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
1326 for (i = 0; i < map->num_stripes; i++) {
1327 if (devid && map->stripes[i].dev->devid != devid)
1329 if (map->stripes[i].physical > physical ||
1330 map->stripes[i].physical + length <= physical)
1333 stripe_nr = (physical - map->stripes[i].physical) /
1336 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1337 stripe_nr = (stripe_nr * map->num_stripes + i) /
1339 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
1340 stripe_nr = stripe_nr * map->num_stripes + i;
1341 } /* else if RAID[56], multiply by nr_data_stripes().
1342 * Alternatively, just use rmap_len below instead of
1343 * map->stripe_len */
1345 bytenr = ce->start + stripe_nr * rmap_len;
1346 for (j = 0; j < nr; j++) {
1347 if (buf[j] == bytenr)
1356 *stripe_len = rmap_len;
1361 static inline int parity_smaller(u64 a, u64 b)
1366 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
1367 static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
1369 struct btrfs_bio_stripe s;
1376 for (i = 0; i < bbio->num_stripes - 1; i++) {
1377 if (parity_smaller(raid_map[i], raid_map[i+1])) {
1378 s = bbio->stripes[i];
1380 bbio->stripes[i] = bbio->stripes[i+1];
1381 raid_map[i] = raid_map[i+1];
1382 bbio->stripes[i+1] = s;
1390 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
1391 u64 logical, u64 *length,
1392 struct btrfs_multi_bio **multi_ret, int mirror_num,
1395 return __btrfs_map_block(fs_info, rw, logical, length, NULL,
1396 multi_ret, mirror_num, raid_map_ret);
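/*
 * Core mapping routine: translate a logical range into the physical stripes
 * that back it. For RAID5/6 writes or recovery all stripes are returned and
 * @raid_map_ret records which logical address (or P/Q parity) each stripe
 * holds.
 */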
1399 int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
1400 u64 logical, u64 *length, u64 *type,
1401 struct btrfs_multi_bio **multi_ret, int mirror_num,
1404 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
1405 struct cache_extent *ce;
1406 struct map_lookup *map;
1410 u64 *raid_map = NULL;
1411 int stripes_allocated = 8;
1412 int stripes_required = 1;
1415 struct btrfs_multi_bio *multi = NULL;
1417 if (multi_ret && rw == READ) {
1418 stripes_allocated = 1;
1421 ce = search_cache_extent(&map_tree->cache_tree, logical);
1427 if (ce->start > logical) {
1429 *length = ce->start - logical;
1434 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
1439 map = container_of(ce, struct map_lookup, ce);
1440 offset = logical - ce->start;
1443 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
1444 BTRFS_BLOCK_GROUP_DUP)) {
1445 stripes_required = map->num_stripes;
1446 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1447 stripes_required = map->sub_stripes;
1450 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
1451 && multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
1452 /* RAID[56] write or recovery. Return all stripes */
1453 stripes_required = map->num_stripes;
1455 /* Only allocate the map if we've already got a large enough multi_ret */
1456 if (stripes_allocated >= stripes_required) {
1457 raid_map = kmalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
1465 /* if our multi bio struct is too small, back off and try again */
1466 if (multi_ret && stripes_allocated < stripes_required) {
1467 stripes_allocated = stripes_required;
1474 * stripe_nr counts the total number of stripes we have to stride
1475 * to get to this block
1477 stripe_nr = stripe_nr / map->stripe_len;
1479 stripe_offset = stripe_nr * map->stripe_len;
1480 BUG_ON(offset < stripe_offset);
/* stripe_offset is the offset of this block in its stripe */
1483 stripe_offset = offset - stripe_offset;
1485 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
1486 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
1487 BTRFS_BLOCK_GROUP_RAID10 |
1488 BTRFS_BLOCK_GROUP_DUP)) {
1489 /* we limit the length of each bio to what fits in a stripe */
1490 *length = min_t(u64, ce->size - offset,
1491 map->stripe_len - stripe_offset);
1493 *length = ce->size - offset;
1499 multi->num_stripes = 1;
1501 if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1503 multi->num_stripes = map->num_stripes;
1504 else if (mirror_num)
1505 stripe_index = mirror_num - 1;
1507 stripe_index = stripe_nr % map->num_stripes;
1508 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1509 int factor = map->num_stripes / map->sub_stripes;
1511 stripe_index = stripe_nr % factor;
1512 stripe_index *= map->sub_stripes;
1515 multi->num_stripes = map->sub_stripes;
1516 else if (mirror_num)
1517 stripe_index += mirror_num - 1;
1519 stripe_nr = stripe_nr / factor;
1520 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1522 multi->num_stripes = map->num_stripes;
1523 else if (mirror_num)
1524 stripe_index = mirror_num - 1;
1525 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
1526 BTRFS_BLOCK_GROUP_RAID6)) {
1531 u64 raid56_full_stripe_start;
1532 u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len;
1535 * align the start of our data stripe in the logical
1538 raid56_full_stripe_start = offset / full_stripe_len;
1539 raid56_full_stripe_start *= full_stripe_len;
1541 /* get the data stripe number */
1542 stripe_nr = raid56_full_stripe_start / map->stripe_len;
1543 stripe_nr = stripe_nr / nr_data_stripes(map);
1545 /* Work out the disk rotation on this stripe-set */
1546 rot = stripe_nr % map->num_stripes;
1548 /* Fill in the logical address of each stripe */
1549 tmp = stripe_nr * nr_data_stripes(map);
1551 for (i = 0; i < nr_data_stripes(map); i++)
1552 raid_map[(i+rot) % map->num_stripes] =
1553 ce->start + (tmp + i) * map->stripe_len;
1555 raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE;
1556 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
1557 raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE;
1559 *length = map->stripe_len;
1562 multi->num_stripes = map->num_stripes;
1564 stripe_index = stripe_nr % nr_data_stripes(map);
1565 stripe_nr = stripe_nr / nr_data_stripes(map);
1568 * Mirror #0 or #1 means the original data block.
1569 * Mirror #2 is RAID5 parity block.
1570 * Mirror #3 is RAID6 Q block.
1573 stripe_index = nr_data_stripes(map) + mirror_num - 2;
1575 /* We distribute the parity blocks across stripes */
1576 stripe_index = (stripe_nr + stripe_index) % map->num_stripes;
 * after this division, stripe_nr is the number of stripes
 * on this device we have to walk to find the data, and
 * stripe_index is the number of our device in the stripe array
1584 stripe_index = stripe_nr % map->num_stripes;
1585 stripe_nr = stripe_nr / map->num_stripes;
1587 BUG_ON(stripe_index >= map->num_stripes);
1589 for (i = 0; i < multi->num_stripes; i++) {
1590 multi->stripes[i].physical =
1591 map->stripes[stripe_index].physical + stripe_offset +
1592 stripe_nr * map->stripe_len;
1593 multi->stripes[i].dev = map->stripes[stripe_index].dev;
1602 sort_parity_stripes(multi, raid_map);
1603 *raid_map_ret = raid_map;
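/*
 * Find a device by devid and uuid, searching this filesystem's devices and
 * then any seed filesystems chained behind it.
 */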
1609 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
1612 struct btrfs_device *device;
1613 struct btrfs_fs_devices *cur_devices;
1615 cur_devices = fs_info->fs_devices;
1616 while (cur_devices) {
1618 (!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE) ||
1619 fs_info->ignore_fsid_mismatch)) {
1620 device = __find_device(&cur_devices->devices,
1625 cur_devices = cur_devices->seed;
1630 struct btrfs_device *
1631 btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
1632 u64 devid, int instance)
1634 struct list_head *head = &fs_devices->devices;
1635 struct btrfs_device *dev;
1638 list_for_each_entry(dev, head, dev_list) {
1639 if (dev->devid == devid && num_found++ == instance)
1645 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
1647 struct cache_extent *ce;
1648 struct map_lookup *map;
1649 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 * During chunk recovery, we may fail to find a block group's
 * corresponding chunk; we will rebuild it later.
1657 ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
1658 if (!fs_info->is_chunk_recover)
1663 map = container_of(ce, struct map_lookup, ce);
1664 for (i = 0; i < map->num_stripes; i++) {
1665 if (!map->stripes[i].dev->writeable) {
1674 static struct btrfs_device *fill_missing_device(u64 devid)
1676 struct btrfs_device *device;
1678 device = kzalloc(sizeof(*device), GFP_NOFS);
1679 device->devid = devid;
1685 * slot == -1: SYSTEM chunk
1686 * return -EIO on error, otherwise return 0
1688 int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
1689 struct extent_buffer *leaf,
1690 struct btrfs_chunk *chunk,
1691 int slot, u64 logical)
1698 u32 chunk_ondisk_size;
1699 u32 sectorsize = fs_info->sectorsize;
1701 length = btrfs_chunk_length(leaf, chunk);
1702 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
1703 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
1704 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
1705 type = btrfs_chunk_type(leaf, chunk);
 * These validity checks may be insufficient to cover every corner case.
1710 if (!IS_ALIGNED(logical, sectorsize)) {
1711 error("invalid chunk logical %llu", logical);
1714 if (btrfs_chunk_sector_size(leaf, chunk) != sectorsize) {
1715 error("invalid chunk sectorsize %llu",
1716 (unsigned long long)btrfs_chunk_sector_size(leaf, chunk));
1719 if (!length || !IS_ALIGNED(length, sectorsize)) {
1720 error("invalid chunk length %llu", length);
1723 if (stripe_len != BTRFS_STRIPE_LEN) {
1724 error("invalid chunk stripe length: %llu", stripe_len);
1727 /* Check on chunk item type */
1728 if (slot == -1 && (type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
1729 error("invalid chunk type %llu", type);
1732 if (type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
1733 BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
1734 error("unrecognized chunk type: %llu",
1735 ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
1736 BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
1739 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1740 error("missing chunk type flag: %llu", type);
1743 if (!(is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) ||
1744 (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)) {
1745 error("conflicting chunk type detected: %llu", type);
1748 if ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
1749 !is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
1750 error("conflicting chunk profile detected: %llu", type);
1754 chunk_ondisk_size = btrfs_chunk_item_size(num_stripes);
 * A btrfs_chunk contains at least one stripe, and for a sys_chunk
 * it can't exceed the system chunk array size.
 * For a normal chunk, it should match its chunk item size.
1760 if (num_stripes < 1 ||
1761 (slot == -1 && chunk_ondisk_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) ||
1762 (slot >= 0 && chunk_ondisk_size > btrfs_item_size_nr(leaf, slot))) {
1763 error("invalid num_stripes: %u", num_stripes);
1767 * Device number check against profile
1769 if ((type & BTRFS_BLOCK_GROUP_RAID10 && (sub_stripes != 2 ||
1770 !IS_ALIGNED(num_stripes, sub_stripes))) ||
1771 (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
1772 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
1773 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
1774 (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
1775 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
1776 num_stripes != 1)) {
1777 error("Invalid num_stripes:sub_stripes %u:%u for profile %llu",
1778 num_stripes, sub_stripes,
1779 type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
1787 * Slot is used to verify the chunk item is valid
1789 * For sys chunk in superblock, pass -1 to indicate sys chunk.
1791 static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
1792 struct extent_buffer *leaf,
1793 struct btrfs_chunk *chunk, int slot)
1795 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
1796 struct map_lookup *map;
1797 struct cache_extent *ce;
1801 u8 uuid[BTRFS_UUID_SIZE];
1806 logical = key->offset;
1807 length = btrfs_chunk_length(leaf, chunk);
1808 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
1809 /* Validation check */
1810 ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, slot, logical);
1812 error("%s checksums match, but it has an invalid chunk, %s",
1813 (slot == -1) ? "Superblock" : "Metadata",
(slot == -1) ? "try btrfsck --repair -s <superblock> i.e., 0,1,2" : "");
1818 ce = search_cache_extent(&map_tree->cache_tree, logical);
1820 /* already mapped? */
1821 if (ce && ce->start <= logical && ce->start + ce->size > logical) {
1825 map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
1829 map->ce.start = logical;
1830 map->ce.size = length;
1831 map->num_stripes = num_stripes;
1832 map->io_width = btrfs_chunk_io_width(leaf, chunk);
1833 map->io_align = btrfs_chunk_io_align(leaf, chunk);
1834 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
1835 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
1836 map->type = btrfs_chunk_type(leaf, chunk);
1837 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
1839 for (i = 0; i < num_stripes; i++) {
1840 map->stripes[i].physical =
1841 btrfs_stripe_offset_nr(leaf, chunk, i);
1842 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
1843 read_extent_buffer(leaf, uuid, (unsigned long)
1844 btrfs_stripe_dev_uuid_nr(chunk, i),
1846 map->stripes[i].dev = btrfs_find_device(fs_info, devid, uuid,
1848 if (!map->stripes[i].dev) {
1849 map->stripes[i].dev = fill_missing_device(devid);
1850 printf("warning, device %llu is missing\n",
1851 (unsigned long long)devid);
1852 list_add(&map->stripes[i].dev->dev_list,
1853 &fs_info->fs_devices->devices);
1857 ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
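/* Copy the on-disk DEV_ITEM fields into the in-memory btrfs_device. */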
1863 static int fill_device_from_item(struct extent_buffer *leaf,
1864 struct btrfs_dev_item *dev_item,
1865 struct btrfs_device *device)
1869 device->devid = btrfs_device_id(leaf, dev_item);
1870 device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
1871 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
1872 device->type = btrfs_device_type(leaf, dev_item);
1873 device->io_align = btrfs_device_io_align(leaf, dev_item);
1874 device->io_width = btrfs_device_io_width(leaf, dev_item);
1875 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
1877 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1878 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
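/*
 * Locate (or create a placeholder for) the fs_devices of the seed filesystem
 * with the given fsid and chain it onto this filesystem's seed list so its
 * devices can be found by btrfs_find_device().
 */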
1883 static int open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid)
1885 struct btrfs_fs_devices *fs_devices;
1888 fs_devices = fs_info->fs_devices->seed;
1889 while (fs_devices) {
1890 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
1894 fs_devices = fs_devices->seed;
1897 fs_devices = find_fsid(fsid);
1899 /* missing all seed devices */
1900 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1905 INIT_LIST_HEAD(&fs_devices->devices);
1906 list_add(&fs_devices->list, &fs_uuids);
1907 memcpy(fs_devices->fsid, fsid, BTRFS_FSID_SIZE);
1910 ret = btrfs_open_devices(fs_devices, O_RDONLY);
1914 fs_devices->seed = fs_info->fs_devices->seed;
1915 fs_info->fs_devices->seed = fs_devices;
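/*
 * Process one DEV_ITEM from the chunk tree: set up seed devices if the item
 * belongs to a seed filesystem, then find or allocate the matching
 * btrfs_device and fill it in from the item.
 */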
1920 static int read_one_dev(struct btrfs_fs_info *fs_info,
1921 struct extent_buffer *leaf,
1922 struct btrfs_dev_item *dev_item)
1924 struct btrfs_device *device;
1927 u8 fs_uuid[BTRFS_UUID_SIZE];
1928 u8 dev_uuid[BTRFS_UUID_SIZE];
1930 devid = btrfs_device_id(leaf, dev_item);
1931 read_extent_buffer(leaf, dev_uuid,
1932 (unsigned long)btrfs_device_uuid(dev_item),
1934 read_extent_buffer(leaf, fs_uuid,
1935 (unsigned long)btrfs_device_fsid(dev_item),
1938 if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
1939 ret = open_seed_devices(fs_info, fs_uuid);
1944 device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
1946 device = kzalloc(sizeof(*device), GFP_NOFS);
1950 list_add(&device->dev_list,
1951 &fs_info->fs_devices->devices);
1954 fill_device_from_item(leaf, dev_item, device);
1955 device->dev_root = fs_info->dev_root;
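/*
 * Parse the super block's sys_chunk_array: a packed sequence of
 * (disk_key, chunk item) pairs describing the SYSTEM chunks that are needed
 * to read the chunk tree itself.
 */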
1959 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
1961 struct btrfs_super_block *super_copy = fs_info->super_copy;
1962 struct extent_buffer *sb;
1963 struct btrfs_disk_key *disk_key;
1964 struct btrfs_chunk *chunk;
1966 unsigned long sb_array_offset;
1972 struct btrfs_key key;
1974 if (fs_info->nodesize < BTRFS_SUPER_INFO_SIZE) {
1975 printf("ERROR: nodesize %u too small to read superblock\n",
1979 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
1982 btrfs_set_buffer_uptodate(sb);
1983 write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
1984 array_size = btrfs_super_sys_array_size(super_copy);
1986 array_ptr = super_copy->sys_chunk_array;
1987 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
1990 while (cur_offset < array_size) {
1991 disk_key = (struct btrfs_disk_key *)array_ptr;
1992 len = sizeof(*disk_key);
1993 if (cur_offset + len > array_size)
1994 goto out_short_read;
1996 btrfs_disk_key_to_cpu(&key, disk_key);
1999 sb_array_offset += len;
2002 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2003 chunk = (struct btrfs_chunk *)sb_array_offset;
2005 * At least one btrfs_chunk with one stripe must be
2006 * present, exact stripe count check comes afterwards
2008 len = btrfs_chunk_item_size(1);
2009 if (cur_offset + len > array_size)
2010 goto out_short_read;
2012 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
2015 "ERROR: invalid number of stripes %u in sys_array at offset %u\n",
2016 num_stripes, cur_offset);
2021 len = btrfs_chunk_item_size(num_stripes);
2022 if (cur_offset + len > array_size)
2023 goto out_short_read;
2025 ret = read_one_chunk(fs_info, &key, sb, chunk, -1);
2030 "ERROR: unexpected item type %u in sys_array at offset %u\n",
2031 (u32)key.type, cur_offset);
2036 sb_array_offset += len;
2039 free_extent_buffer(sb);
2043 printk("ERROR: sys_array too short to read %u bytes at offset %u\n",
2045 free_extent_buffer(sb);
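/*
 * Walk the chunk tree and build the in-memory state: device items first, then
 * the logical->physical chunk mappings.
 */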
2049 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
2051 struct btrfs_path *path;
2052 struct extent_buffer *leaf;
2053 struct btrfs_key key;
2054 struct btrfs_key found_key;
2055 struct btrfs_root *root = fs_info->chunk_root;
2059 path = btrfs_alloc_path();
2064 * Read all device items, and then all the chunk items. All
2065 * device items are found before any chunk item (their object id
2066 * is smaller than the lowest possible object id for a chunk
2067 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
2069 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2072 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2076 leaf = path->nodes[0];
2077 slot = path->slots[0];
2078 if (slot >= btrfs_header_nritems(leaf)) {
2079 ret = btrfs_next_leaf(root, path);
2086 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2087 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
2088 struct btrfs_dev_item *dev_item;
2089 dev_item = btrfs_item_ptr(leaf, slot,
2090 struct btrfs_dev_item);
2091 ret = read_one_dev(fs_info, leaf, dev_item);
2093 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
2094 struct btrfs_chunk *chunk;
2095 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2096 ret = read_one_chunk(fs_info, &found_key, leaf, chunk,
2105 btrfs_free_path(path);
2109 struct list_head *btrfs_scanned_uuids(void)
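/*
 * Read the stripe buffer @eb from disk, then overlay the part of @orig_eb
 * that overlaps it (read-modify-write helper for partially covered stripes).
 */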
2114 static int rmw_eb(struct btrfs_fs_info *info,
2115 struct extent_buffer *eb, struct extent_buffer *orig_eb)
2118 unsigned long orig_off = 0;
2119 unsigned long dest_off = 0;
2120 unsigned long copy_len = eb->len;
2122 ret = read_whole_eb(info, eb, 0);
2126 if (eb->start + eb->len <= orig_eb->start ||
2127 eb->start >= orig_eb->start + orig_eb->len)
 * | ----- orig_eb ------- |
 *         | ----- stripe ------- |
 * | ----- orig_eb ------- |
 *              | ----- orig_eb ------- |
2135 if (eb->start > orig_eb->start)
2136 orig_off = eb->start - orig_eb->start;
2137 if (orig_eb->start > eb->start)
2138 dest_off = orig_eb->start - eb->start;
2140 if (copy_len > orig_eb->len - orig_off)
2141 copy_len = orig_eb->len - orig_off;
2142 if (copy_len > eb->len - dest_off)
2143 copy_len = eb->len - dest_off;
2145 memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);
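/*
 * Split @orig_eb into per-stripe buffers for a RAID5/6 full stripe write;
 * stripes that @orig_eb only partially covers are first read back from disk
 * via rmw_eb().
 */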
2149 static int split_eb_for_raid56(struct btrfs_fs_info *info,
2150 struct extent_buffer *orig_eb,
2151 struct extent_buffer **ebs,
2152 u64 stripe_len, u64 *raid_map,
2155 struct extent_buffer **tmp_ebs;
2156 u64 start = orig_eb->start;
2161 tmp_ebs = calloc(num_stripes, sizeof(*tmp_ebs));
2165 /* Alloc memory in a row for data stripes */
2166 for (i = 0; i < num_stripes; i++) {
2167 if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
2170 tmp_ebs[i] = calloc(1, sizeof(**tmp_ebs) + stripe_len);
2177 for (i = 0; i < num_stripes; i++) {
2178 struct extent_buffer *eb = tmp_ebs[i];
2180 if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
2183 eb->start = raid_map[i];
2184 eb->len = stripe_len;
2188 eb->dev_bytenr = (u64)-1;
2190 this_eb_start = raid_map[i];
2192 if (start > this_eb_start ||
2193 start + orig_eb->len < this_eb_start + stripe_len) {
2194 ret = rmw_eb(info, eb, orig_eb);
2198 memcpy(eb->data, orig_eb->data + eb->start - start,
2206 for (i = 0; i < num_stripes; i++)
2212 int write_raid56_with_parity(struct btrfs_fs_info *info,
2213 struct extent_buffer *eb,
2214 struct btrfs_multi_bio *multi,
2215 u64 stripe_len, u64 *raid_map)
2217 struct extent_buffer **ebs, *p_eb = NULL, *q_eb = NULL;
2220 int alloc_size = eb->len;
2223 ebs = malloc(sizeof(*ebs) * multi->num_stripes);
2224 pointers = malloc(sizeof(*pointers) * multi->num_stripes);
2225 if (!ebs || !pointers) {
2231 if (stripe_len > alloc_size)
2232 alloc_size = stripe_len;
2234 ret = split_eb_for_raid56(info, eb, ebs, stripe_len, raid_map,
2235 multi->num_stripes);
2239 for (i = 0; i < multi->num_stripes; i++) {
2240 struct extent_buffer *new_eb;
2241 if (raid_map[i] < BTRFS_RAID5_P_STRIPE) {
2242 ebs[i]->dev_bytenr = multi->stripes[i].physical;
2243 ebs[i]->fd = multi->stripes[i].dev->fd;
2244 multi->stripes[i].dev->total_ios++;
2245 if (ebs[i]->start != raid_map[i]) {
2247 goto out_free_split;
2251 new_eb = malloc(sizeof(*eb) + alloc_size);
2254 goto out_free_split;
2256 new_eb->dev_bytenr = multi->stripes[i].physical;
2257 new_eb->fd = multi->stripes[i].dev->fd;
2258 multi->stripes[i].dev->total_ios++;
2259 new_eb->len = stripe_len;
2261 if (raid_map[i] == BTRFS_RAID5_P_STRIPE)
2263 else if (raid_map[i] == BTRFS_RAID6_Q_STRIPE)
2267 ebs[multi->num_stripes - 2] = p_eb;
2268 ebs[multi->num_stripes - 1] = q_eb;
2270 for (i = 0; i < multi->num_stripes; i++)
2271 pointers[i] = ebs[i]->data;
2273 raid6_gen_syndrome(multi->num_stripes, stripe_len, pointers);
2275 ebs[multi->num_stripes - 1] = p_eb;
2276 for (i = 0; i < multi->num_stripes; i++)
2277 pointers[i] = ebs[i]->data;
2278 ret = raid5_gen_result(multi->num_stripes, stripe_len,
2279 multi->num_stripes - 1, pointers);
2281 goto out_free_split;
2284 for (i = 0; i < multi->num_stripes; i++) {
2285 ret = write_extent_to_disk(ebs[i]);
2287 goto out_free_split;
2291 for (i = 0; i < multi->num_stripes; i++) {
2303 * Get stripe length from chunk item and its stripe items
2305 * Caller should only call this function after validating the chunk item
2306 * by using btrfs_check_chunk_valid().
2308 u64 btrfs_stripe_length(struct btrfs_fs_info *fs_info,
2309 struct extent_buffer *leaf,
2310 struct btrfs_chunk *chunk)
2314 u32 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2315 u64 profile = btrfs_chunk_type(leaf, chunk) &
2316 BTRFS_BLOCK_GROUP_PROFILE_MASK;
2318 chunk_len = btrfs_chunk_length(leaf, chunk);
2321 case 0: /* Single profile */
2322 case BTRFS_BLOCK_GROUP_RAID1:
2323 case BTRFS_BLOCK_GROUP_DUP:
2324 stripe_len = chunk_len;
2326 case BTRFS_BLOCK_GROUP_RAID0:
2327 stripe_len = chunk_len / num_stripes;
2329 case BTRFS_BLOCK_GROUP_RAID5:
2330 stripe_len = chunk_len / (num_stripes - 1);
2332 case BTRFS_BLOCK_GROUP_RAID6:
2333 stripe_len = chunk_len / (num_stripes - 2);
2335 case BTRFS_BLOCK_GROUP_RAID10:
2336 stripe_len = chunk_len / (num_stripes /
2337 btrfs_chunk_sub_stripes(leaf, chunk));
2340 /* Invalid chunk profile found */
2347 * Return 0 if size of @device is already good
2348 * Return >0 if size of @device is not aligned but fixed without problems
2349 * Return <0 if something wrong happened when aligning the size of @device
2351 int btrfs_fix_device_size(struct btrfs_fs_info *fs_info,
2352 struct btrfs_device *device)
2354 struct btrfs_trans_handle *trans;
2355 struct btrfs_key key;
2356 struct btrfs_path path;
2357 struct btrfs_root *chunk_root = fs_info->chunk_root;
2358 struct btrfs_dev_item *di;
2359 u64 old_bytes = device->total_bytes;
2362 if (IS_ALIGNED(old_bytes, fs_info->sectorsize))
2365 /* Align the in-memory total_bytes first, and use it as correct size */
2366 device->total_bytes = round_down(device->total_bytes,
2367 fs_info->sectorsize);
2369 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2370 key.type = BTRFS_DEV_ITEM_KEY;
2371 key.offset = device->devid;
2373 trans = btrfs_start_transaction(chunk_root, 1);
2374 if (IS_ERR(trans)) {
2375 ret = PTR_ERR(trans);
2376 error("error starting transaction: %d (%s)",
2377 ret, strerror(-ret));
2381 btrfs_init_path(&path);
2382 ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 1);
2384 error("failed to find DEV_ITEM for devid %llu", device->devid);
2389 error("failed to search chunk root: %d (%s)",
2390 ret, strerror(-ret));
2393 di = btrfs_item_ptr(path.nodes[0], path.slots[0], struct btrfs_dev_item);
2394 btrfs_set_device_total_bytes(path.nodes[0], di, device->total_bytes);
2395 btrfs_mark_buffer_dirty(path.nodes[0]);
2396 ret = btrfs_commit_transaction(trans, chunk_root);
2398 error("failed to commit current transaction: %d (%s)",
2399 ret, strerror(-ret));
2400 btrfs_release_path(&path);
2403 btrfs_release_path(&path);
2404 printf("Fixed device size for devid %llu, old size: %llu new size: %llu\n",
2405 device->devid, old_bytes, device->total_bytes);
2409 /* We haven't modified anything, it's OK to commit current trans */
2410 btrfs_commit_transaction(trans, chunk_root);
2411 btrfs_release_path(&path);
2416 * Return 0 if super block total_bytes matches all devices' total_bytes
2417 * Return >0 if super block total_bytes mismatch but fixed without problem
2418 * Return <0 if we failed to fix super block total_bytes
2420 int btrfs_fix_super_size(struct btrfs_fs_info *fs_info)
2422 struct btrfs_trans_handle *trans;
2423 struct btrfs_device *device;
2424 struct list_head *dev_list = &fs_info->fs_devices->devices;
2425 u64 total_bytes = 0;
2426 u64 old_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2429 list_for_each_entry(device, dev_list, dev_list) {
2431 * Caller should ensure this function is called after aligning
2432 * all devices' total_bytes.
2434 if (!IS_ALIGNED(device->total_bytes, fs_info->sectorsize)) {
2435 error("device %llu total_bytes %llu not aligned to %u",
2436 device->devid, device->total_bytes,
2437 fs_info->sectorsize);
2440 total_bytes += device->total_bytes;
2443 if (total_bytes == old_bytes)
2446 btrfs_set_super_total_bytes(fs_info->super_copy, total_bytes);
2448 /* Commit transaction to update all super blocks */
2449 trans = btrfs_start_transaction(fs_info->tree_root, 1);
2450 if (IS_ERR(trans)) {
2451 ret = PTR_ERR(trans);
2452 error("error starting transaction: %d (%s)",
2453 ret, strerror(-ret));
2456 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
2458 error("failed to commit current transaction: %d (%s)",
2459 ret, strerror(-ret));
2462 printf("Fixed super total bytes, old size: %llu new size: %llu\n",
2463 old_bytes, total_bytes);
2468 * Return 0 if all devices and super block sizes are good
2469 * Return >0 if any device/super size problem was found, but fixed
2470 * Return <0 if something wrong happened during fixing
2472 int btrfs_fix_device_and_super_size(struct btrfs_fs_info *fs_info)
2474 struct btrfs_device *device;
2475 struct list_head *dev_list = &fs_info->fs_devices->devices;
2476 bool have_bad_value = false;
2479 /* Seed device is not supported yet */
2480 if (fs_info->fs_devices->seed) {
2481 error("fixing device size with seed device is not supported yet");
2485 /* All devices must be set up before repairing */
2486 if (list_empty(dev_list)) {
2487 error("no device found");
2490 list_for_each_entry(device, dev_list, dev_list) {
2491 if (device->fd == -1 || !device->writeable) {
2492 error("devid %llu is missing or not writeable",
2495 "fixing device size needs all device(s) to be present and writeable");
2500 /* Repair total_bytes of each device */
2501 list_for_each_entry(device, dev_list, dev_list) {
2502 ret = btrfs_fix_device_size(fs_info, device);
2506 have_bad_value = true;
/* Repair super total_bytes */
2510 ret = btrfs_fix_super_size(fs_info);
2512 have_bad_value = true;
2513 if (have_bad_value) {
2515 "Fixed unaligned/mismatched total_bytes for super block and device items\n");
2518 printf("No device size related problem found\n");