/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>
#include <fcntl.h>
#include <unistd.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "utils.h"
#include "kernel-lib/raid56.h"
struct stripe {
        struct btrfs_device *dev;
        u64 physical;
};
static inline int nr_parity_stripes(struct map_lookup *map)
{
        if (map->type & BTRFS_BLOCK_GROUP_RAID5)
                return 1;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
                return 2;
        else
                return 0;
}

static inline int nr_data_stripes(struct map_lookup *map)
{
        return map->num_stripes - nr_parity_stripes(map);
}
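
/*
 * Example (illustrative, not from the original source): a RAID6 chunk
 * striped across 6 devices has num_stripes == 6 and two parity stripes
 * (P and Q), so nr_data_stripes() returns 4.
 */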
#define is_parity_stripe(x) (((x) == BTRFS_RAID5_P_STRIPE) || \
                             ((x) == BTRFS_RAID6_Q_STRIPE))
static LIST_HEAD(fs_uuids);

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
                                          u8 *uuid)
{
        struct btrfs_device *dev;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
                        return dev;
                }
        }
        return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}
static int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                fs_devices->lowest_devid = (u64)-1;
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->fd = -1;
                device->devid = devid;
                device->generation = found_transid;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        return -ENOMEM;
                }
                device->label = kstrdup(disk_super->label, GFP_NOFS);
                if (!device->label) {
                        kfree(device->name);
                        kfree(device);
                        return -ENOMEM;
                }
                device->total_devs = btrfs_super_num_devices(disk_super);
                device->super_bytes_used = btrfs_super_bytes_used(disk_super);
                device->total_bytes =
                        btrfs_stack_device_total_bytes(&disk_super->dev_item);
                device->bytes_used =
                        btrfs_stack_device_bytes_used(&disk_super->dev_item);
                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
        } else if (!device->name || strcmp(device->name, path)) {
                char *name;

                /*
                 * The existing device has a newer generation, so this one
                 * could be a stale one, don't add it.
                 */
                if (found_transid < device->generation) {
                        warning(
"adding device %s gen %llu but found an existing device %s gen %llu",
                                path, found_transid, device->name,
                                device->generation);
                        return -EEXIST;
                } else {
                        name = strdup(path);
                        if (!name)
                                return -ENOMEM;
                        kfree(device->name);
                        device->name = name;
                }
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        if (fs_devices->lowest_devid > devid) {
                fs_devices->lowest_devid = devid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices;
        struct btrfs_device *device;
        int ret = 0;

again:
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                if (device->fd != -1) {
                        if (fsync(device->fd) == -1) {
                                warning("fsync on device %llu failed: %s",
                                        device->devid, strerror(errno));
                                ret = -errno;
                        }
                        if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
                                fprintf(stderr, "Warning, could not drop caches\n");
                        close(device->fd);
                        device->fd = -1;
                }
                device->writeable = 0;
                list_del(&device->dev_list);
                /* free the memory */
                free(device->name);
                free(device->label);
                free(device);
        }

        seed_devices = fs_devices->seed;
        fs_devices->seed = NULL;
        if (seed_devices) {
                struct btrfs_fs_devices *orig;

                orig = fs_devices;
                fs_devices = seed_devices;
                list_del(&orig->list);
                free(orig);
                goto again;
        } else {
                list_del(&fs_devices->list);
                free(fs_devices);
        }

        return ret;
}
void btrfs_close_all_devices(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
                                        list);
                btrfs_close_devices(fs_devices);
        }
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
        int fd;
        struct btrfs_device *device;
        int ret = 0;

        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (!device->name) {
                        printk("no name for device %llu, skip it now\n",
                               device->devid);
                        continue;
                }

                fd = open(device->name, flags);
                if (fd < 0) {
                        ret = -errno;
                        error("cannot open device '%s': %s", device->name,
                              strerror(errno));
                        goto fail;
                }

                if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
                        fprintf(stderr, "Warning, could not drop caches\n");

                if (device->devid == fs_devices->latest_devid)
                        fs_devices->latest_bdev = fd;
                if (device->devid == fs_devices->lowest_devid)
                        fs_devices->lowest_bdev = fd;
                device->fd = fd;
                if (flags & O_RDWR)
                        device->writeable = 1;
        }
        return 0;
fail:
        btrfs_close_devices(fs_devices);
        return ret;
}
int btrfs_scan_one_device(int fd, const char *path,
                          struct btrfs_fs_devices **fs_devices_ret,
                          u64 *total_devs, u64 super_offset, unsigned sbflags)
{
        struct btrfs_super_block *disk_super;
        char buf[BTRFS_SUPER_INFO_SIZE];
        int ret;
        u64 devid;

        disk_super = (struct btrfs_super_block *)buf;
        ret = btrfs_read_dev_super(fd, disk_super, super_offset, sbflags);
        if (ret < 0)
                return -EIO;
        devid = btrfs_stack_device_id(&disk_super->dev_item);
        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
                *total_devs = 1;
        else
                *total_devs = btrfs_super_num_devices(disk_super);

        ret = device_list_add(path, disk_super, devid, fs_devices_ret);

        return ret;
}
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:       the device which we search the free space in
 * @num_bytes:    the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:        store the start of the free space
 * @len:          the size of the free space that we find, or the size
 *                of the max free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
                                      u64 num_bytes, u64 search_start,
                                      u64 *start, u64 *len)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 hole_size;
        u64 max_hole_start;
        u64 max_hole_size = 0;
        u64 extent_end;
        u64 search_end = device->total_bytes;
        int ret;
        int slot;
        struct extent_buffer *l;
        u64 min_search_start;

        /*
         * We don't want to overwrite the superblock on the drive nor any area
         * used by the boot loader (grub for example), so we make sure to start
         * at an offset of at least 1MB.
         */
        min_search_start = max(root->fs_info->alloc_start, (u64)SZ_1M);
        search_start = max(search_start, min_search_start);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        max_hole_start = search_start;

        if (search_start >= search_end) {
                ret = -ENOSPC;
                goto out;
        }

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;
                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (key.type != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                if (key.offset > search_start) {
                        hole_size = key.offset - search_start;

                        /*
                         * Have to check before we set max_hole_start, otherwise
                         * we could end up sending back this offset anyway.
                         */
                        if (hole_size > max_hole_size) {
                                max_hole_start = search_start;
                                max_hole_size = hole_size;
                        }

                        /*
                         * If this free space is greater than what we need,
                         * it must be the max free space that we have found
                         * until now, so max_hole_start must point to the start
                         * of this free space and the length of this free space
                         * is stored in max_hole_size. Thus, we return
                         * max_hole_start and max_hole_size and go back to the
                         * caller.
                         */
                        if (hole_size >= num_bytes) {
                                ret = 0;
                                goto out;
                        }
                }

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
next:
                path->slots[0]++;
        }

        /*
         * At this point, search_start should be the end of
         * allocated dev extents, and when shrinking the device,
         * search_end may be smaller than search_start.
         */
        if (search_end > search_start) {
                hole_size = search_end - search_start;

                if (hole_size > max_hole_size) {
                        max_hole_start = search_start;
                        max_hole_size = hole_size;
                }
        }

        if (max_hole_size < num_bytes)
                ret = -ENOSPC;
        else
                ret = 0;

out:
        btrfs_free_path(path);
        *start = max_hole_start;
        if (len)
                *len = max_hole_size;
        return ret;
}
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                                u64 *start)
{
        /* FIXME use last free of some kind */
        return find_free_dev_extent_start(device, num_bytes, 0, start, NULL);
}
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                                  struct btrfs_device *device,
                                  u64 chunk_tree, u64 chunk_objectid,
                                  u64 chunk_offset,
                                  u64 num_bytes, u64 *start, int convert)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /*
         * For the convert case, just skip searching for a free dev_extent,
         * as the caller is responsible for making sure it's free.
         */
        if (!convert) {
                ret = find_free_dev_extent(device, num_bytes, start);
                if (ret)
                        goto err;
        }

        key.objectid = device->devid;
        key.offset = *start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        BUG_ON(ret);

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
err:
        btrfs_free_path(path);
        return ret;
}
static int find_next_chunk(struct btrfs_fs_info *fs_info, u64 *offset)
{
        struct btrfs_root *root = fs_info->chunk_root;
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_key found_key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
        if (ret) {
                *offset = 0;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != BTRFS_FIRST_CHUNK_TREE_OBJECTID)
                        *offset = 0;
                else {
                        chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                               struct btrfs_chunk);
                        *offset = found_key.offset +
                                btrfs_chunk_length(path->nodes[0], chunk);
                }
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
                           u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_release_path(path);
        return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
                     struct btrfs_fs_info *fs_info,
                     struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_root *root = fs_info->chunk_root;
        unsigned long ptr;
        u64 free_devid = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = find_next_devid(root, path, &free_devid);
        if (ret)
                goto out;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = free_devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        device->devid = free_devid;
        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = (unsigned long)btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
                        struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        root = device->dev_root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_mark_buffer_dirty(leaf);

out:
        btrfs_free_path(path);
        return ret;
}
int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
                           struct btrfs_chunk *chunk, int item_size)
{
        struct btrfs_super_block *super_copy = fs_info->super_copy;
        struct btrfs_disk_key disk_key;
        u32 array_size;
        u8 *ptr;

        array_size = btrfs_super_sys_array_size(super_copy);
        if (array_size + item_size + sizeof(disk_key)
                        > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
                return -EFBIG;

        ptr = super_copy->sys_chunk_array + array_size;
        btrfs_cpu_key_to_disk(&disk_key, key);
        memcpy(ptr, &disk_key, sizeof(disk_key));
        ptr += sizeof(disk_key);
        memcpy(ptr, chunk, item_size);
        item_size += sizeof(disk_key);
        btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
        return 0;
}
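
/*
 * Layout sketch of sys_chunk_array as handled above (summary added for
 * clarity, not an authoritative diagram): the array is a packed sequence
 * of (btrfs_disk_key, btrfs_chunk incl. stripes) pairs, which is why
 * item_size grows by sizeof(disk_key) before the array size is updated.
 */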
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
                               int sub_stripes)
{
        if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
                return calc_size;
        else if (type & BTRFS_BLOCK_GROUP_RAID10)
                return calc_size * (num_stripes / sub_stripes);
        else if (type & BTRFS_BLOCK_GROUP_RAID5)
                return calc_size * (num_stripes - 1);
        else if (type & BTRFS_BLOCK_GROUP_RAID6)
                return calc_size * (num_stripes - 2);
        else
                return calc_size * num_stripes;
}
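
/*
 * Worked example (illustrative numbers): with calc_size = 8MiB per device
 * stripe,
 *   RAID1/DUP:   usable chunk size = 8MiB (all copies mirror each other),
 *   RAID10 (num_stripes = 4, sub_stripes = 2): 8MiB * (4 / 2) = 16MiB,
 *   RAID5 (num_stripes = 4): 8MiB * 3 = 24MiB (one stripe of parity),
 *   RAID6 (num_stripes = 4): 8MiB * 2 = 16MiB (P and Q parity),
 *   single/RAID0 (num_stripes = 4): 8MiB * 4 = 32MiB.
 */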
static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
        /* TODO, add a way to store the preferred stripe size */
        return BTRFS_STRIPE_LEN;
}
/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in the first 1M of the device, and we are
 * not allowed to allocate any chunk before alloc_start if it is specified.
 * So search holes from max(1M, alloc_start) to device->total_bytes.
 */
static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
                                    struct btrfs_device *device,
                                    u64 *avail_bytes)
{
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_dev_extent *dev_extent = NULL;
        struct extent_buffer *l;
        u64 search_start = root->fs_info->alloc_start;
        u64 search_end = device->total_bytes;
        u64 extent_end = 0;
        u64 free_bytes = 0;
        int ret;
        int slot = 0;

        search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = root->fs_info->alloc_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        ret = btrfs_previous_item(root, path, 0, key.type);
        if (ret < 0)
                goto error;

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;
                if (key.objectid > device->devid)
                        break;
                if (key.type != BTRFS_DEV_EXTENT_KEY)
                        goto next;
                if (key.offset > search_end)
                        break;
                if (key.offset > search_start)
                        free_bytes += key.offset - search_start;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
                if (search_start > search_end)
                        break;
next:
                path->slots[0]++;
        }

        if (search_start < search_end)
                free_bytes += search_end - search_start;

        *avail_bytes = free_bytes;
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)             \
                        - sizeof(struct btrfs_item)             \
                        - sizeof(struct btrfs_chunk))           \
                        / sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE        \
                                - 2 * sizeof(struct btrfs_disk_key)     \
                                - 2 * sizeof(struct btrfs_chunk))       \
                                / sizeof(struct btrfs_stripe) + 1)
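
/*
 * Rationale (summary): a chunk item plus its stripe array must fit in a
 * single leaf, so the device count is bounded by how many btrfs_stripe
 * records fit after the item header and the fixed part of btrfs_chunk.
 * The SYS_CHUNK variant keeps room for two (key, chunk) pairs inside the
 * superblock's BTRFS_SYSTEM_CHUNK_ARRAY_SIZE sys_chunk_array.
 */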
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                      struct btrfs_fs_info *info, u64 *start,
                      u64 *num_bytes, u64 type)
{
        u64 dev_offset;
        struct btrfs_root *extent_root = info->extent_root;
        struct btrfs_root *chunk_root = info->chunk_root;
        struct btrfs_stripe *stripes;
        struct btrfs_device *device = NULL;
        struct btrfs_chunk *chunk;
        struct list_head private_devs;
        struct list_head *dev_list = &info->fs_devices->devices;
        struct list_head *cur;
        struct map_lookup *map;
        int min_stripe_size = SZ_1M;
        u64 calc_size = SZ_8M;
        u64 min_free;
        u64 max_chunk_size = 4 * calc_size;
        u64 avail = 0;
        u64 max_avail = 0;
        u64 percent_max;
        int num_stripes = 1;
        int max_stripes = 0;
        int min_stripes = 1;
        int sub_stripes = 0;
        int looped = 0;
        int ret;
        int index;
        int stripe_len = BTRFS_STRIPE_LEN;
        struct btrfs_key key;
        u64 offset;
        if (list_empty(dev_list)) {
                return -ENOSPC;
        }

        if (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
                if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                        calc_size = SZ_8M;
                        max_chunk_size = calc_size * 2;
                        min_stripe_size = SZ_1M;
                        max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
                } else if (type & BTRFS_BLOCK_GROUP_DATA) {
                        calc_size = SZ_1G;
                        max_chunk_size = 10 * calc_size;
                        min_stripe_size = SZ_64M;
                        max_stripes = BTRFS_MAX_DEVS(chunk_root);
                } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
                        calc_size = SZ_1G;
                        max_chunk_size = 4 * calc_size;
                        min_stripe_size = SZ_32M;
                        max_stripes = BTRFS_MAX_DEVS(chunk_root);
                }
        }
        if (type & BTRFS_BLOCK_GROUP_RAID1) {
                num_stripes = min_t(u64, 2,
                                  btrfs_super_num_devices(info->super_copy));
                if (num_stripes < 2)
                        return -ENOSPC;
                min_stripes = 2;
        }
        if (type & BTRFS_BLOCK_GROUP_DUP) {
                num_stripes = 2;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
                num_stripes = btrfs_super_num_devices(info->super_copy);
                if (num_stripes > max_stripes)
                        num_stripes = max_stripes;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                num_stripes = btrfs_super_num_devices(info->super_copy);
                if (num_stripes > max_stripes)
                        num_stripes = max_stripes;
                if (num_stripes < 4)
                        return -ENOSPC;
                num_stripes &= ~(u32)1;
                sub_stripes = 2;
                min_stripes = 4;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
                num_stripes = btrfs_super_num_devices(info->super_copy);
                if (num_stripes > max_stripes)
                        num_stripes = max_stripes;
                if (num_stripes < 2)
                        return -ENOSPC;
                min_stripes = 2;
                stripe_len = find_raid56_stripe_len(num_stripes - 1,
                                btrfs_super_stripesize(info->super_copy));
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
                num_stripes = btrfs_super_num_devices(info->super_copy);
                if (num_stripes > max_stripes)
                        num_stripes = max_stripes;
                if (num_stripes < 3)
                        return -ENOSPC;
                min_stripes = 3;
                stripe_len = find_raid56_stripe_len(num_stripes - 2,
                                btrfs_super_stripesize(info->super_copy));
        }

        /* we don't want a chunk larger than 10% of the FS */
        percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
        max_chunk_size = min(percent_max, max_chunk_size);

again:
        if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
            max_chunk_size) {
                calc_size = max_chunk_size;
                calc_size /= num_stripes;
                calc_size /= stripe_len;
                calc_size *= stripe_len;
        }
        /* we don't want tiny stripes */
        if (!looped)
                calc_size = max_t(u64, calc_size, min_stripe_size);

        calc_size /= stripe_len;
        calc_size *= stripe_len;
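        /*
         * Example of the rounding above (illustrative): with the default
         * 64KiB stripe_len, dividing and re-multiplying by stripe_len
         * rounds calc_size down to a stripe_len multiple, so every device
         * stripe covers a whole number of stripes.
         */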
        INIT_LIST_HEAD(&private_devs);
        cur = dev_list->next;
        index = 0;

        if (type & BTRFS_BLOCK_GROUP_DUP)
                min_free = calc_size * 2;
        else
                min_free = calc_size;

        /* build a private list of devices we will allocate from */
        while (index < num_stripes) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                ret = btrfs_device_avail_bytes(trans, device, &avail);
                if (ret)
                        return ret;
                cur = cur->next;
                if (avail >= min_free) {
                        list_move_tail(&device->dev_list, &private_devs);
                        index++;
                        if (type & BTRFS_BLOCK_GROUP_DUP)
                                index++;
                } else if (avail > max_avail)
                        max_avail = avail;
                if (cur == dev_list)
                        break;
        }
        if (index < num_stripes) {
                list_splice(&private_devs, dev_list);
                if (index >= min_stripes) {
                        num_stripes = index;
                        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                                num_stripes /= sub_stripes;
                                num_stripes *= sub_stripes;
                        }
                        looped = 1;
                        goto again;
                }
                if (!looped && max_avail > 0) {
                        looped = 1;
                        calc_size = max_avail;
                        goto again;
                }
                return -ENOSPC;
        }
        ret = find_next_chunk(info, &offset);
        if (ret)
                return ret;
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.type = BTRFS_CHUNK_ITEM_KEY;
        key.offset = offset;

        chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
        if (!chunk)
                return -ENOMEM;

        map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
                kfree(chunk);
                return -ENOMEM;
        }

        stripes = &chunk->stripe;
        *num_bytes = chunk_bytes_by_type(type, calc_size,
                                         num_stripes, sub_stripes);
        index = 0;
        while (index < num_stripes) {
                struct btrfs_stripe *stripe;

                BUG_ON(list_empty(&private_devs));
                cur = private_devs.next;
                device = list_entry(cur, struct btrfs_device, dev_list);

                /* loop over this device again if we're doing a dup group */
                if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
                    (index == num_stripes - 1))
                        list_move_tail(&device->dev_list, dev_list);

                ret = btrfs_alloc_dev_extent(trans, device,
                             info->chunk_root->root_key.objectid,
                             BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
                             calc_size, &dev_offset, 0);
                if (ret < 0)
                        goto out_chunk_map;

                device->bytes_used += calc_size;
                ret = btrfs_update_device(trans, device);
                if (ret < 0)
                        goto out_chunk_map;

                map->stripes[index].dev = device;
                map->stripes[index].physical = dev_offset;
                stripe = stripes + index;
                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                index++;
        }
        BUG_ON(!list_empty(&private_devs));
        /* key was set above */
        btrfs_set_stack_chunk_length(chunk, *num_bytes);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
        btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
        btrfs_set_stack_chunk_type(chunk, type);
        btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
        btrfs_set_stack_chunk_io_align(chunk, stripe_len);
        btrfs_set_stack_chunk_io_width(chunk, stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
        btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
        map->sector_size = info->sectorsize;
        map->stripe_len = stripe_len;
        map->io_align = stripe_len;
        map->io_width = stripe_len;
        map->type = type;
        map->num_stripes = num_stripes;
        map->sub_stripes = sub_stripes;

        ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
                                btrfs_chunk_item_size(num_stripes));
        BUG_ON(ret);
        *start = key.offset;

        map->ce.start = key.offset;
        map->ce.size = *num_bytes;

        ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
        if (ret < 0)
                goto out_chunk_map;

        if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_add_system_chunk(info, &key,
                            chunk, btrfs_chunk_item_size(num_stripes));
                if (ret < 0)
                        goto out_chunk;
        }

        kfree(chunk);
        return ret;

out_chunk_map:
        kfree(map);
out_chunk:
        kfree(chunk);
        return ret;
}
/*
 * Alloc a DATA chunk with SINGLE profile.
 *
 * If 'convert' is set, it will alloc a chunk with 1:1 mapping
 * (btrfs logical bytenr == on-disk bytenr).
 * For that case, the caller must make sure the chunk and dev_extent are not
 * occupied.
 */
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *info, u64 *start,
                           u64 num_bytes, u64 type, int convert)
{
        u64 dev_offset;
        struct btrfs_root *extent_root = info->extent_root;
        struct btrfs_root *chunk_root = info->chunk_root;
        struct btrfs_stripe *stripes;
        struct btrfs_device *device = NULL;
        struct btrfs_chunk *chunk;
        struct list_head *dev_list = &info->fs_devices->devices;
        struct list_head *cur;
        struct map_lookup *map;
        u64 calc_size = SZ_8M;
        int num_stripes = 1;
        int sub_stripes = 0;
        int ret;
        int index;
        int stripe_len = BTRFS_STRIPE_LEN;
        struct btrfs_key key;
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.type = BTRFS_CHUNK_ITEM_KEY;
        if (convert) {
                if (*start != round_down(*start, info->sectorsize)) {
                        error("DATA chunk start not sectorsize aligned: %llu",
                              (unsigned long long)*start);
                        return -EINVAL;
                }
                key.offset = *start;
                dev_offset = *start;
        } else {
                u64 tmp;

                ret = find_next_chunk(info, &tmp);
                if (ret)
                        return ret;
                key.offset = tmp;
        }

        chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
        if (!chunk)
                return -ENOMEM;

        map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
                kfree(chunk);
                return -ENOMEM;
        }

        stripes = &chunk->stripe;
        calc_size = num_bytes;

        index = 0;
        cur = dev_list->next;
        device = list_entry(cur, struct btrfs_device, dev_list);
        while (index < num_stripes) {
                struct btrfs_stripe *stripe;

                ret = btrfs_alloc_dev_extent(trans, device,
                             info->chunk_root->root_key.objectid,
                             BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
                             calc_size, &dev_offset, convert);
                BUG_ON(ret);

                device->bytes_used += calc_size;
                ret = btrfs_update_device(trans, device);
                BUG_ON(ret);

                map->stripes[index].dev = device;
                map->stripes[index].physical = dev_offset;
                stripe = stripes + index;
                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                index++;
        }
        /* key was set above */
        btrfs_set_stack_chunk_length(chunk, num_bytes);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
        btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
        btrfs_set_stack_chunk_type(chunk, type);
        btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
        btrfs_set_stack_chunk_io_align(chunk, stripe_len);
        btrfs_set_stack_chunk_io_width(chunk, stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
        btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
        map->sector_size = info->sectorsize;
        map->stripe_len = stripe_len;
        map->io_align = stripe_len;
        map->io_width = stripe_len;
        map->type = type;
        map->num_stripes = num_stripes;
        map->sub_stripes = sub_stripes;

        ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
                                btrfs_chunk_item_size(num_stripes));
        BUG_ON(ret);
        if (!convert)
                *start = key.offset;

        map->ce.start = key.offset;
        map->ce.size = num_bytes;

        ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
        BUG_ON(ret);

        kfree(chunk);
        return ret;
}
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct cache_extent *ce;
        struct map_lookup *map;
        int ret;

        ce = search_cache_extent(&map_tree->cache_tree, logical);
        if (!ce) {
                fprintf(stderr, "No mapping for %llu-%llu\n",
                        (unsigned long long)logical,
                        (unsigned long long)logical + len);
                return 1;
        }
        if (ce->start > logical || ce->start + ce->size < logical) {
                fprintf(stderr, "Invalid mapping for %llu-%llu, got "
                        "%llu-%llu\n", (unsigned long long)logical,
                        (unsigned long long)logical + len,
                        (unsigned long long)ce->start,
                        (unsigned long long)ce->start + ce->size);
                return 1;
        }
        map = container_of(ce, struct map_lookup, ce);
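
        /*
         * Summary (added for clarity): DUP/RAID1 store num_stripes copies,
         * RAID10 stores sub_stripes copies, RAID5 can reconstruct a block
         * from one extra device (data + parity = 2) and RAID6 from two
         * (data + P + Q = 3); everything else is a single copy.
         */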
        if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
                ret = map->num_stripes;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                ret = map->sub_stripes;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
                ret = 2;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
                ret = 3;
        else
                ret = 1;
        return ret;
}
int btrfs_next_bg(struct btrfs_fs_info *fs_info, u64 *logical,
                  u64 *size, u64 type)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct cache_extent *ce;
        struct map_lookup *map;
        u64 cur = *logical;

        ce = search_cache_extent(&map_tree->cache_tree, cur);

        while (ce) {
                /*
                 * Only jump to the next bg if cur is not 0; the initial
                 * logical passed to btrfs_next_bg() is 0, and jumping
                 * right away would skip a valid bg.
                 */
                if (cur) {
                        ce = next_cache_extent(ce);
                        if (!ce)
                                return -ENOENT;
                }

                cur = ce->start;
                map = container_of(ce, struct map_lookup, ce);
                if (map->type & type) {
                        *logical = ce->start;
                        *size = ce->size;
                        return 0;
                }
                if (!cur)
                        ce = next_cache_extent(ce);
        }

        return -ENOENT;
}
int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
                     u64 chunk_start, u64 physical, u64 devid,
                     u64 **logical, int *naddrs, int *stripe_len)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct cache_extent *ce;
        struct map_lookup *map;
        u64 *buf;
        u64 bytenr;
        u64 length;
        u64 stripe_nr;
        u64 rmap_len;
        int i, j, nr = 0;

        ce = search_cache_extent(&map_tree->cache_tree, chunk_start);
        BUG_ON(!ce);
        map = container_of(ce, struct map_lookup, ce);

        length = ce->size;
        rmap_len = map->stripe_len;
        if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                length = ce->size / (map->num_stripes / map->sub_stripes);
        else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
                length = ce->size / map->num_stripes;
        else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
                              BTRFS_BLOCK_GROUP_RAID6)) {
                length = ce->size / nr_data_stripes(map);
                rmap_len = map->stripe_len * nr_data_stripes(map);
        }

        buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
        if (!buf)
                return -ENOMEM;

        for (i = 0; i < map->num_stripes; i++) {
                if (devid && map->stripes[i].dev->devid != devid)
                        continue;
                if (map->stripes[i].physical > physical ||
                    map->stripes[i].physical + length <= physical)
                        continue;

                stripe_nr = (physical - map->stripes[i].physical) /
                            map->stripe_len;

                if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripe_nr = (stripe_nr * map->num_stripes + i) /
                                    map->sub_stripes;
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
                        stripe_nr = stripe_nr * map->num_stripes + i;
                } /* else if RAID[56], multiply by nr_data_stripes().
                   * Alternatively, just use rmap_len below instead of
                   * map->stripe_len */

                bytenr = ce->start + stripe_nr * rmap_len;
                for (j = 0; j < nr; j++) {
                        if (buf[j] == bytenr)
                                break;
                }
                if (j == nr)
                        buf[nr++] = bytenr;
        }

        *logical = buf;
        *naddrs = nr;
        *stripe_len = rmap_len;

        return 0;
}
static inline int parity_smaller(u64 a, u64 b)
{
        return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
        struct btrfs_bio_stripe s;
        int i;
        u64 l;
        int again = 1;

        while (again) {
                again = 0;
                for (i = 0; i < bbio->num_stripes - 1; i++) {
                        if (parity_smaller(raid_map[i], raid_map[i+1])) {
                                s = bbio->stripes[i];
                                l = raid_map[i];
                                bbio->stripes[i] = bbio->stripes[i+1];
                                raid_map[i] = raid_map[i+1];
                                bbio->stripes[i+1] = s;
                                raid_map[i+1] = l;
                                again = 1;
                        }
                }
        }
}
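
/*
 * Example (illustrative): the sentinels BTRFS_RAID5_P_STRIPE and
 * BTRFS_RAID6_Q_STRIPE are (u64)-2 and (u64)-1, larger than any real
 * logical address, so sorting raid_map ascending leaves the data stripes
 * in logical order followed by P and then Q.
 */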
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                    u64 logical, u64 *length,
                    struct btrfs_multi_bio **multi_ret, int mirror_num,
                    u64 **raid_map_ret)
{
        return __btrfs_map_block(fs_info, rw, logical, length, NULL,
                                 multi_ret, mirror_num, raid_map_ret);
}
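
/*
 * Typical read-side usage (a sketch, not taken verbatim from any caller):
 *
 *      u64 len = read_len;
 *      struct btrfs_multi_bio *multi = NULL;
 *
 *      ret = btrfs_map_block(fs_info, READ, logical, &len, &multi, 0, NULL);
 *      if (!ret) {
 *              // multi->stripes[0] holds the device and physical offset;
 *              // len was clamped so the I/O does not cross a stripe.
 *              kfree(multi);
 *      }
 */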
int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                      u64 logical, u64 *length, u64 *type,
                      struct btrfs_multi_bio **multi_ret, int mirror_num,
                      u64 **raid_map_ret)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct cache_extent *ce;
        struct map_lookup *map;
        u64 offset;
        u64 stripe_offset;
        u64 stripe_nr;
        u64 *raid_map = NULL;
        int stripes_allocated = 8;
        int stripes_required = 1;
        int stripe_index;
        int i;
        struct btrfs_multi_bio *multi = NULL;

        if (multi_ret && rw == READ) {
                stripes_allocated = 1;
        }
again:
        ce = search_cache_extent(&map_tree->cache_tree, logical);
        if (!ce) {
                kfree(multi);
                *length = (u64)-1;
                return -ENOENT;
        }
        if (ce->start > logical) {
                kfree(multi);
                *length = ce->start - logical;
                return -ENOENT;
        }

        if (multi_ret) {
                multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
                                GFP_NOFS);
                if (!multi)
                        return -ENOMEM;
        }
        map = container_of(ce, struct map_lookup, ce);
        offset = logical - ce->start;

        if (rw == WRITE) {
                if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                                 BTRFS_BLOCK_GROUP_DUP)) {
                        stripes_required = map->num_stripes;
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripes_required = map->sub_stripes;
                }
        }
        if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
            && multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
                /* RAID[56] write or recovery. Return all stripes */
                stripes_required = map->num_stripes;

                /* Only allocate the map if we've already got a large enough multi_ret */
                if (stripes_allocated >= stripes_required) {
                        raid_map = kmalloc(sizeof(u64) * map->num_stripes,
                                           GFP_NOFS);
                        if (!raid_map) {
                                kfree(multi);
                                return -ENOMEM;
                        }
                }
        }

        /* if our multi bio struct is too small, back off and try again */
        if (multi_ret && stripes_allocated < stripes_required) {
                stripes_allocated = stripes_required;
                kfree(multi);
                multi = NULL;
                goto again;
        }
        stripe_nr = offset;
        /*
         * stripe_nr counts the total number of stripes we have to stride
         * to get to this block
         */
        stripe_nr = stripe_nr / map->stripe_len;

        stripe_offset = stripe_nr * map->stripe_len;
        BUG_ON(offset < stripe_offset);

        /* stripe_offset is the offset of this block in its stripe */
        stripe_offset = offset - stripe_offset;

        if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
                         BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
                         BTRFS_BLOCK_GROUP_RAID10 |
                         BTRFS_BLOCK_GROUP_DUP)) {
                /* we limit the length of each bio to what fits in a stripe */
                *length = min_t(u64, ce->size - offset,
                                map->stripe_len - stripe_offset);
        } else {
                *length = ce->size - offset;
        }

        if (!multi_ret)
                goto out;
        multi->num_stripes = 1;
        stripe_index = 0;
        if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
                if (rw == WRITE)
                        multi->num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
                else
                        stripe_index = stripe_nr % map->num_stripes;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                int factor = map->num_stripes / map->sub_stripes;

                stripe_index = stripe_nr % factor;
                stripe_index *= map->sub_stripes;

                if (rw == WRITE)
                        multi->num_stripes = map->sub_stripes;
                else if (mirror_num)
                        stripe_index += mirror_num - 1;

                stripe_nr = stripe_nr / factor;
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                if (rw == WRITE)
                        multi->num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
        } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
                                BTRFS_BLOCK_GROUP_RAID6)) {
                if (raid_map) {
                        int rot;
                        u64 tmp;
                        u64 raid56_full_stripe_start;
                        u64 full_stripe_len = nr_data_stripes(map) *
                                              map->stripe_len;

                        /*
                         * align the start of our data stripe in the logical
                         * address space
                         */
                        raid56_full_stripe_start = offset / full_stripe_len;
                        raid56_full_stripe_start *= full_stripe_len;

                        /* get the data stripe number */
                        stripe_nr = raid56_full_stripe_start /
                                    map->stripe_len;
                        stripe_nr = stripe_nr / nr_data_stripes(map);

                        /* Work out the disk rotation on this stripe-set */
                        rot = stripe_nr % map->num_stripes;

                        /* Fill in the logical address of each stripe */
                        tmp = stripe_nr * nr_data_stripes(map);

                        for (i = 0; i < nr_data_stripes(map); i++)
                                raid_map[(i+rot) % map->num_stripes] =
                                        ce->start + (tmp + i) *
                                        map->stripe_len;

                        raid_map[(i+rot) % map->num_stripes] =
                                BTRFS_RAID5_P_STRIPE;
                        if (map->type & BTRFS_BLOCK_GROUP_RAID6)
                                raid_map[(i+rot+1) % map->num_stripes] =
                                        BTRFS_RAID6_Q_STRIPE;
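
                        /*
                         * Worked example (illustrative): RAID5 over 3
                         * devices with 64KiB stripes has 2 data stripes,
                         * so a full stripe covers 128KiB of logical space.
                         * For the second full stripe, stripe_nr = 1 and
                         * rot = 1, giving raid_map = { P, data0, data1 }:
                         * parity rotates one device to the right on each
                         * consecutive full stripe.
                         */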
                        *length = map->stripe_len;
                        stripe_index = 0;
                        stripe_offset = 0;
                        multi->num_stripes = map->num_stripes;
                } else {
                        /*
                         * Mirror #0 or #1 means the original data block.
                         * Mirror #2 is RAID5 parity block.
                         * Mirror #3 is RAID6 Q block.
                         */
                        stripe_index = stripe_nr % nr_data_stripes(map);
                        stripe_nr = stripe_nr / nr_data_stripes(map);
                        if (mirror_num > 1)
                                stripe_index = nr_data_stripes(map) +
                                               mirror_num - 2;

                        /* We distribute the parity blocks across stripes */
                        stripe_index = (stripe_nr + stripe_index) %
                                       map->num_stripes;
                }
        } else {
                /*
                 * after this do_div call, stripe_nr is the number of stripes
                 * on this device we have to walk to find the data, and
                 * stripe_index is the number of our device in the stripe array
                 */
                stripe_index = stripe_nr % map->num_stripes;
                stripe_nr = stripe_nr / map->num_stripes;
        }
        BUG_ON(stripe_index >= map->num_stripes);
        for (i = 0; i < multi->num_stripes; i++) {
                multi->stripes[i].physical =
                        map->stripes[stripe_index].physical + stripe_offset +
                        stripe_nr * map->stripe_len;
                multi->stripes[i].dev = map->stripes[stripe_index].dev;
                stripe_index++;
        }
        *multi_ret = multi;

        if (type)
                *type = map->type;

        if (raid_map) {
                sort_parity_stripes(multi, raid_map);
                *raid_map_ret = raid_map;
        }
out:
        return 0;
}
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
                                       u8 *uuid, u8 *fsid)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *cur_devices;

        cur_devices = fs_info->fs_devices;
        while (cur_devices) {
                if (!fsid ||
                    (!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE) ||
                     fs_info->ignore_fsid_mismatch)) {
                        device = __find_device(&cur_devices->devices,
                                               devid, uuid);
                        if (device)
                                return device;
                }
                cur_devices = cur_devices->seed;
        }
        return NULL;
}
struct btrfs_device *
btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
                           u64 devid, int instance)
{
        struct list_head *head = &fs_devices->devices;
        struct btrfs_device *dev;
        int num_found = 0;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid && num_found++ == instance)
                        return dev;
        }
        return NULL;
}
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
        struct cache_extent *ce;
        struct map_lookup *map;
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        int readonly = 0;
        int i;

        /*
         * During chunk recovery, we may fail to find a block group's
         * corresponding chunk, we will rebuild it later
         */
        ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
        if (!fs_info->is_chunk_recover)
                BUG_ON(!ce);
        else if (!ce)
                return 0;

        map = container_of(ce, struct map_lookup, ce);
        for (i = 0; i < map->num_stripes; i++) {
                if (!map->stripes[i].dev->writeable) {
                        readonly = 1;
                        break;
                }
        }

        return readonly;
}
static struct btrfs_device *fill_missing_device(u64 devid)
{
        struct btrfs_device *device;

        device = kzalloc(sizeof(*device), GFP_NOFS);
        device->devid = devid;
        device->fd = -1;

        return device;
}
/*
 * slot == -1: SYSTEM chunk
 * return -EIO on error, otherwise return 0
 */
int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
                            struct extent_buffer *leaf,
                            struct btrfs_chunk *chunk,
                            int slot, u64 logical)
{
        u64 length;
        u64 stripe_len;
        u16 num_stripes;
        u16 sub_stripes;
        u64 type;
        u32 chunk_ondisk_size;
        u32 sectorsize = fs_info->sectorsize;

        length = btrfs_chunk_length(leaf, chunk);
        stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
        type = btrfs_chunk_type(leaf, chunk);

        /*
         * These validity checks may be insufficient to cover every corner
         * case.
         */
        if (!IS_ALIGNED(logical, sectorsize)) {
                error("invalid chunk logical %llu", logical);
                return -EIO;
        }
        if (btrfs_chunk_sector_size(leaf, chunk) != sectorsize) {
                error("invalid chunk sectorsize %llu",
                      (unsigned long long)btrfs_chunk_sector_size(leaf, chunk));
                return -EIO;
        }
        if (!length || !IS_ALIGNED(length, sectorsize)) {
                error("invalid chunk length %llu", length);
                return -EIO;
        }
        if (stripe_len != BTRFS_STRIPE_LEN) {
                error("invalid chunk stripe length: %llu", stripe_len);
                return -EIO;
        }
        /* Check on chunk item type */
        if (slot == -1 && (type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
                error("invalid chunk type %llu", type);
                return -EIO;
        }
        if (type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
                     BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
                error("unrecognized chunk type: %llu",
                      ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
                        BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
                return -EIO;
        }
        if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
                error("missing chunk type flag: %llu", type);
                return -EIO;
        }
        if (!(is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) ||
              (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)) {
                error("conflicting chunk type detected: %llu", type);
                return -EIO;
        }
        if ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
            !is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
                error("conflicting chunk profile detected: %llu", type);
                return -EIO;
        }

        chunk_ondisk_size = btrfs_chunk_item_size(num_stripes);
        /*
         * Btrfs_chunk contains at least one stripe, and for sys_chunk
         * it can't exceed the system chunk array size.
         * For normal chunk, it should match its chunk item size.
         */
        if (num_stripes < 1 ||
            (slot == -1 && chunk_ondisk_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) ||
            (slot >= 0 && chunk_ondisk_size > btrfs_item_size_nr(leaf, slot))) {
                error("invalid num_stripes: %u", num_stripes);
                return -EIO;
        }
        /*
         * Device number check against profile
         */
        if ((type & BTRFS_BLOCK_GROUP_RAID10 && (sub_stripes != 2 ||
                  !IS_ALIGNED(num_stripes, sub_stripes))) ||
            (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
            (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
            (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
            (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
            ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
             num_stripes != 1)) {
                error("Invalid num_stripes:sub_stripes %u:%u for profile %llu",
                      num_stripes, sub_stripes,
                      type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
                return -EIO;
        }

        return 0;
}
/*
 * Slot is used to verify the chunk item is valid
 *
 * For sys chunk in superblock, pass -1 to indicate sys chunk.
 */
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
                          struct extent_buffer *leaf,
                          struct btrfs_chunk *chunk, int slot)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct map_lookup *map;
        struct cache_extent *ce;
        u64 logical;
        u64 length;
        u64 devid;
        u8 uuid[BTRFS_UUID_SIZE];
        int num_stripes;
        int ret;
        int i;

        logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        /* Validation check */
        ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, slot, logical);
        if (ret) {
                error("%s checksums match, but it has an invalid chunk, %s",
                      (slot == -1) ? "Superblock" : "Metadata",
                      (slot == -1) ? "try btrfsck --repair -s <superblock> ie, 0,1,2" : "");
                return ret;
        }

        ce = search_cache_extent(&map_tree->cache_tree, logical);

        /* already mapped? */
        if (ce && ce->start <= logical && ce->start + ce->size > logical) {
                return 0;
        }

        map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
        if (!map)
                return -ENOMEM;

        map->ce.start = logical;
        map->ce.size = length;
        map->num_stripes = num_stripes;
        map->io_width = btrfs_chunk_io_width(leaf, chunk);
        map->io_align = btrfs_chunk_io_align(leaf, chunk);
        map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
        map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        map->type = btrfs_chunk_type(leaf, chunk);
        map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

        for (i = 0; i < num_stripes; i++) {
                map->stripes[i].physical =
                        btrfs_stripe_offset_nr(leaf, chunk, i);
                devid = btrfs_stripe_devid_nr(leaf, chunk, i);
                read_extent_buffer(leaf, uuid, (unsigned long)
                                   btrfs_stripe_dev_uuid_nr(chunk, i),
                                   BTRFS_UUID_SIZE);
                map->stripes[i].dev = btrfs_find_device(fs_info, devid, uuid,
                                                        NULL);
                if (!map->stripes[i].dev) {
                        map->stripes[i].dev = fill_missing_device(devid);
                        printf("warning, device %llu is missing\n",
                               (unsigned long long)devid);
                        list_add(&map->stripes[i].dev->dev_list,
                                 &fs_info->fs_devices->devices);
                }
        }

        ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
        BUG_ON(ret);

        return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
                                 struct btrfs_dev_item *dev_item,
                                 struct btrfs_device *device)
{
        unsigned long ptr;

        device->devid = btrfs_device_id(leaf, dev_item);
        device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
        device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
        device->type = btrfs_device_type(leaf, dev_item);
        device->io_align = btrfs_device_io_align(leaf, dev_item);
        device->io_width = btrfs_device_io_width(leaf, dev_item);
        device->sector_size = btrfs_device_sector_size(leaf, dev_item);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

        return 0;
}
static int open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;
        int ret;

        fs_devices = fs_info->fs_devices->seed;
        while (fs_devices) {
                if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
                        ret = 0;
                        goto out;
                }
                fs_devices = fs_devices->seed;
        }

        fs_devices = find_fsid(fsid);
        if (!fs_devices) {
                /* missing all seed devices */
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices) {
                        ret = -ENOMEM;
                        goto out;
                }
                INIT_LIST_HEAD(&fs_devices->devices);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, fsid, BTRFS_FSID_SIZE);
        }

        ret = btrfs_open_devices(fs_devices, O_RDONLY);
        if (ret)
                goto out;

        fs_devices->seed = fs_info->fs_devices->seed;
        fs_info->fs_devices->seed = fs_devices;
out:
        return ret;
}
static int read_one_dev(struct btrfs_fs_info *fs_info,
                        struct extent_buffer *leaf,
                        struct btrfs_dev_item *dev_item)
{
        struct btrfs_device *device;
        u64 devid;
        int ret = 0;
        u8 fs_uuid[BTRFS_UUID_SIZE];
        u8 dev_uuid[BTRFS_UUID_SIZE];

        devid = btrfs_device_id(leaf, dev_item);
        read_extent_buffer(leaf, dev_uuid,
                           (unsigned long)btrfs_device_uuid(dev_item),
                           BTRFS_UUID_SIZE);
        read_extent_buffer(leaf, fs_uuid,
                           (unsigned long)btrfs_device_fsid(dev_item),
                           BTRFS_UUID_SIZE);

        if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
                ret = open_seed_devices(fs_info, fs_uuid);
                if (ret)
                        return ret;
        }

        device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
        if (!device) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        return -ENOMEM;
                device->fd = -1;
                list_add(&device->dev_list,
                         &fs_info->fs_devices->devices);
        }

        fill_device_from_item(leaf, dev_item, device);
        device->dev_root = fs_info->dev_root;
        return ret;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
        struct btrfs_super_block *super_copy = fs_info->super_copy;
        struct extent_buffer *sb;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
        u8 *array_ptr;
        unsigned long sb_array_offset;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
        u32 cur_offset;
        struct btrfs_key key;

        if (fs_info->nodesize < BTRFS_SUPER_INFO_SIZE) {
                printf("ERROR: nodesize %u too small to read superblock\n",
                       fs_info->nodesize);
                return -EINVAL;
        }
        sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
        if (!sb)
                return -ENOMEM;
        btrfs_set_buffer_uptodate(sb);
        write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
        array_size = btrfs_super_sys_array_size(super_copy);

        array_ptr = super_copy->sys_chunk_array;
        sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
        cur_offset = 0;
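
        /*
         * Walk the array (layout summary): each entry is a btrfs_disk_key
         * immediately followed by a btrfs_chunk whose size depends on its
         * stripe count, so the cursor advances by sizeof(disk_key) plus
         * btrfs_chunk_item_size(num_stripes) per iteration.
         */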
        while (cur_offset < array_size) {
                disk_key = (struct btrfs_disk_key *)array_ptr;
                len = sizeof(*disk_key);
                if (cur_offset + len > array_size)
                        goto out_short_read;

                btrfs_disk_key_to_cpu(&key, disk_key);

                array_ptr += len;
                sb_array_offset += len;
                cur_offset += len;

                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
                        chunk = (struct btrfs_chunk *)sb_array_offset;
                        /*
                         * At least one btrfs_chunk with one stripe must be
                         * present, exact stripe count check comes afterwards
                         */
                        len = btrfs_chunk_item_size(1);
                        if (cur_offset + len > array_size)
                                goto out_short_read;

                        num_stripes = btrfs_chunk_num_stripes(sb, chunk);
                        if (!num_stripes) {
                                printk(
                "ERROR: invalid number of stripes %u in sys_array at offset %u\n",
                                        num_stripes, cur_offset);
                                ret = -EIO;
                                break;
                        }

                        len = btrfs_chunk_item_size(num_stripes);
                        if (cur_offset + len > array_size)
                                goto out_short_read;

                        ret = read_one_chunk(fs_info, &key, sb, chunk, -1);
                        if (ret)
                                break;
                } else {
                        printk(
                "ERROR: unexpected item type %u in sys_array at offset %u\n",
                                (u32)key.type, cur_offset);
                        ret = -EIO;
                        break;
                }
                array_ptr += len;
                sb_array_offset += len;
                cur_offset += len;
        }
        free_extent_buffer(sb);
        return ret;

out_short_read:
        printk("ERROR: sys_array too short to read %u bytes at offset %u\n",
                        len, cur_offset);
        free_extent_buffer(sb);
        return -EIO;
}
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *root = fs_info->chunk_root;
        int ret;
        int slot;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /*
         * Read all device items, and then all the chunk items. All
         * device items are found before any chunk item (their object id
         * is smaller than the lowest possible object id for a chunk
         * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
         */
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.offset = 0;
        key.type = 0;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
                if (found_key.type == BTRFS_DEV_ITEM_KEY) {
                        struct btrfs_dev_item *dev_item;

                        dev_item = btrfs_item_ptr(leaf, slot,
                                                  struct btrfs_dev_item);
                        ret = read_one_dev(fs_info, leaf, dev_item);
                        BUG_ON(ret);
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;

                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
                        ret = read_one_chunk(fs_info, &found_key, leaf, chunk,
                                             slot);
                        BUG_ON(ret);
                }
                path->slots[0]++;
        }

        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
struct list_head *btrfs_scanned_uuids(void)
{
        return &fs_uuids;
}
static int rmw_eb(struct btrfs_fs_info *info,
                  struct extent_buffer *eb, struct extent_buffer *orig_eb)
{
        int ret;
        unsigned long orig_off = 0;
        unsigned long dest_off = 0;
        unsigned long copy_len = eb->len;

        ret = read_whole_eb(info, eb, 0);
        if (ret)
                return ret;

        if (eb->start + eb->len <= orig_eb->start ||
            eb->start >= orig_eb->start + orig_eb->len)
                return 0;
        /*
         * | ----- orig_eb ------- |
         *         | ----- stripe -------  |
         *         | ----- orig_eb ------- |
         *              | ----- orig_eb ------- |
         */
        if (eb->start > orig_eb->start)
                orig_off = eb->start - orig_eb->start;
        if (orig_eb->start > eb->start)
                dest_off = orig_eb->start - eb->start;

        if (copy_len > orig_eb->len - orig_off)
                copy_len = orig_eb->len - orig_off;
        if (copy_len > eb->len - dest_off)
                copy_len = eb->len - dest_off;

        memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);
        return 0;
}
static int split_eb_for_raid56(struct btrfs_fs_info *info,
                               struct extent_buffer *orig_eb,
                               struct extent_buffer **ebs,
                               u64 stripe_len, u64 *raid_map,
                               int num_stripes)
{
        struct extent_buffer **tmp_ebs;
        u64 start = orig_eb->start;
        u64 this_eb_start;
        int i;
        int ret = 0;

        tmp_ebs = calloc(num_stripes, sizeof(*tmp_ebs));
        if (!tmp_ebs)
                return -ENOMEM;

        /* Alloc memory in a row for data stripes */
        for (i = 0; i < num_stripes; i++) {
                if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
                        break;
                tmp_ebs[i] = calloc(1, sizeof(**tmp_ebs) + stripe_len);
                if (!tmp_ebs[i]) {
                        ret = -ENOMEM;
                        goto clean_up;
                }
        }
        for (i = 0; i < num_stripes; i++) {
                struct extent_buffer *eb = tmp_ebs[i];

                if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
                        break;
                eb->start = raid_map[i];
                eb->len = stripe_len;
                eb->refs = 1;
                eb->flags = 0;
                eb->fd = -1;
                eb->dev_bytenr = (u64)-1;

                this_eb_start = raid_map[i];

                if (start > this_eb_start ||
                    start + orig_eb->len < this_eb_start + stripe_len) {
                        ret = rmw_eb(info, eb, orig_eb);
                        if (ret)
                                goto clean_up;
                } else {
                        memcpy(eb->data, orig_eb->data + eb->start - start,
                               stripe_len);
                }
                ebs[i] = eb;
        }
        free(tmp_ebs);
        return ret;
clean_up:
        for (i = 0; i < num_stripes; i++)
                free(tmp_ebs[i]);
        free(tmp_ebs);
        return ret;
}
int write_raid56_with_parity(struct btrfs_fs_info *info,
                             struct extent_buffer *eb,
                             struct btrfs_multi_bio *multi,
                             u64 stripe_len, u64 *raid_map)
{
        struct extent_buffer **ebs, *p_eb = NULL, *q_eb = NULL;
        int i;
        int ret;
        int alloc_size = eb->len;
        void **pointers;

        ebs = malloc(sizeof(*ebs) * multi->num_stripes);
        pointers = malloc(sizeof(*pointers) * multi->num_stripes);
        if (!ebs || !pointers) {
                free(ebs);
                free(pointers);
                return -ENOMEM;
        }

        if (stripe_len > alloc_size)
                alloc_size = stripe_len;

        ret = split_eb_for_raid56(info, eb, ebs, stripe_len, raid_map,
                                  multi->num_stripes);
        if (ret)
                goto out;

        for (i = 0; i < multi->num_stripes; i++) {
                struct extent_buffer *new_eb;

                if (raid_map[i] < BTRFS_RAID5_P_STRIPE) {
                        ebs[i]->dev_bytenr = multi->stripes[i].physical;
                        ebs[i]->fd = multi->stripes[i].dev->fd;
                        multi->stripes[i].dev->total_ios++;
                        if (ebs[i]->start != raid_map[i]) {
                                ret = -EINVAL;
                                goto out_free_split;
                        }
                        continue;
                }
                new_eb = malloc(sizeof(*eb) + alloc_size);
                if (!new_eb) {
                        ret = -ENOMEM;
                        goto out_free_split;
                }
                new_eb->dev_bytenr = multi->stripes[i].physical;
                new_eb->fd = multi->stripes[i].dev->fd;
                multi->stripes[i].dev->total_ios++;
                new_eb->len = stripe_len;

                if (raid_map[i] == BTRFS_RAID5_P_STRIPE)
                        p_eb = new_eb;
                else if (raid_map[i] == BTRFS_RAID6_Q_STRIPE)
                        q_eb = new_eb;
        }
        if (q_eb) {
                ebs[multi->num_stripes - 2] = p_eb;
                ebs[multi->num_stripes - 1] = q_eb;

                for (i = 0; i < multi->num_stripes; i++)
                        pointers[i] = ebs[i]->data;

                raid6_gen_syndrome(multi->num_stripes, stripe_len, pointers);
        } else {
                ebs[multi->num_stripes - 1] = p_eb;
                for (i = 0; i < multi->num_stripes; i++)
                        pointers[i] = ebs[i]->data;
                ret = raid5_gen_result(multi->num_stripes, stripe_len,
                                       multi->num_stripes - 1, pointers);
                if (ret < 0)
                        goto out_free_split;
        }

        for (i = 0; i < multi->num_stripes; i++) {
                ret = write_extent_to_disk(ebs[i]);
                if (ret < 0)
                        goto out_free_split;
        }

out_free_split:
        for (i = 0; i < multi->num_stripes; i++) {
                if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
                        free(ebs[i]);
        }
out:
        free(ebs);
        free(pointers);

        return ret;
}
/*
 * Get stripe length from chunk item and its stripe items
 *
 * Caller should only call this function after validating the chunk item
 * by using btrfs_check_chunk_valid().
 */
u64 btrfs_stripe_length(struct btrfs_fs_info *fs_info,
                        struct extent_buffer *leaf,
                        struct btrfs_chunk *chunk)
{
        u64 stripe_len;
        u64 chunk_len;
        u32 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        u64 profile = btrfs_chunk_type(leaf, chunk) &
                      BTRFS_BLOCK_GROUP_PROFILE_MASK;

        chunk_len = btrfs_chunk_length(leaf, chunk);

        switch (profile) {
        case 0: /* Single profile */
        case BTRFS_BLOCK_GROUP_RAID1:
        case BTRFS_BLOCK_GROUP_DUP:
                stripe_len = chunk_len;
                break;
        case BTRFS_BLOCK_GROUP_RAID0:
                stripe_len = chunk_len / num_stripes;
                break;
        case BTRFS_BLOCK_GROUP_RAID5:
                stripe_len = chunk_len / (num_stripes - 1);
                break;
        case BTRFS_BLOCK_GROUP_RAID6:
                stripe_len = chunk_len / (num_stripes - 2);
                break;
        case BTRFS_BLOCK_GROUP_RAID10:
                stripe_len = chunk_len / (num_stripes /
                                btrfs_chunk_sub_stripes(leaf, chunk));
                break;
        default:
                /* Invalid chunk profile found */
                BUG_ON(1);
        }
        return stripe_len;
}
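
/*
 * Example (illustrative): a 1GiB RAID10 chunk with num_stripes = 4 and
 * sub_stripes = 2 occupies 1GiB / (4 / 2) = 512MiB on each device; the
 * same chunk as RAID6 also takes 1GiB / (4 - 2) = 512MiB per device, but
 * with different redundancy.
 */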
/*
 * Return 0 if size of @device is already good
 * Return >0 if size of @device is not aligned but fixed without problems
 * Return <0 if something wrong happened when aligning the size of @device
 */
int btrfs_fix_device_size(struct btrfs_fs_info *fs_info,
                          struct btrfs_device *device)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_key key;
        struct btrfs_path path;
        struct btrfs_root *chunk_root = fs_info->chunk_root;
        struct btrfs_dev_item *di;
        u64 old_bytes = device->total_bytes;
        int ret;

        if (IS_ALIGNED(old_bytes, fs_info->sectorsize))
                return 0;

        /* Align the in-memory total_bytes first, and use it as correct size */
        device->total_bytes = round_down(device->total_bytes,
                                         fs_info->sectorsize);

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        trans = btrfs_start_transaction(chunk_root, 1);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                error("error starting transaction: %d (%s)",
                      ret, strerror(-ret));
                return ret;
        }

        btrfs_init_path(&path);
        ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 1);
        if (ret > 0) {
                error("failed to find DEV_ITEM for devid %llu", device->devid);
                ret = -ENOENT;
                goto err;
        }
        if (ret < 0) {
                error("failed to search chunk root: %d (%s)",
                      ret, strerror(-ret));
                goto err;
        }
        di = btrfs_item_ptr(path.nodes[0], path.slots[0], struct btrfs_dev_item);
        btrfs_set_device_total_bytes(path.nodes[0], di, device->total_bytes);
        btrfs_mark_buffer_dirty(path.nodes[0]);
        ret = btrfs_commit_transaction(trans, chunk_root);
        if (ret < 0) {
                error("failed to commit current transaction: %d (%s)",
                      ret, strerror(-ret));
                btrfs_release_path(&path);
                return ret;
        }
        btrfs_release_path(&path);
        printf("Fixed device size for devid %llu, old size: %llu new size: %llu\n",
               device->devid, old_bytes, device->total_bytes);
        return 1;

err:
        /* We haven't modified anything, it's OK to commit current trans */
        btrfs_commit_transaction(trans, chunk_root);
        btrfs_release_path(&path);
        return ret;
}
/*
 * Return 0 if super block total_bytes matches all devices' total_bytes
 * Return >0 if super block total_bytes mismatch but fixed without problem
 * Return <0 if we failed to fix super block total_bytes
 */
int btrfs_fix_super_size(struct btrfs_fs_info *fs_info)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_device *device;
        struct list_head *dev_list = &fs_info->fs_devices->devices;
        u64 total_bytes = 0;
        u64 old_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret;

        list_for_each_entry(device, dev_list, dev_list) {
                /*
                 * Caller should ensure this function is called after aligning
                 * all devices' total_bytes.
                 */
                if (!IS_ALIGNED(device->total_bytes, fs_info->sectorsize)) {
                        error("device %llu total_bytes %llu not aligned to %u",
                              device->devid, device->total_bytes,
                              fs_info->sectorsize);
                        return -EUCLEAN;
                }
                total_bytes += device->total_bytes;
        }

        if (total_bytes == old_bytes)
                return 0;

        btrfs_set_super_total_bytes(fs_info->super_copy, total_bytes);

        /* Commit transaction to update all super blocks */
        trans = btrfs_start_transaction(fs_info->tree_root, 1);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                error("error starting transaction: %d (%s)",
                      ret, strerror(-ret));
                return ret;
        }
        ret = btrfs_commit_transaction(trans, fs_info->tree_root);
        if (ret < 0) {
                error("failed to commit current transaction: %d (%s)",
                      ret, strerror(-ret));
                return ret;
        }
        printf("Fixed super total bytes, old size: %llu new size: %llu\n",
               old_bytes, total_bytes);
        return 1;
}
/*
 * Return 0 if all devices and super block sizes are good
 * Return >0 if any device/super size problem was found, but fixed
 * Return <0 if something wrong happened during fixing
 */
int btrfs_fix_device_and_super_size(struct btrfs_fs_info *fs_info)
{
        struct btrfs_device *device;
        struct list_head *dev_list = &fs_info->fs_devices->devices;
        bool have_bad_value = false;
        int ret;

        /* Seed device is not supported yet */
        if (fs_info->fs_devices->seed) {
                error("fixing device size with seed device is not supported yet");
                return -EINVAL;
        }

        /* All devices must be set up before repairing */
        if (list_empty(dev_list)) {
                error("no device found");
                return -ENODEV;
        }
        list_for_each_entry(device, dev_list, dev_list) {
                if (device->fd == -1 || !device->writeable) {
                        error("devid %llu is missing or not writeable",
                              device->devid);
                        error(
        "fixing device size needs all device(s) to be present and writeable");
                        return -ENODEV;
                }
        }

        /* Repair total_bytes of each device */
        list_for_each_entry(device, dev_list, dev_list) {
                ret = btrfs_fix_device_size(fs_info, device);
                if (ret < 0)
                        return ret;
                if (ret > 0)
                        have_bad_value = true;
        }

        /* Repair super total_bytes */
        ret = btrfs_fix_super_size(fs_info);
        if (ret > 0)
                have_bad_value = true;
        if (have_bad_value) {
                printf(
        "Fixed unaligned/mismatched total_bytes for super block and device items\n");
                ret = 1;
        } else {
                printf("No device size related problem found\n");
                ret = 0;
        }
        return ret;
}