/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <sys/types.h>
#include <uuid/uuid.h>
#include "transaction.h"
#include "print-tree.h"
#include "kernel-lib/raid56.h"
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.devs_max	= 0,	/* 0 == as many as possible */
		.tolerated_failures = 1,
	[BTRFS_RAID_RAID1] = {
		.tolerated_failures = 1,
		.tolerated_failures = 0,
	[BTRFS_RAID_RAID0] = {
		.tolerated_failures = 0,
	[BTRFS_RAID_SINGLE] = {
		.tolerated_failures = 0,
	[BTRFS_RAID_RAID5] = {
		.tolerated_failures = 1,
	[BTRFS_RAID_RAID6] = {
		.tolerated_failures = 2,

	struct btrfs_device *dev;
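
/*
 * Number of parity stripes in a chunk: 1 for RAID5, 2 for RAID6 and 0
 * for all other profiles.
 */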
static inline int nr_parity_stripes(struct map_lookup *map)
{
	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 1;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;
	else
		return 0;
}

static inline int nr_data_stripes(struct map_lookup *map)
{
	return map->num_stripes - nr_parity_stripes(map);
}

#define is_parity_stripe(x) (((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE))
static LIST_HEAD(fs_uuids);

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
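
/*
 * Look up the device by devid/fsid and add it to the in-memory device
 * list, creating the btrfs_fs_devices entry on first sight of a
 * filesystem and refusing stale devices whose generation is older than
 * the one already recorded.
 */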
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
			/* we can safely leave the fs_devices entry around */
		device->devid = devid;
		device->generation = found_transid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		device->name = kstrdup(path, GFP_NOFS);
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		if (!device->label) {
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name, path)) {
		/*
		 * The existing device has newer generation, so this one could
		 * be a stale one, don't add it.
		 */
		if (found_transid < device->generation) {
				"adding device %s gen %llu but found an existing device %s gen %llu",
				path, found_transid, device->name,

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	*fs_devices_ret = fs_devices;
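
/*
 * Close all devices of the filesystem: fsync and drop caches for each
 * one, then walk any seed device lists and release them as well.
 */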
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_device *device;

	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		if (device->fd != -1) {
			if (fsync(device->fd) == -1) {
				warning("fsync on device %llu failed: %m",
			if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
				fprintf(stderr, "Warning, could not drop caches\n");
		device->writeable = 0;
		list_del(&device->dev_list);
		/* free the memory */

	seed_devices = fs_devices->seed;
	fs_devices->seed = NULL;
		struct btrfs_fs_devices *orig;

		fs_devices = seed_devices;
		list_del(&orig->list);

	list_del(&fs_devices->list);

void btrfs_close_all_devices(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
		btrfs_close_devices(fs_devices);

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	struct btrfs_device *device;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
			printk("no name for device %llu, skip it now\n", device->devid);

		fd = open(device->name, flags);
			error("cannot open device '%s': %m", device->name);

		if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
			fprintf(stderr, "Warning, could not drop caches\n");

		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		device->writeable = 1;

	btrfs_close_devices(fs_devices);
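
/*
 * Read the super block from @fd and register the device in the global
 * fs_uuids list via device_list_add().
 */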
int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset, unsigned sbflags)
{
	struct btrfs_super_block *disk_super;
	char buf[BTRFS_SUPER_INFO_SIZE];

	disk_super = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, disk_super, super_offset, sbflags);
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
		*total_devs = 1;
	else
		*total_devs = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the maximum free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the maximum free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the maximum free space.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start,
				      u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_end = device->total_bytes;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, (u64)SZ_1M);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();

	max_hole_start = search_start;

	if (search_start >= search_end) {

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		ret = btrfs_previous_item(root, path, key.objectid, key.type);

		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
		if (key.objectid > device->devid)
		if (key.type != BTRFS_DEV_EXTENT_KEY)

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;

			/*
			 * If this free space is greater than what we need,
			 * it must be the maximum free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
		if (extent_end > search_start)
			search_start = extent_end;

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;

	if (max_hole_size < num_bytes)

	btrfs_free_path(path);
	*start = max_hole_start;
	*len = max_hole_size;

static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
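
/*
 * Insert a DEV_EXTENT item for @device covering @num_bytes of the chunk
 * at @chunk_offset; unless the convert case takes over, the location is
 * found with find_free_dev_extent().
 */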
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 num_bytes, u64 *start,
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();

	/*
	 * For the convert case, just skip searching for a free dev_extent,
	 * as the caller is responsible for making sure it's free.
	 */
		ret = find_free_dev_extent(device, num_bytes, start, NULL);

	key.objectid = device->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
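
/*
 * Find the logical offset right past the last chunk item in the chunk
 * tree, i.e. where the next chunk can be placed.
 */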
static int find_next_chunk(struct btrfs_fs_info *fs_info, u64 *offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			if (found_key.objectid != BTRFS_FIRST_CHUNK_TREE_OBJECTID)
				chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
				*offset = found_key.offset +
					btrfs_chunk_length(path->nodes[0], chunk);

	btrfs_free_path(path);
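
/*
 * Compute the next available devid from the highest existing DEV_ITEM
 * key in the chunk root.
 */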
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
{
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
		*objectid = found_key.offset + 1;

	btrfs_release_path(path);
/*
 * The device information is stored in the chunk root;
 * the btrfs_device struct should be fully filled in before calling this.
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_fs_info *fs_info,
		     struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->chunk_root;

	path = btrfs_alloc_path();

	ret = find_next_devid(root, path, &free_devid);

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
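
/*
 * Append a chunk item (preceded by its key) to the sys_chunk_array in
 * the super block copy, growing sys_array_size accordingly.
 */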
int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
	    > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
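
/*
 * How many usable bytes a chunk of @calc_size stripes provides for a
 * given profile: mirrored profiles store one copy's worth, striped
 * profiles multiply by the number of (data) stripes.
 */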
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else if (type & BTRFS_BLOCK_GROUP_RAID5)
		return calc_size * (num_stripes - 1);
	else if (type & BTRFS_BLOCK_GROUP_RAID6)
		return calc_size * (num_stripes - 2);
	return calc_size * num_stripes;

static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO, add a way to store the preferred stripe size */
	return BTRFS_STRIPE_LEN;
/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in the first 1M of the device, and we are
 * not allowed to allocate any chunk before alloc_start if it is specified.
 * So search holes from max(1M, alloc_start) to device->total_bytes.
 */
static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
				    struct btrfs_device *device,
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct extent_buffer *l;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;

	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.offset = root->fs_info->alloc_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	ret = btrfs_previous_item(root, path, 0, key.type);

		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
		if (key.objectid > device->devid)
		if (key.type != BTRFS_DEV_EXTENT_KEY)
		if (key.offset > search_end)

		if (key.offset > search_start)
			free_bytes += key.offset - search_start;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
		if (extent_end > search_start)
			search_start = extent_end;
		if (search_start > search_end)

	if (search_start < search_end)
		free_bytes += search_end - search_start;

	*avail_bytes = free_bytes;

	btrfs_free_path(path);
#define BTRFS_MAX_DEVS(info) ((BTRFS_LEAF_DATA_SIZE(info)	\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
			- 2 * sizeof(struct btrfs_disk_key)		\
			- 2 * sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)
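
/*
 * Allocate a new chunk of the given @type: pick stripe count and size
 * limits from the profile, build a private list of devices with enough
 * free space, allocate a dev extent on each one and finally insert the
 * chunk item and its block group mapping.
 */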
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *info, u64 *start,
		      u64 *num_bytes, u64 type)
{
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = SZ_1M;
	u64 calc_size = SZ_8M;
	u64 max_chunk_size = 4 * calc_size;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	if (list_empty(dev_list)) {

	if (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			max_chunk_size = calc_size * 2;
			min_stripe_size = SZ_1M;
			max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			max_chunk_size = 10 * calc_size;
			min_stripe_size = SZ_64M;
			max_stripes = BTRFS_MAX_DEVS(info);
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			max_chunk_size = 4 * calc_size;
			min_stripe_size = SZ_32M;
			max_stripes = BTRFS_MAX_DEVS(info);

	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(info->super_copy));
	if (type & BTRFS_BLOCK_GROUP_DUP) {
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		num_stripes &= ~(u32)1;
	if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		stripe_len = find_raid56_stripe_len(num_stripes - 1,
				btrfs_super_stripesize(info->super_copy));
	if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 3)
		stripe_len = find_raid56_stripe_len(num_stripes - 2,
				btrfs_super_stripesize(info->super_copy));

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;

	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	calc_size /= stripe_len;
	calc_size *= stripe_len;
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		ret = btrfs_device_avail_bytes(trans, device, &avail);
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			if (type & BTRFS_BLOCK_GROUP_DUP)
		} else if (avail > max_avail)
		if (cur == dev_list)
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
		if (!looped && max_avail > 0) {
			calc_size = max_avail;

	ret = find_next_chunk(info, &offset);
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = offset;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device, key.offset,
					     calc_size, &dev_offset, 0);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = info->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(info, &key,
			    chunk, btrfs_chunk_item_size(num_stripes));
/*
 * Alloc a DATA chunk with SINGLE profile.
 *
 * If 'convert' is set, it will alloc a chunk with 1:1 mapping
 * (btrfs logical bytenr == on-disk bytenr).
 * For that case, the caller must make sure the chunk and dev_extent are not
 * occupied.
 */
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *info, u64 *start,
			   u64 num_bytes, u64 type, int convert)
{
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	u64 calc_size = SZ_8M;
	int num_stripes = 1;
	int sub_stripes = 0;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
		if (*start != round_down(*start, info->sectorsize)) {
			error("DATA chunk start not sectorsize aligned: %llu",
			      (unsigned long long)*start);
		key.offset = *start;
		dev_offset = *start;
		ret = find_next_chunk(info, &tmp);

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);

	stripes = &chunk->stripe;
	calc_size = num_bytes;

	cur = dev_list->next;
	device = list_entry(cur, struct btrfs_device, dev_list);

	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		ret = btrfs_alloc_dev_extent(trans, device, key.offset,
					     calc_size, &dev_offset, convert);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = info->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
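
/*
 * Number of redundant copies (or, for RAID5/6, recoverable versions) of
 * the block at @logical, derived from the chunk mapping's profile.
 */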
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;

	ce = search_cache_extent(&map_tree->cache_tree, logical);
		fprintf(stderr, "No mapping for %llu-%llu\n",
			(unsigned long long)logical,
			(unsigned long long)logical + len);
	if (ce->start > logical || ce->start + ce->size < logical) {
		fprintf(stderr, "Invalid mapping for %llu-%llu, got %llu-%llu\n",
			(unsigned long long)logical,
			(unsigned long long)logical + len,
			(unsigned long long)ce->start,
			(unsigned long long)ce->start + ce->size);
	map = container_of(ce, struct map_lookup, ce);

	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
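
/*
 * Starting from *logical, find the next block group whose chunk type
 * matches @type and return its start and size.
 */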
int btrfs_next_bg(struct btrfs_fs_info *fs_info, u64 *logical,
		  u64 *size, u64 type)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;

	ce = search_cache_extent(&map_tree->cache_tree, cur);
		/*
		 * Only jump to the next bg if our cur is not 0.
		 * As the initial logical for btrfs_next_bg() is 0, if we
		 * jumped to the next bg we would skip a valid bg.
		 */
			ce = next_cache_extent(ce);
		map = container_of(ce, struct map_lookup, ce);
		if (map->type & type) {
			*logical = ce->start;
		ce = next_cache_extent(ce);

int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;

	ce = search_cache_extent(&map_tree->cache_tree, chunk_start);
	map = container_of(ce, struct map_lookup, ce);

	rmap_len = map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = ce->size / (map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = ce->size / map->num_stripes;
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			      BTRFS_BLOCK_GROUP_RAID6)) {
		length = ce->size / nr_data_stripes(map);
		rmap_len = map->stripe_len * nr_data_stripes(map);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)

		stripe_nr = (physical - map->stripes[i].physical) /

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = (stripe_nr * map->num_stripes + i) /
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = ce->start + stripe_nr * rmap_len;
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)

	*stripe_len = rmap_len;
static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
	struct btrfs_bio_stripe s;

	for (i = 0; i < bbio->num_stripes - 1; i++) {
		if (parity_smaller(raid_map[i], raid_map[i+1])) {
			s = bbio->stripes[i];
			bbio->stripes[i] = bbio->stripes[i+1];
			raid_map[i] = raid_map[i+1];
			bbio->stripes[i+1] = s;

int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num,
{
	return __btrfs_map_block(fs_info, rw, logical, length, NULL,
				 multi_ret, mirror_num, raid_map_ret);
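
/*
 * Map a logical address to the physical stripes that store it. For
 * RAID5/6 writes and recovery this returns all stripes of the full
 * stripe, plus a raid_map describing the logical address (or P/Q role)
 * of each returned stripe.
 */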
int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		      u64 logical, u64 *length, u64 *type,
		      struct btrfs_multi_bio **multi_ret, int mirror_num,
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 *raid_map = NULL;
	int stripes_allocated = 8;
	int stripes_required = 1;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && rw == READ) {
		stripes_allocated = 1;

	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (ce->start > logical) {
		*length = ce->start - logical;

	multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),

	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		stripes_required = map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		stripes_required = map->sub_stripes;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
	    && multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
		/* RAID[56] write or recovery. Return all stripes */
		stripes_required = map->num_stripes;

		/* Only allocate the map if we've already got a large enough multi_ret */
		if (stripes_allocated >= stripes_required) {
			raid_map = kmalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && stripes_allocated < stripes_required) {
		stripes_allocated = stripes_required;

	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = stripe_nr / map->stripe_len;

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, ce->size - offset,
				map->stripe_len - stripe_offset);
		*length = ce->size - offset;

	multi->num_stripes = 1;

	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
			stripe_index = stripe_nr % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = stripe_nr % factor;
		stripe_index *= map->sub_stripes;

			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;

		stripe_nr = stripe_nr / factor;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {
			u64 raid56_full_stripe_start;
			u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len;

			/*
			 * align the start of our data stripe in the logical
			 */
			raid56_full_stripe_start = offset / full_stripe_len;
			raid56_full_stripe_start *= full_stripe_len;

			/* get the data stripe number */
			stripe_nr = raid56_full_stripe_start / map->stripe_len;
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/* Work out the disk rotation on this stripe-set */
			rot = stripe_nr % map->num_stripes;

			/* Fill in the logical address of each stripe */
			tmp = stripe_nr * nr_data_stripes(map);

			for (i = 0; i < nr_data_stripes(map); i++)
				raid_map[(i+rot) % map->num_stripes] =
					ce->start + (tmp + i) * map->stripe_len;

			raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE;
			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
				raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE;

			*length = map->stripe_len;

			multi->num_stripes = map->num_stripes;

			stripe_index = stripe_nr % nr_data_stripes(map);
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
				stripe_index = nr_data_stripes(map) + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			stripe_index = (stripe_nr + stripe_index) % map->num_stripes;
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;

		sort_parity_stripes(multi, raid_map);
		*raid_map_ret = raid_map;
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		    (!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE) ||
		     fs_info->ignore_fsid_mismatch)) {
			device = __find_device(&cur_devices->devices,
		cur_devices = cur_devices->seed;

struct btrfs_device *
btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
			   u64 devid, int instance)
{
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid && num_found++ == instance)
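
/*
 * A chunk is considered read-only when any of its stripes lives on a
 * device that is not writeable.
 */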
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;

	/*
	 * During chunk recovery we may fail to find a block group's
	 * corresponding chunk; we will rebuild it later.
	 */
	ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
	if (!fs_info->is_chunk_recover)

	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {

static struct btrfs_device *fill_missing_device(u64 devid)
{
	struct btrfs_device *device;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	device->devid = devid;

/*
 * slot == -1: SYSTEM chunk
 * return -EIO on error, otherwise return 0
 */
int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *leaf,
			    struct btrfs_chunk *chunk,
			    int slot, u64 logical)
{
	u32 chunk_ondisk_size;
	u32 sectorsize = fs_info->sectorsize;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	/*
	 * These validity checks may be insufficient to cover every corner
	 * case.
	 */
	if (!IS_ALIGNED(logical, sectorsize)) {
		error("invalid chunk logical %llu", logical);
	if (btrfs_chunk_sector_size(leaf, chunk) != sectorsize) {
		error("invalid chunk sectorsize %llu",
		      (unsigned long long)btrfs_chunk_sector_size(leaf, chunk));
	if (!length || !IS_ALIGNED(length, sectorsize)) {
		error("invalid chunk length %llu", length);
	if (stripe_len != BTRFS_STRIPE_LEN) {
		error("invalid chunk stripe length: %llu", stripe_len);
	/* Check on chunk item type */
	if (slot == -1 && (type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
		error("invalid chunk type %llu", type);
	if (type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
		     BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		error("unrecognized chunk type: %llu",
		      ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		error("missing chunk type flag: %llu", type);
	if (!(is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) ||
	      (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)) {
		error("conflicting chunk type detected: %llu", type);
	if ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
	    !is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		error("conflicting chunk profile detected: %llu", type);

	chunk_ondisk_size = btrfs_chunk_item_size(num_stripes);
	/*
	 * A btrfs_chunk contains at least one stripe, and for a sys_chunk
	 * it can't exceed the system chunk array size.
	 * For a normal chunk, it should match its chunk item size.
	 */
	if (num_stripes < 1 ||
	    (slot == -1 && chunk_ondisk_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) ||
	    (slot >= 0 && chunk_ondisk_size > btrfs_item_size_nr(leaf, slot))) {
		error("invalid num_stripes: %u", num_stripes);
	/*
	 * Device number check against profile
	 */
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && (sub_stripes != 2 ||
		  !IS_ALIGNED(num_stripes, sub_stripes))) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		error("Invalid num_stripes:sub_stripes %u:%u for profile %llu",
		      num_stripes, sub_stripes,
		      type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
/*
 * Slot is used to verify the chunk item is valid
 *
 * For a sys chunk in the superblock, pass -1 to indicate a sys chunk.
 */
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk, int slot)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u8 uuid[BTRFS_UUID_SIZE];

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Validation check */
	ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, slot, logical);
		error("%s checksums match, but it has an invalid chunk, %s",
		      (slot == -1) ? "Superblock" : "Metadata",
		      (slot == -1) ? "try btrfsck --repair -s <superblock> ie, 0,1,2" : "");

	ce = search_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
		map->stripes[i].dev = btrfs_find_device(fs_info, devid, uuid,
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = fill_missing_device(devid);
			printf("warning, device %llu is missing\n",
			       (unsigned long long)devid);
			list_add(&map->stripes[i].dev->dev_list,
				 &fs_info->fs_devices->devices);

	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
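
/* Copy the persistent fields of a DEV_ITEM into the in-memory device. */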
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

static int open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
		fs_devices = fs_devices->seed;

	fs_devices = find_fsid(fsid);
		/* missing all seed devices */
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, fsid, BTRFS_FSID_SIZE);

	ret = btrfs_open_devices(fs_devices, O_RDONLY);

	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;

static int read_one_dev(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),

	if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(fs_info, fs_uuid);

	device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
		list_add(&device->dev_list,
			 &fs_info->fs_devices->devices);

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = fs_info->dev_root;
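
/*
 * Walk the sys_chunk_array embedded in the super block and register
 * each SYSTEM chunk it contains in the mapping tree.
 */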
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	unsigned long sb_array_offset;
	struct btrfs_key key;

	if (fs_info->nodesize < BTRFS_SUPER_INFO_SIZE) {
		printf("ERROR: nodesize %u too small to read superblock\n",

	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		sb_array_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present; the exact stripe count check comes afterwards.
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
					"ERROR: invalid number of stripes %u in sys_array at offset %u\n",
					num_stripes, cur_offset);

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(fs_info, &key, sb, chunk, -1);
				"ERROR: unexpected item type %u in sys_array at offset %u\n",
				(u32)key.type, cur_offset);
		sb_array_offset += len;

	free_extent_buffer(sb);

out_short_read:
	printk("ERROR: sys_array too short to read %u bytes at offset %u\n",
	free_extent_buffer(sb);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *root = fs_info->chunk_root;

	path = btrfs_alloc_path();

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(fs_info, leaf, dev_item);
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(fs_info, &found_key, leaf, chunk,

	btrfs_free_path(path);

struct list_head *btrfs_scanned_uuids(void)
{
	return &fs_uuids;
}
static int rmw_eb(struct btrfs_fs_info *info,
		  struct extent_buffer *eb, struct extent_buffer *orig_eb)
{
	unsigned long orig_off = 0;
	unsigned long dest_off = 0;
	unsigned long copy_len = eb->len;

	ret = read_whole_eb(info, eb, 0);

	if (eb->start + eb->len <= orig_eb->start ||
	    eb->start >= orig_eb->start + orig_eb->len)

	/*
	 * | ----- orig_eb ------- |
	 *      | ----- stripe ------- |
	 *      | ----- orig_eb ------- |
	 *           | ----- orig_eb ------- |
	 */
	if (eb->start > orig_eb->start)
		orig_off = eb->start - orig_eb->start;
	if (orig_eb->start > eb->start)
		dest_off = orig_eb->start - eb->start;

	if (copy_len > orig_eb->len - orig_off)
		copy_len = orig_eb->len - orig_off;
	if (copy_len > eb->len - dest_off)
		copy_len = eb->len - dest_off;

	memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);

static int split_eb_for_raid56(struct btrfs_fs_info *info,
			       struct extent_buffer *orig_eb,
			       struct extent_buffer **ebs,
			       u64 stripe_len, u64 *raid_map,
{
	struct extent_buffer **tmp_ebs;
	u64 start = orig_eb->start;

	tmp_ebs = calloc(num_stripes, sizeof(*tmp_ebs));

	/* Alloc memory in a row for data stripes */
	for (i = 0; i < num_stripes; i++) {
		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
		tmp_ebs[i] = calloc(1, sizeof(**tmp_ebs) + stripe_len);

	for (i = 0; i < num_stripes; i++) {
		struct extent_buffer *eb = tmp_ebs[i];

		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)

		eb->start = raid_map[i];
		eb->len = stripe_len;
		eb->dev_bytenr = (u64)-1;

		this_eb_start = raid_map[i];

		if (start > this_eb_start ||
		    start + orig_eb->len < this_eb_start + stripe_len) {
			ret = rmw_eb(info, eb, orig_eb);
			memcpy(eb->data, orig_eb->data + eb->start - start,

	for (i = 0; i < num_stripes; i++)
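
/*
 * Write a RAID5/6 full stripe: split @eb into per-device data stripes,
 * compute P (and Q for RAID6) into dedicated buffers, then write every
 * stripe to its device.
 */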
int write_raid56_with_parity(struct btrfs_fs_info *info,
			     struct extent_buffer *eb,
			     struct btrfs_multi_bio *multi,
			     u64 stripe_len, u64 *raid_map)
{
	struct extent_buffer **ebs, *p_eb = NULL, *q_eb = NULL;
	int alloc_size = eb->len;

	ebs = malloc(sizeof(*ebs) * multi->num_stripes);
	pointers = malloc(sizeof(*pointers) * multi->num_stripes);
	if (!ebs || !pointers) {

	if (stripe_len > alloc_size)
		alloc_size = stripe_len;

	ret = split_eb_for_raid56(info, eb, ebs, stripe_len, raid_map,
				  multi->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		struct extent_buffer *new_eb;

		if (raid_map[i] < BTRFS_RAID5_P_STRIPE) {
			ebs[i]->dev_bytenr = multi->stripes[i].physical;
			ebs[i]->fd = multi->stripes[i].dev->fd;
			multi->stripes[i].dev->total_ios++;
			if (ebs[i]->start != raid_map[i]) {
				goto out_free_split;

		new_eb = malloc(sizeof(*eb) + alloc_size);
			goto out_free_split;
		new_eb->dev_bytenr = multi->stripes[i].physical;
		new_eb->fd = multi->stripes[i].dev->fd;
		multi->stripes[i].dev->total_ios++;
		new_eb->len = stripe_len;

		if (raid_map[i] == BTRFS_RAID5_P_STRIPE)
		else if (raid_map[i] == BTRFS_RAID6_Q_STRIPE)

		ebs[multi->num_stripes - 2] = p_eb;
		ebs[multi->num_stripes - 1] = q_eb;

		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;

		raid6_gen_syndrome(multi->num_stripes, stripe_len, pointers);

		ebs[multi->num_stripes - 1] = p_eb;
		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;
		ret = raid5_gen_result(multi->num_stripes, stripe_len,
				       multi->num_stripes - 1, pointers);
			goto out_free_split;

	for (i = 0; i < multi->num_stripes; i++) {
		ret = write_extent_to_disk(ebs[i]);
			goto out_free_split;

	for (i = 0; i < multi->num_stripes; i++) {
/*
 * Get stripe length from chunk item and its stripe items
 *
 * Caller should only call this function after validating the chunk item
 * by using btrfs_check_chunk_valid().
 */
u64 btrfs_stripe_length(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_chunk *chunk)
{
	u32 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 profile = btrfs_chunk_type(leaf, chunk) &
		      BTRFS_BLOCK_GROUP_PROFILE_MASK;

	chunk_len = btrfs_chunk_length(leaf, chunk);

	switch (profile) {
	case 0: /* Single profile */
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_DUP:
		stripe_len = chunk_len;
		break;
	case BTRFS_BLOCK_GROUP_RAID0:
		stripe_len = chunk_len / num_stripes;
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
		stripe_len = chunk_len / (num_stripes - 1);
		break;
	case BTRFS_BLOCK_GROUP_RAID6:
		stripe_len = chunk_len / (num_stripes - 2);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		stripe_len = chunk_len / (num_stripes /
				btrfs_chunk_sub_stripes(leaf, chunk));
		break;
	default:
		/* Invalid chunk profile found */
/*
 * Return 0 if size of @device is already good
 * Return >0 if size of @device is not aligned but fixed without problems
 * Return <0 if something wrong happened when aligning the size of @device
 */
int btrfs_fix_device_size(struct btrfs_fs_info *fs_info,
			  struct btrfs_device *device)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_path path;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_dev_item *di;
	u64 old_bytes = device->total_bytes;

	if (IS_ALIGNED(old_bytes, fs_info->sectorsize))

	/* Align the in-memory total_bytes first, and use it as the correct size */
	device->total_bytes = round_down(device->total_bytes,
					 fs_info->sectorsize);

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	trans = btrfs_start_transaction(chunk_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		error("error starting transaction: %d (%s)",
		      ret, strerror(-ret));

	btrfs_init_path(&path);
	ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 1);
		error("failed to find DEV_ITEM for devid %llu", device->devid);
		error("failed to search chunk root: %d (%s)",
		      ret, strerror(-ret));
	di = btrfs_item_ptr(path.nodes[0], path.slots[0], struct btrfs_dev_item);
	btrfs_set_device_total_bytes(path.nodes[0], di, device->total_bytes);
	btrfs_mark_buffer_dirty(path.nodes[0]);
	ret = btrfs_commit_transaction(trans, chunk_root);
		error("failed to commit current transaction: %d (%s)",
		      ret, strerror(-ret));
		btrfs_release_path(&path);
	btrfs_release_path(&path);
	printf("Fixed device size for devid %llu, old size: %llu new size: %llu\n",
	       device->devid, old_bytes, device->total_bytes);

	/* We haven't modified anything, it's OK to commit the current trans */
	btrfs_commit_transaction(trans, chunk_root);
	btrfs_release_path(&path);
/*
 * Return 0 if super block total_bytes matches all devices' total_bytes
 * Return >0 if super block total_bytes mismatches but was fixed without problem
 * Return <0 if we failed to fix super block total_bytes
 */
int btrfs_fix_super_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct list_head *dev_list = &fs_info->fs_devices->devices;
	u64 total_bytes = 0;
	u64 old_bytes = btrfs_super_total_bytes(fs_info->super_copy);

	list_for_each_entry(device, dev_list, dev_list) {
		/*
		 * Caller should ensure this function is called after aligning
		 * all devices' total_bytes.
		 */
		if (!IS_ALIGNED(device->total_bytes, fs_info->sectorsize)) {
			error("device %llu total_bytes %llu not aligned to %u",
			      device->devid, device->total_bytes,
			      fs_info->sectorsize);
		total_bytes += device->total_bytes;

	if (total_bytes == old_bytes)

	btrfs_set_super_total_bytes(fs_info->super_copy, total_bytes);

	/* Commit transaction to update all super blocks */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		error("error starting transaction: %d (%s)",
		      ret, strerror(-ret));
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
		error("failed to commit current transaction: %d (%s)",
		      ret, strerror(-ret));
	printf("Fixed super total bytes, old size: %llu new size: %llu\n",
	       old_bytes, total_bytes);
/*
 * Return 0 if all devices and super block sizes are good
 * Return >0 if any device/super size problem was found, but fixed
 * Return <0 if something wrong happened during fixing
 */
int btrfs_fix_device_and_super_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct list_head *dev_list = &fs_info->fs_devices->devices;
	bool have_bad_value = false;

	/* Seed device is not supported yet */
	if (fs_info->fs_devices->seed) {
		error("fixing device size with seed device is not supported yet");

	/* All devices must be set up before repairing */
	if (list_empty(dev_list)) {
		error("no device found");
	list_for_each_entry(device, dev_list, dev_list) {
		if (device->fd == -1 || !device->writeable) {
			error("devid %llu is missing or not writeable",
	"fixing device size needs all device(s) to be present and writeable");

	/* Repair total_bytes of each device */
	list_for_each_entry(device, dev_list, dev_list) {
		ret = btrfs_fix_device_size(fs_info, device);
			have_bad_value = true;

	/* Repair super total_bytes */
	ret = btrfs_fix_super_size(fs_info);
		have_bad_value = true;
	if (have_bad_value) {
			"Fixed unaligned/mismatched total_bytes for super block and device items\n");
		printf("No device size related problem found\n");