/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
	struct btrfs_device *dev;

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static LIST_HEAD(fs_uuids);
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
			return dev;
		}
	}
	return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->name = kstrdup(path, GFP_NOFS);
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
		device->bytes_used =
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name, path)) {
		char *name = strdup(path);
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
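/*
 * Illustrative sketch of the registry device_list_add() maintains (not part
 * of the original source):
 *
 *   fs_uuids                    one global list of scanned filesystems
 *     -> btrfs_fs_devices       one entry per filesystem fsid
 *          ->devices            one btrfs_device per (devid, dev uuid)
 *
 * Rescanning a path only refreshes the existing btrfs_device entry; a newer
 * generation in the superblock promotes that device to latest_devid, which
 * btrfs_open_devices() later uses when picking latest_bdev.
 */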
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices;
	struct list_head *cur;
	struct btrfs_device *device;

	list_for_each(cur, &fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		device->writeable = 0;
	}

	seed_devices = fs_devices->seed;
	fs_devices->seed = NULL;
	fs_devices = seed_devices;
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	int fd;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);

		fd = open(device->name, flags);
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		if (flags == O_RDWR)
			device->writeable = 1;
	}

	btrfs_close_devices(fs_devices);
int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset)
{
	struct btrfs_super_block *disk_super;

	disk_super = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, disk_super, super_offset);

	devid = le64_to_cpu(disk_super->dev_item.devid);
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
		*total_devs = 1;
	else
		*total_devs = btrfs_super_num_devices(disk_super);
	uuid_unparse(disk_super->fsid, uuidbuf);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found = 0;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				 last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}
		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
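/*
 * Worked example for the search above (illustrative, not part of the
 * original source): with num_bytes = 2MB and existing dev extents at
 * [1MB, 5MB) and [9MB, 13MB), the walk keeps last_byte at the end of the
 * extent most recently seen.  At the key for the extent starting at 9MB,
 * hole_size = 9MB - 5MB = 4MB >= 2MB, so the 2MB allocation is placed at
 * last_byte = 5MB.
 */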
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
	return ret;
}
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);

	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			      path->slots[0]);
	if (found_key.objectid != objectid)
		*offset = 0;
	else {
		chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
				       struct btrfs_chunk);
		*offset = found_key.offset +
			btrfs_chunk_length(path->nodes[0], chunk);
	}

	btrfs_free_path(path);
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			      path->slots[0]);
	*objectid = found_key.offset + 1;

	btrfs_release_path(root, path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	ret = find_next_devid(root, path, &free_devid);

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
	return ret;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
	return ret;
}
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
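/*
 * Illustrative layout of the superblock chunk array this function appends
 * to (not part of the original source):
 *
 *   sys_chunk_array: [disk_key][chunk item][disk_key][chunk item]...
 *                    |<------- btrfs_super_sys_array_size() ------>|
 *
 * Each pair is a chunk key plus its chunk item; the item length varies
 * with the stripe count (btrfs_chunk_item_size(num_stripes)), so the
 * array has to be parsed sequentially, as btrfs_read_sys_array() does
 * further down.
 */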
/* return factor tenths of num; used below to cap a chunk at 10% of the fs */
static u64 div_factor(u64 num, int factor)
{
	num *= factor;
	num /= 10;
	return num;
}
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
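/*
 * Worked examples (illustrative, not part of the original source): with
 * calc_size = 1GB per stripe, a RAID0 chunk over 4 stripes maps 4GB of
 * logical space, RAID1/DUP map 1GB (two physical copies of one stripe),
 * and RAID10 over 4 stripes with sub_stripes = 2 maps 1GB * (4 / 2) = 2GB.
 */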
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 8 * 1024 * 1024;
	u64 min_free;
	u64 max_chunk_size = 4 * calc_size;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if (list_empty(dev_list)) {
		return -ENOSPC;
	}

	if (type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_DUP)) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			calc_size = 8 * 1024 * 1024;
			max_chunk_size = calc_size * 2;
			min_stripe_size = 1 * 1024 * 1024;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 10 * calc_size;
			min_stripe_size = 64 * 1024 * 1024;
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 4 * calc_size;
			min_stripe_size = 32 * 1024 * 1024;
		}
	}
	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(&info->super_copy));
		min_stripes = 2;
	}
	if (type & BTRFS_BLOCK_GROUP_DUP) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
	    max_chunk_size) {
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	calc_size /= stripe_len;
	calc_size *= stripe_len;
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}
	BUG_ON(!list_empty(&private_devs));
	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_existing_cache_extent(
			&extent_root->fs_info->mapping_tree.cache_tree,
			&map->ce);

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
	}
	kfree(chunk);
	return ret;
}
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *extent_root, u64 *start,
			   u64 num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	u64 calc_size = 8 * 1024 * 1024;
	int num_stripes = 1;
	int sub_stripes = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);

	stripes = &chunk->stripe;
	calc_size = num_bytes;

	index = 0;
	cur = dev_list->next;
	device = list_entry(cur, struct btrfs_device, dev_list);

	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}
	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = num_bytes;

	ret = insert_existing_cache_extent(
			&extent_root->fs_info->mapping_tree.cache_tree,
			&map->ce);
	kfree(chunk);
	return ret;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	cache_tree_init(&tree->cache_tree);
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	int ret;

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);
	BUG_ON(!ce);
	BUG_ON(ce->start > logical || ce->start + ce->size < logical);
	map = container_of(ce, struct map_lookup, ce);

	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	return ret;
}
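/*
 * Illustrative examples (not part of the original source): a two-device
 * RAID1 chunk reports num_stripes = 2 copies, a four-device RAID10 chunk
 * with sub_stripes = 2 also reports 2 copies, and RAID0/single report 1.
 */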
int btrfs_next_metadata(struct btrfs_mapping_tree *map_tree, u64 *logical,
			u64 *size)
{
	struct cache_extent *ce;
	struct map_lookup *map;

	ce = find_first_cache_extent(&map_tree->cache_tree, *logical);
	while (ce) {
		ce = next_cache_extent(ce);
		if (!ce)
			return -ENOENT;
		map = container_of(ce, struct map_lookup, ce);
		if (map->type & BTRFS_BLOCK_GROUP_METADATA) {
			*logical = ce->start;
			*size = ce->size;
			return 0;
		}
	}
	return -ENOENT;
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	ce = find_first_cache_extent(&map_tree->cache_tree, chunk_start);
	BUG_ON(!ce);
	map = container_of(ce, struct map_lookup, ce);

	length = ce->size;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = ce->size / (map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = ce->size / map->num_stripes;

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = (physical - map->stripes[i].physical) /
			    map->stripe_len;

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = (stripe_nr * map->num_stripes + i) /
				    map->sub_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = ce->start + stripe_nr * map->stripe_len;
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;
	return 0;
}
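/*
 * Worked example (illustrative, not part of the original source): in a
 * RAID0 chunk with num_stripes = 2 and stripe_len = 64K, a physical byte
 * at stripes[1].physical + 64K gives stripe_nr = 1 on that device; the
 * logical stripe number is then 1 * 2 + 1 = 3, so the block address
 * reported is ce->start + 3 * 64K.
 */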
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, NULL,
				 multi_ret, mirror_num);
}
int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		      u64 logical, u64 *length, u64 *type,
		      struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripe_index;
	int i;
	int stripes_allocated = 8;
	int stripes_required = 1;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && rw == READ) {
		stripes_allocated = 1;
	}

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);

	if (ce->start > logical || ce->start + ce->size < logical) {
		return -ENOENT;
	}

	multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
			GFP_NOFS);

	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		stripes_required = map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		stripes_required = map->sub_stripes;
	}
	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
	}

	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = stripe_nr / map->stripe_len;

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, ce->size - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = ce->size - offset;
	}

	multi->num_stripes = 1;

	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = stripe_nr % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = stripe_nr % factor;
		stripe_index *= map->sub_stripes;

		if (rw == WRITE)
			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;

		stripe_nr = stripe_nr / factor;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	}
	BUG_ON(stripe_index >= map->num_stripes);
	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;
	return 0;
}
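/*
 * Worked example of the mapping above (illustrative, not part of the
 * original source): reading a RAID0 chunk with num_stripes = 2 and
 * stripe_len = 64K at offset = 192K: stripe_nr = 192K / 64K = 3 and
 * stripe_offset = 0; then stripe_index = 3 % 2 = 1 and stripe_nr becomes
 * 3 / 2 = 1, so the I/O goes to stripes[1] at physical
 * stripes[1].physical + 1 * 64K.
 */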
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
struct btrfs_device *btrfs_find_device_by_devid(struct btrfs_root *root,
						u64 devid, int instance)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;
	struct btrfs_device *dev;
	struct list_head *cur;
	int num_found = 0;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid && num_found++ == instance)
			return dev;
	}
	return NULL;
}
int btrfs_bootstrap_super_map(struct btrfs_mapping_tree *map_tree,
			      struct btrfs_fs_devices *fs_devices)
{
	struct map_lookup *map;
	u64 logical = BTRFS_SUPER_INFO_OFFSET;
	u64 length = BTRFS_SUPER_INFO_SIZE;
	int num_stripes = 0;
	int sub_stripes = 0;
	int ret;
	int i = 0;
	struct list_head *cur;

	list_for_each(cur, &fs_devices->devices) {
		num_stripes++;
	}
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;
	map->io_width = length;
	map->io_align = length;
	map->sector_size = length;
	map->stripe_len = length;
	map->type = BTRFS_BLOCK_GROUP_RAID1;

	list_for_each(cur, &fs_devices->devices) {
		struct btrfs_device *device = list_entry(cur,
							 struct btrfs_device,
							 dev_list);
		map->stripes[i].physical = logical;
		map->stripes[i].dev = device;
		i++;
	}
	ret = insert_existing_cache_extent(&map_tree->cache_tree, &map->ce);
	if (ret == -EEXIST) {
		struct cache_extent *old;
		struct map_lookup *old_map;

		old = find_cache_extent(&map_tree->cache_tree, logical, length);
		old_map = container_of(old, struct map_lookup, ce);
		remove_cache_extent(&map_tree->cache_tree, old);
		kfree(old_map);
		ret = insert_existing_cache_extent(&map_tree->cache_tree,
						   &map->ce);
	}
	return ret;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	ce = find_first_cache_extent(&map_tree->cache_tree, chunk_offset);
	BUG_ON(!ce);

	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	return readonly;
}
static struct btrfs_device *fill_missing_device(u64 devid)
{
	struct btrfs_device *device;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	device->devid = devid;
	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = fill_missing_device(devid);
			printf("warning, device %llu is missing\n",
			       (unsigned long long)devid);
		}
	}
	ret = insert_existing_cache_extent(&map_tree->cache_tree, &map->ce);
	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	int ret;
	struct btrfs_fs_devices *fs_devices;

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			return 0;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);

	ret = btrfs_open_devices(fs_devices, O_RDONLY);

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
	return ret;
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret = 0;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device) {
		printk("warning: devid %llu not found\n",
		       (unsigned long long)devid);
		device = kmalloc(sizeof(*device), GFP_NOFS);
		device->total_ios = 0;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u8 *ptr;
	unsigned long sb_ptr;
	u32 cur;
	int ret;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;

				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	btrfs_free_path(path);
	return 0;
}
struct list_head *btrfs_scanned_uuids(void)
{
	return &fs_uuids;
}