/*
 * Copyright (C) 2013 FUJITSU LIMITED.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#define _XOPEN_SOURCE 500
#define _GNU_SOURCE

#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>

#include "kerncompat.h"
#include "list.h"
#include "radix-tree.h"
#include "ctree.h"
#include "extent-cache.h"
#include "disk-io.h"
#include "volumes.h"
#include "transaction.h"
#include "utils.h"
#include "btrfsck.h"
#include "commands.h"
struct recover_control {
	int verbose;
	int yes;

	u16 csum_size;
	u32 sectorsize;
	u32 leafsize;
	u64 generation;
	u64 chunk_root_generation;

	struct btrfs_fs_devices *fs_devices;

	struct cache_tree chunk;
	struct block_group_tree bg;
	struct device_extent_tree devext;
	struct cache_tree eb_cache;

	struct list_head good_chunks;
	struct list_head bad_chunks;
	struct list_head unrepaired_chunks;
	pthread_mutex_t rc_lock;
};
struct extent_record {
	struct cache_extent cache;
	u64 generation;
	u8 csum[BTRFS_CSUM_SIZE];
	struct btrfs_device *devices[BTRFS_MAX_MIRRORS];
	u64 offsets[BTRFS_MAX_MIRRORS];
	int nmirrors;
};

struct device_scan {
	struct recover_control *rc;
	struct btrfs_device *dev;
	int fd;
};
static struct extent_record *btrfs_new_extent_record(struct extent_buffer *eb)
{
	struct extent_record *rec;

	rec = malloc(sizeof(*rec));
	if (!rec) {
		fprintf(stderr, "Fail to allocate memory for extent record.\n");
		exit(1);
	}

	memset(rec, 0, sizeof(*rec));
	rec->cache.start = btrfs_header_bytenr(eb);
	rec->cache.size = eb->len;
	rec->generation = btrfs_header_generation(eb);
	read_extent_buffer(eb, rec->csum, (unsigned long)btrfs_header_csum(eb),
			   BTRFS_CSUM_SIZE);
	return rec;
}
static int process_extent_buffer(struct cache_tree *eb_cache,
				 struct extent_buffer *eb,
				 struct btrfs_device *device, u64 offset)
{
	struct extent_record *rec;
	struct extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_extent_record(eb);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(eb_cache,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct extent_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			if (exist->cache.start != rec->cache.start ||
			    exist->cache.size != rec->cache.size ||
			    memcmp(exist->csum, rec->csum, BTRFS_CSUM_SIZE)) {
				ret = -EEXIST;
			} else {
				BUG_ON(exist->nmirrors >= BTRFS_MAX_MIRRORS);
				exist->devices[exist->nmirrors] = device;
				exist->offsets[exist->nmirrors] = offset;
				exist->nmirrors++;
			}
			goto free_out;
		}
		remove_cache_extent(eb_cache, cache);
		free(exist);
		goto again;
	}

	rec->devices[0] = device;
	rec->offsets[0] = offset;
	rec->nmirrors++;

	ret = insert_cache_extent(eb_cache, &rec->cache);
	BUG_ON(ret);

	return ret;
free_out:
	free(rec);
	return ret;
}
static void free_extent_record(struct cache_extent *cache)
{
	struct extent_record *er;

	er = container_of(cache, struct extent_record, cache);
	free(er);
}

FREE_EXTENT_CACHE_BASED_TREE(extent_record, free_extent_record);
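/*
 * Note: FREE_EXTENT_CACHE_BASED_TREE(extent_record, free_extent_record)
 * expands to a free_extent_record_tree() helper that walks a cache tree
 * and calls free_extent_record() on every cached extent record; it is
 * what free_recover_control() uses below to tear down rc->eb_cache.
 */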
static struct btrfs_chunk *create_chunk_item(struct chunk_record *record)
{
	struct btrfs_chunk *ret;
	struct btrfs_stripe *chunk_stripe;
	int i;

	if (!record || record->num_stripes == 0)
		return NULL;
	ret = malloc(btrfs_chunk_item_size(record->num_stripes));
	if (!ret)
		return NULL;
	btrfs_set_stack_chunk_length(ret, record->length);
	btrfs_set_stack_chunk_owner(ret, record->owner);
	btrfs_set_stack_chunk_stripe_len(ret, record->stripe_len);
	btrfs_set_stack_chunk_type(ret, record->type_flags);
	btrfs_set_stack_chunk_io_align(ret, record->io_align);
	btrfs_set_stack_chunk_io_width(ret, record->io_width);
	btrfs_set_stack_chunk_sector_size(ret, record->sector_size);
	btrfs_set_stack_chunk_num_stripes(ret, record->num_stripes);
	btrfs_set_stack_chunk_sub_stripes(ret, record->sub_stripes);
	for (i = 0, chunk_stripe = &ret->stripe; i < record->num_stripes;
	     i++, chunk_stripe++) {
		btrfs_set_stack_stripe_devid(chunk_stripe,
				record->stripes[i].devid);
		btrfs_set_stack_stripe_offset(chunk_stripe,
				record->stripes[i].offset);
		memcpy(chunk_stripe->dev_uuid, record->stripes[i].dev_uuid,
		       BTRFS_UUID_SIZE);
	}
	return ret;
}
static void init_recover_control(struct recover_control *rc, int verbose,
				 int yes)
{
	memset(rc, 0, sizeof(struct recover_control));
	cache_tree_init(&rc->chunk);
	cache_tree_init(&rc->eb_cache);
	block_group_tree_init(&rc->bg);
	device_extent_tree_init(&rc->devext);

	INIT_LIST_HEAD(&rc->good_chunks);
	INIT_LIST_HEAD(&rc->bad_chunks);
	INIT_LIST_HEAD(&rc->unrepaired_chunks);

	rc->verbose = verbose;
	rc->yes = yes;
	pthread_mutex_init(&rc->rc_lock, NULL);
}
static void free_recover_control(struct recover_control *rc)
{
	free_block_group_tree(&rc->bg);
	free_chunk_cache_tree(&rc->chunk);
	free_device_extent_tree(&rc->devext);
	free_extent_record_tree(&rc->eb_cache);
	pthread_mutex_destroy(&rc->rc_lock);
}
static int process_block_group_item(struct block_group_tree *bg_cache,
				    struct extent_buffer *leaf,
				    struct btrfs_key *key, int slot)
{
	struct block_group_record *rec;
	struct block_group_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_block_group_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(&bg_cache->tree,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct block_group_record, cache);

		/* check the generation and replace if needed */
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct block_group_record,
					      generation);
			/*
			 * According to the current kernel code, the following
			 * case is impossible, or there is something wrong in
			 * the kernel code.
			 */
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&bg_cache->tree, cache);
		list_del_init(&exist->list);
		free(exist);
		/*
		 * We must do the search again to avoid the following cache:
		 * /--old bg 1--//--old bg 2--/
		 *        /--new bg--/
		 */
		goto again;
	}

	ret = insert_block_group_record(bg_cache, rec);
	BUG_ON(ret);

	return ret;
free_out:
	free(rec);
	return ret;
}
static int process_chunk_item(struct cache_tree *chunk_cache,
			      struct extent_buffer *leaf, struct btrfs_key *key,
			      int slot)
{
	struct chunk_record *rec;
	struct chunk_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_chunk_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(chunk_cache, rec->offset, rec->length);
	if (cache) {
		exist = container_of(cache, struct chunk_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int num_stripes = rec->num_stripes;
			int rec_size = btrfs_chunk_record_size(num_stripes);
			int offset = offsetof(struct chunk_record, generation);

			if (exist->num_stripes != rec->num_stripes ||
			    memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   rec_size - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(chunk_cache, cache);
		free(exist);
		goto again;
	}

	ret = insert_cache_extent(chunk_cache, &rec->cache);
	BUG_ON(ret);

	return ret;
free_out:
	free(rec);
	return ret;
}
static int process_device_extent_item(struct device_extent_tree *devext_cache,
				      struct extent_buffer *leaf,
				      struct btrfs_key *key, int slot)
{
	struct device_extent_record *rec;
	struct device_extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_device_extent_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent2(&devext_cache->tree,
				     rec->cache.objectid,
				     rec->cache.start,
				     rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct device_extent_record, cache);
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct device_extent_record,
					      generation);
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&devext_cache->tree, cache);
		list_del_init(&exist->chunk_list);
		list_del_init(&exist->device_list);
		free(exist);
		goto again;
	}

	ret = insert_device_extent_record(devext_cache, rec);
	BUG_ON(ret);

	return ret;
free_out:
	free(rec);
	return ret;
}
static void print_block_group_info(struct block_group_record *rec, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Block Group: start = %llu, len = %llu, flag = %llx\n",
	       rec->objectid, rec->offset, rec->flags);
}
static void print_block_group_tree(struct block_group_tree *tree)
{
	struct cache_extent *cache;
	struct block_group_record *rec;

	printf("All Block Groups:\n");
	for (cache = first_cache_extent(&tree->tree); cache;
	     cache = next_cache_extent(cache)) {
		rec = container_of(cache, struct block_group_record, cache);
		print_block_group_info(rec, "\t");
	}
	printf("\n");
}
static void print_stripe_info(struct stripe *data, char *prefix1, char *prefix2,
			      int index)
{
	if (prefix1)
		printf("%s", prefix1);
	if (prefix2)
		printf("%s", prefix2);
	printf("[%2d] Stripe: devid = %llu, offset = %llu\n",
	       index, data->devid, data->offset);
}
static void print_chunk_self_info(struct chunk_record *rec, char *prefix)
{
	int i;

	if (prefix)
		printf("%s", prefix);
	printf("Chunk: start = %llu, len = %llu, type = %llx, num_stripes = %u\n",
	       rec->offset, rec->length, rec->type_flags, rec->num_stripes);
	if (prefix)
		printf("%s", prefix);
	printf("    Stripes list:\n");
	for (i = 0; i < rec->num_stripes; i++)
		print_stripe_info(&rec->stripes[i], prefix, "    ", i);
}
static void print_chunk_tree(struct cache_tree *tree)
{
	struct cache_extent *n;
	struct chunk_record *entry;

	printf("All Chunks:\n");
	for (n = first_cache_extent(tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct chunk_record, cache);
		print_chunk_self_info(entry, "\t");
	}
	printf("\n");
}
static void print_device_extent_info(struct device_extent_record *rec,
				     char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device extent: devid = %llu, start = %llu, len = %llu, chunk offset = %llu\n",
	       rec->objectid, rec->offset, rec->length, rec->chunk_offset);
}
static void print_device_extent_tree(struct device_extent_tree *tree)
{
	struct cache_extent *n;
	struct device_extent_record *entry;

	printf("All Device Extents:\n");
	for (n = first_cache_extent(&tree->tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct device_extent_record, cache);
		print_device_extent_info(entry, "\t");
	}
	printf("\n");
}
static void print_device_info(struct btrfs_device *device, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device: id = %llu, name = %s\n",
	       device->devid, device->name);
}
static void print_all_devices(struct list_head *devices)
{
	struct btrfs_device *dev;

	printf("All Devices:\n");
	list_for_each_entry(dev, devices, dev_list)
		print_device_info(dev, "\t");
	printf("\n");
}
static void print_scan_result(struct recover_control *rc)
{
	if (!rc->verbose)
		return;

	printf("DEVICE SCAN RESULT:\n");
	printf("Filesystem Information:\n");
	printf("\tsectorsize: %d\n", rc->sectorsize);
	printf("\tleafsize: %d\n", rc->leafsize);
	printf("\ttree root generation: %llu\n", rc->generation);
	printf("\tchunk root generation: %llu\n", rc->chunk_root_generation);
	printf("\n");

	print_all_devices(&rc->fs_devices->devices);
	print_block_group_tree(&rc->bg);
	print_chunk_tree(&rc->chunk);
	print_device_extent_tree(&rc->devext);
}
static void print_chunk_info(struct chunk_record *chunk, char *prefix)
{
	struct device_extent_record *devext;
	int i = 0;

	print_chunk_self_info(chunk, prefix);
	if (prefix)
		printf("%s", prefix);
	if (chunk->bg_rec)
		print_block_group_info(chunk->bg_rec, "    ");
	else
		printf("    No block group.\n");

	if (prefix)
		printf("%s", prefix);
	if (list_empty(&chunk->dextents)) {
		printf("    No device extent.\n");
	} else {
		printf("    Device extent list:\n");
		list_for_each_entry(devext, &chunk->dextents, chunk_list) {
			if (prefix)
				printf("%s", prefix);
			printf("%s[%2d]", "        ", i);
			print_device_extent_info(devext, NULL);
			i++;
		}
	}
}
static void print_check_result(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct device_extent_record *devext;
	int total = 0;
	int good = 0;
	int bad = 0;

	if (!rc->verbose)
		return;

	printf("CHECK RESULT:\n");
	printf("Healthy Chunks:\n");
	list_for_each_entry(chunk, &rc->good_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	printf("Bad Chunks:\n");
	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		print_chunk_info(chunk, "  ");
		bad++;
		total++;
	}
	printf("\n");
	printf("Total Chunks:\t%d\n", total);
	printf("  Healthy:\t%d\n", good);
	printf("  Bad:\t%d\n", bad);

	printf("\n");
	printf("Orphan Block Groups:\n");
	list_for_each_entry(bg, &rc->bg.block_groups, list)
		print_block_group_info(bg, "  ");

	printf("\n");
	printf("Orphan Device Extents:\n");
	list_for_each_entry(devext, &rc->devext.no_chunk_orphans, chunk_list)
		print_device_extent_info(devext, "  ");
}
static int check_chunk_by_metadata(struct recover_control *rc,
				   struct btrfs_root *root,
				   struct chunk_record *chunk, int bg_only)
{
	int ret;
	int i;
	int slot;
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_root *dev_root;
	struct stripe *stripe;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_block_group_item *bg_ptr;
	struct extent_buffer *l;

	btrfs_init_path(&path);

	if (bg_only)
		goto bg_check;

	dev_root = root->fs_info->dev_root;
	for (i = 0; i < chunk->num_stripes; i++) {
		stripe = &chunk->stripes[i];

		key.objectid = stripe->devid;
		key.offset = stripe->offset;
		key.type = BTRFS_DEV_EXTENT_KEY;

		ret = btrfs_search_slot(NULL, dev_root, &key, &path, 0, 0);
		if (ret < 0) {
			fprintf(stderr, "Search device extent failed(%d)\n",
				ret);
			btrfs_release_path(&path);
			return ret;
		} else if (ret > 0) {
			if (rc->verbose)
				fprintf(stderr,
					"No device extent[%llu, %llu]\n",
					stripe->devid, stripe->offset);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		l = path.nodes[0];
		slot = path.slots[0];
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		if (chunk->offset !=
		    btrfs_dev_extent_chunk_offset(l, dev_extent)) {
			if (rc->verbose)
				fprintf(stderr,
					"Device tree mismatch with chunks: dev_extent[%llu, %llu], chunk[%llu, %llu]\n",
					btrfs_dev_extent_chunk_offset(l,
								dev_extent),
					btrfs_dev_extent_length(l, dev_extent),
					chunk->offset, chunk->length);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		btrfs_release_path(&path);
	}

bg_check:
	key.objectid = chunk->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk->length;

	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, &path,
				0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search block group failed(%d)\n", ret);
		btrfs_release_path(&path);
		return ret;
	} else if (ret > 0) {
		if (rc->verbose)
			fprintf(stderr, "No block group[%llu, %llu]\n",
				key.objectid, key.offset);
		btrfs_release_path(&path);
		return -ENOENT;
	}

	l = path.nodes[0];
	slot = path.slots[0];
	bg_ptr = btrfs_item_ptr(l, slot, struct btrfs_block_group_item);
	if (chunk->type_flags != btrfs_disk_block_group_flags(l, bg_ptr)) {
		if (rc->verbose)
			fprintf(stderr,
				"Chunk[%llu, %llu]'s type(%llu) is different from the Block Group's type(%llu)\n",
				chunk->offset, chunk->length, chunk->type_flags,
				btrfs_disk_block_group_flags(l, bg_ptr));
		btrfs_release_path(&path);
		return -ENOENT;
	}
	btrfs_release_path(&path);
	return 0;
}
static int check_all_chunks_by_metadata(struct recover_control *rc,
					struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	LIST_HEAD(orphan_chunks);
	int ret = 0;
	int err;

	list_for_each_entry_safe(chunk, next, &rc->good_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 0);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err != -ENOENT && !ret)
			ret = err ? err : -EINVAL;
	}
	list_splice(&orphan_chunks, &rc->bad_chunks);
	return ret;
}
static int extract_metadata_record(struct recover_control *rc,
				   struct extent_buffer *leaf)
{
	struct btrfs_key key;
	int ret = 0;
	int i;
	u32 nritems;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_BLOCK_GROUP_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_block_group_item(&rc->bg, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_CHUNK_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_chunk_item(&rc->chunk, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_DEV_EXTENT_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_device_extent_item(&rc->devext, leaf,
							 &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		default:
			break;
		}
		if (ret)
			break;
	}
	return ret;
}
static inline int is_super_block_address(u64 offset)
{
	int i;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		if (offset == btrfs_sb_offset(i))
			return 1;
	}
	return 0;
}
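/*
 * The device scan below relies on this check to step over the btrfs
 * superblock copies (at 64KiB, 64MiB and 256GiB) so that a superblock
 * is never misread as a tree block.
 */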
static int scan_one_device(void *dev_scan_struct)
{
	struct extent_buffer *buf;
	u64 bytenr;
	int ret = 0;
	struct device_scan *dev_scan = (struct device_scan *)dev_scan_struct;
	struct recover_control *rc = dev_scan->rc;
	struct btrfs_device *device = dev_scan->dev;
	int fd = dev_scan->fd;
	int oldtype;

	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
	if (ret)
		return 1;

	buf = malloc(sizeof(*buf) + rc->leafsize);
	if (!buf)
		return -ENOMEM;
	buf->len = rc->leafsize;

	bytenr = 0;
	while (1) {
		if (is_super_block_address(bytenr))
			bytenr += rc->sectorsize;

		if (pread64(fd, buf->data, rc->leafsize, bytenr) <
		    rc->leafsize)
			break;

		if (memcmp_extent_buffer(buf, rc->fs_devices->fsid,
					 btrfs_header_fsid(),
					 BTRFS_FSID_SIZE)) {
			bytenr += rc->sectorsize;
			continue;
		}

		if (verify_tree_block_csum_silent(buf, rc->csum_size)) {
			bytenr += rc->sectorsize;
			continue;
		}

		pthread_mutex_lock(&rc->rc_lock);
		ret = process_extent_buffer(&rc->eb_cache, buf, device, bytenr);
		pthread_mutex_unlock(&rc->rc_lock);
		if (ret)
			goto out;

		if (btrfs_header_level(buf) != 0)
			goto next_node;

		switch (btrfs_header_owner(buf)) {
		case BTRFS_EXTENT_TREE_OBJECTID:
		case BTRFS_DEV_TREE_OBJECTID:
			/* different trees use different generations */
			if (btrfs_header_generation(buf) > rc->generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		case BTRFS_CHUNK_TREE_OBJECTID:
			if (btrfs_header_generation(buf) >
			    rc->chunk_root_generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		}
next_node:
		bytenr += rc->leafsize;
	}
out:
	close(fd);
	free(buf);
	return ret;
}
static int scan_devices(struct recover_control *rc)
{
	int ret = 0;
	int fd;
	struct btrfs_device *dev;
	struct device_scan *dev_scans;
	pthread_t *t_scans;
	int *t_rets;
	int devnr = 0;
	int devidx = 0;
	int cancel_from = 0;
	int cancel_to = -1;
	int i;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list)
		devnr++;
	dev_scans = (struct device_scan *)malloc(sizeof(struct device_scan)
						 * devnr);
	if (!dev_scans)
		return -ENOMEM;
	t_scans = (pthread_t *)malloc(sizeof(pthread_t) * devnr);
	if (!t_scans) {
		free(dev_scans);
		return -ENOMEM;
	}
	t_rets = (int *)malloc(sizeof(int) * devnr);
	if (!t_rets) {
		free(dev_scans);
		free(t_scans);
		return -ENOMEM;
	}

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		fd = open(dev->name, O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "Failed to open device %s\n",
				dev->name);
			ret = 1;
			cancel_from = 0;
			cancel_to = devidx - 1;
			goto out;
		}
		dev_scans[devidx].rc = rc;
		dev_scans[devidx].dev = dev;
		dev_scans[devidx].fd = fd;
		ret = pthread_create(&t_scans[devidx], NULL,
				     (void *)scan_one_device,
				     (void *)&dev_scans[devidx]);
		if (ret) {
			cancel_from = 0;
			cancel_to = devidx - 1;
			goto out;
		}
		devidx++;
	}

	for (i = 0; i < devidx; i++) {
		ret = pthread_join(t_scans[i], (void **)&t_rets[i]);
		if (ret || t_rets[i]) {
			ret = 1;
			cancel_from = i + 1;
			cancel_to = devnr - 1;
			break;
		}
	}
out:
	while (ret && (cancel_from <= cancel_to)) {
		pthread_cancel(t_scans[cancel_from]);
		cancel_from++;
	}
	free(dev_scans);
	free(t_scans);
	free(t_rets);
	return !!ret;
}
static int build_device_map_by_chunk_record(struct btrfs_root *root,
					    struct chunk_record *chunk)
{
	int ret = 0;
	int i;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u16 num_stripes;
	struct btrfs_mapping_tree *map_tree;
	struct map_lookup *map;
	struct stripe *stripe;

	map_tree = &root->fs_info->mapping_tree;
	num_stripes = chunk->num_stripes;
	map = malloc(btrfs_map_lookup_size(num_stripes));
	if (!map)
		return -ENOMEM;
	map->ce.start = chunk->offset;
	map->ce.size = chunk->length;
	map->num_stripes = num_stripes;
	map->io_width = chunk->io_width;
	map->io_align = chunk->io_align;
	map->sector_size = chunk->sector_size;
	map->stripe_len = chunk->stripe_len;
	map->type = chunk->type_flags;
	map->sub_stripes = chunk->sub_stripes;

	for (i = 0, stripe = chunk->stripes; i < num_stripes; i++, stripe++) {
		devid = stripe->devid;
		memcpy(uuid, stripe->dev_uuid, BTRFS_UUID_SIZE);
		map->stripes[i].physical = stripe->offset;
		map->stripes[i].dev = btrfs_find_device(root, devid,
							uuid, NULL);
		if (!map->stripes[i].dev) {
			free(map);
			return -EIO;
		}
	}

	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	return ret;
}
static int build_device_maps_by_chunk_records(struct recover_control *rc,
					      struct btrfs_root *root)
{
	int ret = 0;
	struct chunk_record *chunk;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	return ret;
}
static int block_group_remove_all_extent_items(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct block_group_record *bg)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path path;
	struct extent_buffer *leaf;
	u64 start = bg->objectid;
	u64 end = bg->objectid + bg->offset;
	u64 old_val;
	int nitems;
	int ret;
	int i;
	int del_s, del_nr;

	btrfs_init_path(&path);
	root = root->fs_info->extent_root;

	key.objectid = start;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
	if (ret < 0)
		goto err;
	else if (ret > 0)
		ret = 0;

	leaf = path.nodes[0];
	nitems = btrfs_header_nritems(leaf);
	if (!nitems) {
		/* The tree is empty. */
		ret = 0;
		goto err;
	}

	if (path.slots[0] >= nitems) {
		ret = btrfs_next_leaf(root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0) {
			ret = 0;
			goto err;
		}
		leaf = path.nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, 0);
		if (key.objectid >= end)
			goto err;
		btrfs_release_path(&path);
		goto again;
	}

	del_nr = 0;
	del_s = -1;
	for (i = path.slots[0]; i < nitems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid >= end)
			break;

		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (del_nr == 0)
				continue;
			else
				break;
		}

		if (del_s == -1)
			del_s = i;
		del_nr++;
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			old_val = btrfs_super_bytes_used(fs_info->super_copy);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				old_val += root->leafsize;
			else
				old_val += key.offset;
			btrfs_set_super_bytes_used(fs_info->super_copy,
						   old_val);
		}
	}

	if (del_nr) {
		ret = btrfs_del_items(trans, root, &path, del_s, del_nr);
		if (ret)
			goto err;
	}

	if (key.objectid < end) {
		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			key.objectid += root->sectorsize;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = 0;
		}
		btrfs_release_path(&path);
		goto again;
	}
err:
	btrfs_release_path(&path);
	return ret;
}
static int block_group_free_all_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct block_group_record *bg)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info;
	u64 start;
	u64 end;

	info = root->fs_info;
	cache = btrfs_lookup_block_group(info, bg->objectid);
	if (!cache)
		return -ENOENT;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	set_extent_bits(&info->block_group_cache, start, end,
			BLOCK_GROUP_DIRTY, GFP_NOFS);
	set_extent_dirty(&info->free_space_cache, start, end, GFP_NOFS);

	btrfs_set_block_group_used(&cache->item, 0);

	return 0;
}
static int remove_chunk_extent_item(struct btrfs_trans_handle *trans,
				    struct recover_control *rc,
				    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	int ret = 0;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		if (!(chunk->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;
		ret = block_group_remove_all_extent_items(trans, root,
							  chunk->bg_rec);
		if (ret)
			return ret;

		ret = block_group_free_all_extent(trans, root, chunk->bg_rec);
		if (ret)
			return ret;
	}
	return ret;
}
static int __rebuild_chunk_root(struct btrfs_trans_handle *trans,
				struct recover_control *rc,
				struct btrfs_root *root)
{
	u64 min_devid = -1;
	struct btrfs_device *dev;
	struct extent_buffer *cow;
	struct btrfs_disk_key disk_key;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		if (min_devid > dev->devid)
			min_devid = dev->devid;
	}
	disk_key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	disk_key.type = BTRFS_DEV_ITEM_KEY;
	disk_key.offset = min_devid;

	cow = btrfs_alloc_free_block(trans, root, root->nodesize,
				     BTRFS_CHUNK_TREE_OBJECTID,
				     &disk_key, 0, 0, 0);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_nritems(cow, 0);
	btrfs_set_header_level(cow, 0);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(cow, BTRFS_CHUNK_TREE_OBJECTID);
	write_extent_buffer(cow, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);

	write_extent_buffer(cow, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(cow),
			    BTRFS_UUID_SIZE);

	root->node = cow;
	btrfs_mark_buffer_dirty(cow);

	return 0;
}
static int __rebuild_device_items(struct btrfs_trans_handle *trans,
				  struct recover_control *rc,
				  struct btrfs_root *root)
{
	struct btrfs_device *dev;
	struct btrfs_key key;
	struct btrfs_dev_item *dev_item;
	int ret = 0;

	dev_item = malloc(sizeof(struct btrfs_dev_item));
	if (!dev_item)
		return -ENOMEM;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
		key.type = BTRFS_DEV_ITEM_KEY;
		key.offset = dev->devid;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		ret = btrfs_insert_item(trans, root, &key,
					dev_item, sizeof(*dev_item));
		if (ret)
			break;
	}

	free(dev_item);
	return ret;
}
static int __rebuild_chunk_items(struct btrfs_trans_handle *trans,
				 struct recover_control *rc,
				 struct btrfs_root *root)
{
	struct btrfs_key key;
	struct btrfs_chunk *chunk = NULL;
	struct btrfs_root *chunk_root;
	struct chunk_record *chunk_rec;
	int ret;

	chunk_root = root->fs_info->chunk_root;

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		chunk = create_chunk_item(chunk_rec);
		if (!chunk)
			return -ENOMEM;

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(chunk->num_stripes));
		free(chunk);
		if (ret)
			return ret;
	}
	return 0;
}
static int rebuild_chunk_tree(struct btrfs_trans_handle *trans,
			      struct recover_control *rc,
			      struct btrfs_root *root)
{
	int ret = 0;

	root = root->fs_info->chunk_root;

	ret = __rebuild_chunk_root(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_device_items(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_chunk_items(trans, rc, root);

	return ret;
}
static int rebuild_sys_array(struct recover_control *rc,
			     struct btrfs_root *root)
{
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct chunk_record *chunk_rec;
	int ret = 0;
	u16 num_stripes;

	btrfs_set_super_sys_array_size(root->fs_info->super_copy, 0);

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		if (!(chunk_rec->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;

		num_stripes = chunk_rec->num_stripes;
		chunk = create_chunk_item(chunk_rec);
		if (!chunk) {
			ret = -ENOMEM;
			break;
		}

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_add_system_chunk(NULL, root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
		free(chunk);
		if (ret)
			break;
	}
	return ret;
}
static struct btrfs_root *
open_ctree_with_broken_chunk(struct recover_control *rc)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_super_block *disk_super;
	struct extent_buffer *eb;
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 stripesize;
	int ret;

	fs_info = btrfs_new_fs_info(1, BTRFS_SUPER_INFO_OFFSET);
	if (!fs_info) {
		fprintf(stderr, "Failed to allocate memory for fs_info\n");
		return ERR_PTR(-ENOMEM);
	}
	fs_info->is_chunk_recover = 1;

	fs_info->fs_devices = rc->fs_devices;
	ret = btrfs_open_devices(fs_info->fs_devices, O_RDWR);
	if (ret)
		goto out;

	disk_super = fs_info->super_copy;
	ret = btrfs_read_dev_super(fs_info->fs_devices->latest_bdev,
				   disk_super, fs_info->super_bytenr);
	if (ret) {
		fprintf(stderr, "No valid btrfs found\n");
		goto out_devices;
	}

	memcpy(fs_info->fsid, &disk_super->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_check_fs_compatibility(disk_super, 1);
	if (ret)
		goto out_devices;

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     fs_info->chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	ret = build_device_maps_by_chunk_records(rc, fs_info->chunk_root);
	if (ret)
		goto out_cleanup;

	ret = btrfs_setup_all_roots(fs_info, 0, 0);
	if (ret)
		goto out_failed;

	eb = fs_info->tree_root->node;
	read_extent_buffer(eb, fs_info->chunk_tree_uuid,
			   btrfs_header_chunk_tree_uuid(eb),
			   BTRFS_UUID_SIZE);

	return fs_info->fs_root;
out_failed:
	btrfs_release_all_roots(fs_info);
out_cleanup:
	btrfs_cleanup_all_caches(fs_info);
out_devices:
	btrfs_close_devices(fs_info->fs_devices);
out:
	btrfs_free_fs_info(fs_info);
	return ERR_PTR(ret);
}
static int recover_prepare(struct recover_control *rc, char *path)
{
	int ret;
	int fd;
	struct btrfs_super_block *sb;
	struct btrfs_fs_devices *fs_devices;

	ret = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "open %s error.\n", path);
		return -1;
	}

	sb = malloc(sizeof(struct btrfs_super_block));
	if (!sb) {
		fprintf(stderr, "allocating memory for sb failed.\n");
		ret = -ENOMEM;
		goto fail_close_fd;
	}

	ret = btrfs_read_dev_super(fd, sb, BTRFS_SUPER_INFO_OFFSET);
	if (ret) {
		fprintf(stderr, "read super block error\n");
		goto fail_free_sb;
	}

	rc->sectorsize = btrfs_super_sectorsize(sb);
	rc->leafsize = btrfs_super_leafsize(sb);
	rc->generation = btrfs_super_generation(sb);
	rc->chunk_root_generation = btrfs_super_chunk_root_generation(sb);
	rc->csum_size = btrfs_super_csum_size(sb);

	/* if seed, the result of scanning below will be partial */
	if (btrfs_super_flags(sb) & BTRFS_SUPER_FLAG_SEEDING) {
		fprintf(stderr, "this device is a seed device\n");
		ret = -1;
		goto fail_free_sb;
	}

	ret = btrfs_scan_fs_devices(fd, path, &fs_devices, 0, 1);
	if (ret)
		goto fail_free_sb;

	rc->fs_devices = fs_devices;

	if (rc->verbose)
		print_all_devices(&rc->fs_devices->devices);

fail_free_sb:
	free(sb);
fail_close_fd:
	close(fd);
	return ret;
}
static int btrfs_get_device_extents(u64 chunk_object,
				    struct list_head *orphan_devexts,
				    struct list_head *ret_list)
{
	struct device_extent_record *devext;
	struct device_extent_record *next;
	int count = 0;

	list_for_each_entry_safe(devext, next, orphan_devexts, chunk_list) {
		if (devext->chunk_offset == chunk_object) {
			list_move_tail(&devext->chunk_list, ret_list);
			count++;
		}
	}
	return count;
}
static int calc_num_stripes(u64 type)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		return 0;	/* varies with the number of devices */
	else if (type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP))
		return 2;
	else
		return 1;
}

static inline int calc_sub_nstripes(u64 type)
{
	if (type & BTRFS_BLOCK_GROUP_RAID10)
		return 2;
	else
		return 1;
}
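/*
 * For example: RAID1 and DUP always keep exactly two copies, so exactly
 * two device extents are expected, and SINGLE keeps one. For the striped
 * profiles (RAID0/10/5/6) the stripe count depends on how many devices
 * the chunk was spread over, so 0 ("unknown") is returned and the real
 * count is taken from the device extents found on disk.
 */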
static int btrfs_verify_device_extents(struct block_group_record *bg,
				       struct list_head *devexts, int ndevexts)
{
	struct device_extent_record *devext;
	u64 stripe_length;
	int expected_num_stripes;

	expected_num_stripes = calc_num_stripes(bg->flags);
	if (expected_num_stripes && expected_num_stripes != ndevexts)
		return 1;

	stripe_length = calc_stripe_length(bg->flags, bg->offset, ndevexts);
	list_for_each_entry(devext, devexts, chunk_list) {
		if (devext->length != stripe_length)
			return 1;
	}
	return 0;
}
static int btrfs_rebuild_unordered_chunk_stripes(struct recover_control *rc,
						 struct chunk_record *chunk)
{
	struct device_extent_record *devext;
	struct btrfs_device *device;
	int i;

	devext = list_first_entry(&chunk->dextents, struct device_extent_record,
				  chunk_list);
	for (i = 0; i < chunk->num_stripes; i++) {
		chunk->stripes[i].devid = devext->objectid;
		chunk->stripes[i].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device)
			return -ENOENT;
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[i].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
		devext = list_next_entry(devext, chunk_list);
	}
	return 0;
}
static int btrfs_calc_stripe_index(struct chunk_record *chunk, u64 logical)
{
	u64 offset = logical - chunk->offset;
	int stripe_nr;
	int nr_data_stripes;
	int index = 0;

	stripe_nr = offset / chunk->stripe_len;
	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID0) {
		index = stripe_nr % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID10) {
		index = stripe_nr % (chunk->num_stripes / chunk->sub_stripes);
		index *= chunk->sub_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5) {
		nr_data_stripes = chunk->num_stripes - 1;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6) {
		nr_data_stripes = chunk->num_stripes - 2;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	}
	return index;
}
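/*
 * Worked example (illustrative): for a RAID10 chunk with num_stripes = 4
 * and sub_stripes = 2 there are 4 / 2 = 2 stripe pairs. The slice with
 * stripe_nr = 2 maps to index = (2 % 2) * 2 = 0, i.e. back to the first
 * pair; both members of a pair mirror the same data, which is why only
 * the first stripe of each pair needs to be located exactly.
 */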
/* calc the logical offset which is the start of the next stripe. */
static inline u64 btrfs_next_stripe_logical_offset(struct chunk_record *chunk,
						   u64 logical)
{
	u64 offset = logical - chunk->offset;

	offset /= chunk->stripe_len;
	offset *= chunk->stripe_len;
	offset += chunk->stripe_len;

	return offset + chunk->offset;
}
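/*
 * E.g. with chunk->offset = 1MiB and stripe_len = 64KiB, a logical
 * address 70KiB into the chunk rounds down to the stripe starting at
 * 64KiB, and the function returns the chunk offset plus 128KiB, the
 * logical start of the following stripe.
 */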
static int is_extent_record_in_device_extent(struct extent_record *er,
					     struct device_extent_record *dext,
					     int *mirror)
{
	int i;

	for (i = 0; i < er->nmirrors; i++) {
		if (er->devices[i]->devid == dext->objectid &&
		    er->offsets[i] >= dext->offset &&
		    er->offsets[i] < dext->offset + dext->length) {
			*mirror = i;
			return 1;
		}
	}
	return 0;
}
static int
btrfs_rebuild_ordered_meta_chunk_stripes(struct recover_control *rc,
					 struct chunk_record *chunk)
{
	u64 start = chunk->offset;
	u64 end = chunk->offset + chunk->length;
	struct cache_extent *cache;
	struct extent_record *er;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *device;
	LIST_HEAD(devexts);
	int index;
	int mirror;
	int ret;

	cache = lookup_cache_extent(&rc->eb_cache,
				    start, chunk->length);
	if (!cache) {
		/* No used space, we can reorder the stripes freely. */
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);
		return ret;
	}

	list_splice_init(&chunk->dextents, &devexts);
again:
	er = container_of(cache, struct extent_record, cache);
	index = btrfs_calc_stripe_index(chunk, er->cache.start);
	if (chunk->stripes[index].devid)
		goto next;
	list_for_each_entry_safe(devext, next, &devexts, chunk_list) {
		if (is_extent_record_in_device_extent(er, devext, &mirror)) {
			chunk->stripes[index].devid = devext->objectid;
			chunk->stripes[index].offset = devext->offset;
			memcpy(chunk->stripes[index].dev_uuid,
			       er->devices[mirror]->uuid,
			       BTRFS_UUID_SIZE);
			list_move(&devext->chunk_list, &chunk->dextents);
			break;
		}
	}
next:
	start = btrfs_next_stripe_logical_offset(chunk, er->cache.start);
	if (start >= end)
		goto no_extent_record;

	cache = lookup_cache_extent(&rc->eb_cache, start, end - start);
	if (cache)
		goto again;
no_extent_record:
	if (list_empty(&devexts))
		return 0;

	if (chunk->type_flags & (BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_RAID6)) {
		/* FIXME: try to recover the order by the parity block. */
		list_splice_tail(&devexts, &chunk->dextents);
		return -EINVAL;
	}

	/* There is no data on the lost stripes, we can reorder them freely. */
	for (index = 0; index < chunk->num_stripes; index++) {
		if (chunk->stripes[index].devid)
			continue;

		devext = list_first_entry(&devexts,
					  struct device_extent_record,
					  chunk_list);
		list_move(&devext->chunk_list, &chunk->dextents);

		chunk->stripes[index].devid = devext->objectid;
		chunk->stripes[index].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device) {
			list_splice_tail(&devexts, &chunk->dextents);
			return -EINVAL;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[index].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
	}
	return 0;
}
#define BTRFS_ORDERED_RAID	(BTRFS_BLOCK_GROUP_RAID0 |	\
				 BTRFS_BLOCK_GROUP_RAID10 |	\
				 BTRFS_BLOCK_GROUP_RAID5 |	\
				 BTRFS_BLOCK_GROUP_RAID6)
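/*
 * These are the profiles where the on-disk stripe order matters: data is
 * striped (and, for RAID5/6, parity is rotated) across the devices, so
 * stripes cannot be assigned to device extents in arbitrary order the
 * way SINGLE/DUP/RAID1 stripes can.
 */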
static int btrfs_rebuild_chunk_stripes(struct recover_control *rc,
				       struct chunk_record *chunk)
{
	int ret;

	/*
	 * All the data in the system metadata chunk will be dropped,
	 * so we need not guarantee that the data is right; that is,
	 * we can reorder the stripes in the system metadata chunk.
	 */
	if ((chunk->type_flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = btrfs_rebuild_ordered_meta_chunk_stripes(rc, chunk);
	else if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA) &&
		 (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = 1;	/* Be handled after the fs is opened. */
	else
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);

	return ret;
}
static int next_csum(struct btrfs_root *root,
		     struct extent_buffer **leaf,
		     struct btrfs_path *path,
		     int *slot,
		     u64 *csum_offset,
		     u32 *tree_csum,
		     u64 end,
		     struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_root *csum_root = root->fs_info->csum_root;
	struct btrfs_csum_item *csum_item;
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item = btrfs_item_size_nr(*leaf, *slot) / csum_size;

	if (*csum_offset >= csums_in_item) {
		++(*slot);
		*csum_offset = 0;
		if (*slot >= btrfs_header_nritems(*leaf)) {
			ret = btrfs_next_leaf(csum_root, path);
			if (ret < 0)
				return -1;
			else if (ret > 0)
				return 1;
			*leaf = path->nodes[0];
			*slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(*leaf, key, *slot);
	}

	if (key->offset + (*csum_offset) * blocksize >= end)
		return 2;
	csum_item = btrfs_item_ptr(*leaf, *slot, struct btrfs_csum_item);
	csum_item = (struct btrfs_csum_item *)((unsigned char *)csum_item
					       + (*csum_offset) * csum_size);
	read_extent_buffer(*leaf, tree_csum,
			   (unsigned long)csum_item, csum_size);
	return ret;
}
static u64 calc_data_offset(struct btrfs_key *key,
			    struct chunk_record *chunk,
			    u64 dev_offset,
			    u64 csum_offset,
			    u32 blocksize)
{
	u64 data_offset;
	int logical_stripe_nr;
	int dev_stripe_nr;
	int nr_data_stripes;

	data_offset = key->offset + csum_offset * blocksize - chunk->offset;
	nr_data_stripes = chunk->num_stripes;

	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5)
		nr_data_stripes -= 1;
	else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6)
		nr_data_stripes -= 2;

	logical_stripe_nr = data_offset / chunk->stripe_len;
	dev_stripe_nr = logical_stripe_nr / nr_data_stripes;

	data_offset -= logical_stripe_nr * chunk->stripe_len;
	data_offset += dev_stripe_nr * chunk->stripe_len;

	return dev_offset + data_offset;
}
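/*
 * Worked example (illustrative): RAID5 with num_stripes = 3 gives
 * nr_data_stripes = 2; with stripe_len = 64KiB, a block 192KiB into the
 * chunk has logical_stripe_nr = 3 and dev_stripe_nr = 3 / 2 = 1, so it
 * sits 0 bytes into its stripe and 64KiB into its device extent: the
 * function returns dev_offset + 64KiB.
 */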
static int check_one_csum(int fd, u64 start, u32 len, u32 tree_csum)
{
	char *data;
	int ret = 0;
	u32 csum_result = ~(u32)0;

	data = malloc(len);
	if (!data)
		return -1;
	ret = pread64(fd, data, len, start);
	if (ret < 0 || ret != len) {
		ret = -1;
		goto out;
	}
	ret = 0;
	csum_result = btrfs_csum_data(NULL, data, csum_result, len);
	btrfs_csum_final(csum_result, (char *)&csum_result);
	if (csum_result != tree_csum)
		ret = 1;
out:
	free(data);
	return ret;
}
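/*
 * The checksum is computed the way the kernel computes it: crc32c over
 * one sectorsize block, seeded with ~0 and finalized, then compared with
 * the 4-byte csum stored in the csum tree. Returns 0 on match, 1 on
 * mismatch and -1 on a read error.
 */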
static u64 item_end_offset(struct btrfs_root *root, struct btrfs_key *key,
			   struct extent_buffer *leaf, int slot) {
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	u64 offset = btrfs_item_size_nr(leaf, slot);
	offset /= csum_size;
	offset *= blocksize;
	offset += key->offset;

	return offset;
}
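/*
 * Example: a csum item holding 32 bytes of crc32c csums (csum_size = 4)
 * covers 8 blocks, so with a 4KiB sectorsize the item ends
 * 8 * 4096 = 32768 bytes past key->offset.
 */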
static int insert_stripe(struct list_head *devexts,
			 struct recover_control *rc,
			 struct chunk_record *chunk,
			 int index)
{
	struct device_extent_record *devext;
	struct btrfs_device *dev;

	devext = list_entry(devexts->next, struct device_extent_record,
			    chunk_list);
	dev = btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					 0);
	if (!dev)
		return -ENOENT;
	BUG_ON(btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					  1));

	chunk->stripes[index].devid = devext->objectid;
	chunk->stripes[index].offset = devext->offset;
	memcpy(chunk->stripes[index].dev_uuid, dev->uuid, BTRFS_UUID_SIZE);

	list_move(&devext->chunk_list, &chunk->dextents);

	return 0;
}
static inline int count_devext_records(struct list_head *record_list)
{
	int num_of_records = 0;
	struct device_extent_record *devext;

	list_for_each_entry(devext, record_list, chunk_list)
		num_of_records++;

	return num_of_records;
}
static int fill_chunk_up(struct chunk_record *chunk, struct list_head *devexts,
			 struct recover_control *rc)
{
	int ret = 0;
	int i;

	for (i = 0; i < chunk->num_stripes; i++) {
		if (!chunk->stripes[i].devid) {
			ret = insert_stripe(devexts, rc, chunk, i);
			if (ret)
				break;
		}
	}

	return ret;
}
#define EQUAL_STRIPE (1 << 0)
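/*
 * EQUAL_STRIPE is reported back through the *flags argument of
 * rebuild_raid_data_chunk_stripes() when the csum check cannot tell the
 * candidate stripes apart (more than one candidate matches every csum);
 * the caller then prints the "too many equal stripes" failure below.
 */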
static int rebuild_raid_data_chunk_stripes(struct recover_control *rc,
					   struct btrfs_root *root,
					   struct chunk_record *chunk,
					   u8 *flags)
{
	int i;
	int ret = 0;
	int slot;
	struct btrfs_path path;
	struct btrfs_key prev_key;
	struct btrfs_key key;
	struct btrfs_root *csum_root;
	struct extent_buffer *leaf;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *dev;
	u64 start = chunk->offset;
	u64 end = start + chunk->stripe_len;
	u64 chunk_end = chunk->offset + chunk->length;
	u64 csum_offset = 0;
	u64 data_offset;
	u32 blocksize = root->sectorsize;
	u32 tree_csum;
	int index = 0;
	int num_unordered = 0;
	LIST_HEAD(unordered);
	LIST_HEAD(candidates);

	csum_root = root->fs_info->csum_root;
	btrfs_init_path(&path);
	list_splice_init(&chunk->dextents, &candidates);
again:
	if (list_is_last(candidates.next, &candidates))
		goto out;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, csum_root, &key, &path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search csum failed(%d)\n", ret);
		goto fail_out;
	}
	leaf = path.nodes[0];
	slot = path.slots[0];
	if (ret > 0) {
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(csum_root, &path);
			if (ret < 0) {
				fprintf(stderr,
					"Walk tree failed(%d)\n", ret);
				goto fail_out;
			} else if (ret > 0) {
				slot = btrfs_header_nritems(leaf) - 1;
				btrfs_item_key_to_cpu(leaf, &key, slot);
				if (item_end_offset(root, &key, leaf, slot)
								> start) {
					csum_offset = start - key.offset;
					csum_offset /= blocksize;
					goto next_csum;
				}
				goto out;
			}
			leaf = path.nodes[0];
			slot = path.slots[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		ret = btrfs_previous_item(csum_root, &path, 0,
					  BTRFS_EXTENT_CSUM_KEY);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0) {
			if (key.offset >= end)
				goto next_stripe;
			else
				goto next_csum;
		}
		leaf = path.nodes[0];
		slot = path.slots[0];

		btrfs_item_key_to_cpu(leaf, &prev_key, slot);
		if (item_end_offset(root, &prev_key, leaf, slot) > start) {
			csum_offset = start - prev_key.offset;
			csum_offset /= blocksize;
			btrfs_item_key_to_cpu(leaf, &key, slot);
		} else {
			if (key.offset >= end)
				goto next_stripe;
		}

		if (key.offset + csum_offset * blocksize > chunk_end)
			goto out;
	}
next_csum:
	ret = next_csum(root, &leaf, &path, &slot, &csum_offset, &tree_csum,
			end, &key);
	if (ret < 0) {
		fprintf(stderr, "Fetch csum failed\n");
		goto fail_out;
	} else if (ret == 1) {
		if (!(*flags & EQUAL_STRIPE))
			*flags |= EQUAL_STRIPE;
		goto out;
	} else if (ret == 2)
		goto next_stripe;

	list_for_each_entry_safe(devext, next, &candidates, chunk_list) {
		data_offset = calc_data_offset(&key, chunk, devext->offset,
					       csum_offset, blocksize);
		dev = btrfs_find_device_by_devid(rc->fs_devices,
						 devext->objectid, 0);
		if (!dev) {
			ret = 1;
			goto fail_out;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid, 1));

		ret = check_one_csum(dev->fd, data_offset, blocksize,
				     tree_csum);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0)
			list_move(&devext->chunk_list, &unordered);
	}

	if (list_empty(&candidates)) {
		num_unordered = count_devext_records(&unordered);
		if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6
					&& num_unordered == 2) {
			btrfs_release_path(&path);
			ret = fill_chunk_up(chunk, &unordered, rc);
			return ret;
		}

		goto next_stripe;
	}

	if (list_is_last(candidates.next, &candidates)) {
		index = btrfs_calc_stripe_index(chunk,
			key.offset + csum_offset * blocksize);
		if (chunk->stripes[index].devid)
			goto next_stripe;
		ret = insert_stripe(&candidates, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		csum_offset++;
		goto next_csum;
	}
next_stripe:
	start = btrfs_next_stripe_logical_offset(chunk, start);
	end = min(start + chunk->stripe_len, chunk_end);
	list_splice_init(&unordered, &candidates);
	btrfs_release_path(&path);
	csum_offset = 0;
	if (end < chunk_end)
		goto again;
out:
	list_splice_init(&candidates, &unordered);
	num_unordered = count_devext_records(&unordered);
	if (num_unordered == 1) {
		for (i = 0; i < chunk->num_stripes; i++) {
			if (!chunk->stripes[i].devid) {
				index = i;
				break;
			}
		}
		ret = insert_stripe(&unordered, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else if ((num_unordered == 2 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID5)
		|| (num_unordered == 3 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID6)) {
		ret = fill_chunk_up(chunk, &unordered, rc);
	}
fail_out:
	ret = !!ret || (list_empty(&unordered) ? 0 : 1);
	list_splice_init(&candidates, &chunk->dextents);
	list_splice_init(&unordered, &chunk->dextents);
	btrfs_release_path(&path);

	return ret;
}
static int btrfs_rebuild_ordered_data_chunk_stripes(struct recover_control *rc,
						    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	int ret = 0;
	int err;
	u8 flags;

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA)
		 && (chunk->type_flags & BTRFS_ORDERED_RAID)) {
			flags = 0;
			err = rebuild_raid_data_chunk_stripes(rc, root, chunk,
							      &flags);
			if (err) {
				list_move(&chunk->list, &rc->bad_chunks);
				if (flags & EQUAL_STRIPE)
					fprintf(stderr,
			"Failure: too many equal stripes in chunk[%llu %llu]\n",
						chunk->offset, chunk->length);
				if (!ret)
					ret = err;
			} else
				list_move(&chunk->list, &rc->good_chunks);
		}
	}
	return ret;
}
static int btrfs_recover_chunks(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct block_group_record *next;
	LIST_HEAD(new_chunks);
	LIST_HEAD(devexts);
	int nstripes;
	int ret;

	/* create the chunk by block group */
	list_for_each_entry_safe(bg, next, &rc->bg.block_groups, list) {
		nstripes = btrfs_get_device_extents(bg->objectid,
						    &rc->devext.no_chunk_orphans,
						    &devexts);
		chunk = malloc(btrfs_chunk_record_size(nstripes));
		if (!chunk)
			return -ENOMEM;
		memset(chunk, 0, btrfs_chunk_record_size(nstripes));
		INIT_LIST_HEAD(&chunk->dextents);
		chunk->bg_rec = bg;
		chunk->cache.start = bg->objectid;
		chunk->cache.size = bg->offset;
		chunk->objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		chunk->type = BTRFS_CHUNK_ITEM_KEY;
		chunk->offset = bg->objectid;
		chunk->generation = bg->generation;
		chunk->length = bg->offset;
		chunk->owner = BTRFS_CHUNK_TREE_OBJECTID;
		chunk->stripe_len = BTRFS_STRIPE_LEN;
		chunk->type_flags = bg->flags;
		chunk->io_width = BTRFS_STRIPE_LEN;
		chunk->io_align = BTRFS_STRIPE_LEN;
		chunk->sector_size = rc->sectorsize;
		chunk->sub_stripes = calc_sub_nstripes(bg->flags);

		ret = insert_cache_extent(&rc->chunk, &chunk->cache);
		BUG_ON(ret);

		if (!nstripes) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		list_splice_init(&devexts, &chunk->dextents);

		ret = btrfs_verify_device_extents(bg, &devexts, nstripes);
		if (ret) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		chunk->num_stripes = nstripes;
		ret = btrfs_rebuild_chunk_stripes(rc, chunk);
		if (ret > 0)
			list_add_tail(&chunk->list, &rc->unrepaired_chunks);
		else if (ret < 0)
			list_add_tail(&chunk->list, &rc->bad_chunks);
		else
			list_add_tail(&chunk->list, &rc->good_chunks);
	}
	/*
	 * Don't worry about the lost orphan device extents: they have no
	 * corresponding chunk or block group, so they must belong to old
	 * chunks that were dropped from the fs.
	 */
	return 0;
}
/*
 * Return 0 when successful, < 0 on error and > 0 if aborted by user
 */
int btrfs_recover_chunk_tree(char *path, int verbose, int yes)
{
	int ret = 0;
	struct btrfs_root *root = NULL;
	struct btrfs_trans_handle *trans;
	struct recover_control rc;

	init_recover_control(&rc, verbose, yes);

	ret = recover_prepare(&rc, path);
	if (ret) {
		fprintf(stderr, "recover prepare error\n");
		return ret;
	}

	ret = scan_devices(&rc);
	if (ret) {
		fprintf(stderr, "scan chunk headers error\n");
		goto fail_rc;
	}

	if (cache_tree_empty(&rc.chunk) &&
	    cache_tree_empty(&rc.bg.tree) &&
	    cache_tree_empty(&rc.devext.tree)) {
		fprintf(stderr, "no recoverable chunk\n");
		goto fail_rc;
	}

	print_scan_result(&rc);

	ret = check_chunks(&rc.chunk, &rc.bg, &rc.devext, &rc.good_chunks,
			   &rc.bad_chunks, 1);
	print_check_result(&rc);
	if (ret) {
		if (!list_empty(&rc.bg.block_groups) ||
		    !list_empty(&rc.devext.no_chunk_orphans)) {
			ret = btrfs_recover_chunks(&rc);
			if (ret)
				goto fail_rc;
		}
		/*
		 * If the chunk is healthy, its block group item and device
		 * extent item should be written on the disks. So, it is very
		 * likely that the bad chunk is an old one that has been
		 * dropped from the fs. Don't deal with them now, we will
		 * check them after the fs is opened.
		 */
	} else {
		fprintf(stderr, "Check chunks successfully with no orphans\n");
		goto fail_rc;
	}

	root = open_ctree_with_broken_chunk(&rc);
	if (IS_ERR(root)) {
		fprintf(stderr, "open with broken chunk error\n");
		ret = PTR_ERR(root);
		goto fail_rc;
	}

	ret = check_all_chunks_by_metadata(&rc, root);
	if (ret) {
		fprintf(stderr, "The chunks in memory can not match the metadata of the fs. Repair failed.\n");
		goto fail_close_ctree;
	}

	ret = btrfs_rebuild_ordered_data_chunk_stripes(&rc, root);
	if (ret) {
		fprintf(stderr, "Failed to rebuild ordered chunk stripes.\n");
		goto fail_close_ctree;
	}

	if (!rc.yes) {
		ret = ask_user("We are going to rebuild the chunk tree on disk, it might destroy the old metadata on the disk, Are you sure?");
		if (!ret) {
			ret = 1;
			goto fail_close_ctree;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	ret = remove_chunk_extent_item(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_chunk_tree(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_sys_array(&rc, root);
	BUG_ON(ret);

	btrfs_commit_transaction(trans, root);
fail_close_ctree:
	close_ctree(root);
fail_rc:
	free_recover_control(&rc);
	return ret;
}