/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>
#include "kerncompat.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "utils.h"
#include "volumes.h"
#include "extent_io.h"
#define HEADER_MAGIC		0xbd5c25e27295668bULL
#define MAX_PENDING_SIZE	(256 * 1024)
#define BLOCK_SIZE		1024
#define BLOCK_MASK		(BLOCK_SIZE - 1)

#define COMPRESS_NONE		0
#define COMPRESS_ZLIB		1
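
/*
 * Image layout, as implied by the reader/writer code below: a metadump is
 * a stream of BLOCK_SIZE-aligned clusters.  Each cluster begins with a
 * meta_cluster block (header plus an index of items) and is followed by
 * the item payloads, zlib-compressed when header->compress says so.
 * HEADER_MAGIC identifies a cluster header; unused space in the last
 * block of a cluster is zero-filled.
 */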
struct meta_cluster_item {
	__le64 bytenr;
	__le32 size;
} __attribute__ ((__packed__));
struct meta_cluster_header {
	__le64 magic;
	__le64 bytenr;
	__le32 nritems;
	u8 compress;
} __attribute__ ((__packed__));
/* cluster header + index items + buffers */
struct meta_cluster {
	struct meta_cluster_header header;
	struct meta_cluster_item items[];
} __attribute__ ((__packed__));
#define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
			   sizeof(struct meta_cluster_item))
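
/*
 * Worked example of the math above, assuming the packed layouts shown:
 * sizeof(struct meta_cluster) is 21 bytes (8 + 8 + 4 + 1) and each index
 * item is 12 bytes (8 + 4), so a 1024-byte block holds (1024 - 21) / 12
 * = 83 index entries.
 */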
struct async_work {
	struct list_head list;
	struct list_head ordered;
	u64 start;
	u64 size;
	u8 *buffer;
	size_t bufsize;
	int error;
};
struct metadump_struct {
	struct btrfs_root *root;
	FILE *out;

	struct meta_cluster *cluster;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	struct rb_root name_tree;

	struct list_head list;
	struct list_head ordered;
	size_t num_items;
	size_t num_ready;

	u64 pending_start;
	u64 pending_size;

	int compress_level;
	int done;
	int data;
	int sanitize_names;

	int error;
};

struct name {
	struct rb_node n;
	char *val;
	char *sub;
	u32 len;
};
struct fs_chunk {
	u64 logical;
	u64 physical;
	u64 bytes;
	struct rb_node n;
};

struct mdrestore_struct {
	FILE *in;
	FILE *out;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;

	struct rb_root chunk_tree;
	struct list_head list;
	size_t num_items;
	u32 leafsize;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u8 fsid[BTRFS_FSID_SIZE];

	int compress_method;
	int done;
	int error;
	int old_restore;
	int fixup_offset;
	int multi_devices;

	struct btrfs_fs_info *info;
};
static void print_usage(void) __attribute__((noreturn));
static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
				   u64 search, u64 cluster_bytenr);
static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);
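
/*
 * Checksum a block the way btrfs checksums metadata: crc32c over
 * everything past the checksum area, folded with btrfs_csum_final() and
 * stored back at the front of the block.
 */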
static void csum_block(u8 *buf, size_t len)
{
	char result[BTRFS_CRC32_SIZE];
	u32 crc = ~(u32)0;

	crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, result);
	memcpy(buf, result, BTRFS_CRC32_SIZE);
}
static int has_name(struct btrfs_key *key)
{
	switch (key->type) {
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_INODE_REF_KEY:
	case BTRFS_INODE_EXTREF_KEY:
	case BTRFS_XATTR_ITEM_KEY:
		return 1;
	default:
		return 0;
	}
}
static char *generate_garbage(u32 name_len)
{
	char *buf = malloc(name_len);
	int i;

	if (!buf)
		return NULL;

	for (i = 0; i < name_len; i++) {
		char c = rand() % 94 + 33;

		if (c == '/')
			c++;
		buf[i] = c;
	}

	return buf;
}
static int name_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
	struct name *entry = rb_entry(a, struct name, n);
	struct name *ins = rb_entry(b, struct name, n);
	u32 len;

	len = min(ins->len, entry->len);
	return memcmp(ins->val, entry->val, len);
}
static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
	struct fs_chunk *entry = rb_entry(a, struct fs_chunk, n);
	struct fs_chunk *ins = rb_entry(b, struct fs_chunk, n);

	if (fuzz && ins->logical >= entry->logical &&
	    ins->logical < entry->logical + entry->bytes)
		return 0;

	if (ins->logical < entry->logical)
		return -1;
	else if (ins->logical > entry->logical)
		return 1;
	return 0;
}
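
/*
 * With fuzz set, a search key compares equal to any chunk whose
 * [logical, logical + bytes) range covers it, which lets tree_search()
 * find the chunk containing an arbitrary logical address.
 */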
static void tree_insert(struct rb_root *root, struct rb_node *ins,
			int (*cmp)(struct rb_node *a, struct rb_node *b,
				   int fuzz))
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	int dir;

	while (*p) {
		parent = *p;

		dir = cmp(*p, ins, 0);
		if (dir < 0)
			p = &(*p)->rb_left;
		else if (dir > 0)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(ins, parent, p);
	rb_insert_color(ins, root);
}
static struct rb_node *tree_search(struct rb_root *root,
				   struct rb_node *search,
				   int (*cmp)(struct rb_node *a,
					      struct rb_node *b, int fuzz),
				   int fuzz)
{
	struct rb_node *n = root->rb_node;
	int dir;

	while (n) {
		dir = cmp(n, search, fuzz);
		if (dir < 0)
			n = n->rb_left;
		else if (dir > 0)
			n = n->rb_right;
		else
			return n;
	}

	return NULL;
}
static char *find_collision(struct metadump_struct *md, char *name,
			    u32 name_len)
{
	struct name *val;
	struct rb_node *entry;
	struct name tmp;
	unsigned long checksum;
	int found = 0;
	int i;

	tmp.val = name;
	tmp.len = name_len;
	entry = tree_search(&md->name_tree, &tmp.n, name_cmp, 0);
	if (entry) {
		val = rb_entry(entry, struct name, n);
		free(name);
		return val->sub;
	}

	val = malloc(sizeof(struct name));
	if (!val) {
		fprintf(stderr, "Couldn't sanitize name, enomem\n");
		free(name);
		return NULL;
	}

	memset(val, 0, sizeof(*val));

	val->val = name;
	val->len = name_len;
	val->sub = malloc(name_len);
	if (!val->sub) {
		fprintf(stderr, "Couldn't sanitize name, enomem\n");
		free(val);
		free(name);
		return NULL;
	}

	checksum = crc32c(~1, val->val, name_len);
	memset(val->sub, ' ', name_len);
	i = 0;
	while (1) {
		if (crc32c(~1, val->sub, name_len) == checksum &&
		    memcmp(val->sub, val->val, val->len)) {
			found = 1;
			break;
		}

		if (val->sub[i] == 127) {
			do {
				i++;
				if (i >= name_len)
					break;
			} while (val->sub[i] == 127);
			if (i >= name_len)
				break;
			val->sub[i]++;
			if (val->sub[i] == '/')
				val->sub[i]++;
			memset(val->sub, ' ', i);
			i = 0;
			continue;
		} else {
			val->sub[i]++;
			if (val->sub[i] == '/')
				val->sub[i]++;
		}
	}

	if (!found) {
		fprintf(stderr, "Couldn't find a collision for '%.*s', "
			"generating normal garbage, it won't match indexes\n",
			val->len, val->val);
		for (i = 0; i < name_len; i++) {
			char c = rand() % 94 + 33;

			if (c == '/')
				c++;
			val->sub[i] = c;
		}
	}

	tree_insert(&md->name_tree, &val->n, name_cmp);
	return val->sub;
}
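
/*
 * Dir items are packed back to back inside one leaf item; walk them and
 * replace each name with garbage (-s) or, with -ss, a crc32c-colliding
 * substitute from find_collision() so hashed dir indexes stay valid.
 */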
static void sanitize_dir_item(struct metadump_struct *md, struct extent_buffer *eb,
			      int slot)
{
	struct btrfs_dir_item *dir_item;
	char *buf;
	char *garbage;
	unsigned long name_ptr;
	u32 total_len;
	u32 cur = 0;
	u32 this_len;
	u32 name_len;
	int free_garbage = (md->sanitize_names == 1);

	dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	total_len = btrfs_item_size_nr(eb, slot);
	while (cur < total_len) {
		this_len = sizeof(*dir_item) +
			btrfs_dir_name_len(eb, dir_item) +
			btrfs_dir_data_len(eb, dir_item);
		name_ptr = (unsigned long)(dir_item + 1);
		name_len = btrfs_dir_name_len(eb, dir_item);

		if (md->sanitize_names > 1) {
			buf = malloc(name_len);
			if (!buf) {
				fprintf(stderr, "Couldn't sanitize name, "
					"enomem\n");
				return;
			}
			read_extent_buffer(eb, buf, name_ptr, name_len);
			garbage = find_collision(md, buf, name_len);
		} else {
			garbage = generate_garbage(name_len);
		}
		if (!garbage) {
			fprintf(stderr, "Couldn't sanitize name, enomem\n");
			return;
		}
		write_extent_buffer(eb, garbage, name_ptr, name_len);
		if (free_garbage)
			free(garbage);
		cur += this_len;
		dir_item = (struct btrfs_dir_item *)((char *)dir_item +
						     this_len);
	}
}
static void sanitize_inode_ref(struct metadump_struct *md,
			       struct extent_buffer *eb, int slot, int ext)
{
	struct btrfs_inode_extref *extref;
	struct btrfs_inode_ref *ref;
	char *garbage, *buf;
	unsigned long ptr;
	unsigned long name_ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int len;
	int free_garbage = (md->sanitize_names == 1);

	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	while (cur_offset < item_size) {
		if (ext) {
			extref = (struct btrfs_inode_extref *)(ptr +
							       cur_offset);
			name_ptr = (unsigned long)(&extref->name);
			len = btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		} else {
			ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
			len = btrfs_inode_ref_name_len(eb, ref);
			name_ptr = (unsigned long)(ref + 1);
			cur_offset += sizeof(*ref);
		}
		cur_offset += len;

		if (md->sanitize_names > 1) {
			buf = malloc(len);
			if (!buf) {
				fprintf(stderr, "Couldn't sanitize name, "
					"enomem\n");
				return;
			}
			read_extent_buffer(eb, buf, name_ptr, len);
			garbage = find_collision(md, buf, len);
		} else {
			garbage = generate_garbage(len);
		}

		if (!garbage) {
			fprintf(stderr, "Couldn't sanitize name, enomem\n");
			return;
		}
		write_extent_buffer(eb, garbage, name_ptr, len);
		if (free_garbage)
			free(garbage);
	}
}
static void sanitize_xattr(struct metadump_struct *md,
			   struct extent_buffer *eb, int slot)
{
	struct btrfs_dir_item *dir_item;
	unsigned long data_ptr;
	u32 data_len;

	dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	data_len = btrfs_dir_data_len(eb, dir_item);

	data_ptr = (unsigned long)((char *)(dir_item + 1) +
				   btrfs_dir_name_len(eb, dir_item));
	memset_extent_buffer(eb, 0, data_ptr, data_len);
}
static void sanitize_name(struct metadump_struct *md, u8 *dst,
			  struct extent_buffer *src, struct btrfs_key *key,
			  int slot)
{
	struct extent_buffer *eb;

	eb = alloc_dummy_eb(src->start, src->len);
	if (!eb) {
		fprintf(stderr, "Couldn't sanitize name, no memory\n");
		return;
	}

	memcpy(eb->data, dst, eb->len);

	switch (key->type) {
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
		sanitize_dir_item(md, eb, slot);
		break;
	case BTRFS_INODE_REF_KEY:
		sanitize_inode_ref(md, eb, slot, 0);
		break;
	case BTRFS_INODE_EXTREF_KEY:
		sanitize_inode_ref(md, eb, slot, 1);
		break;
	case BTRFS_XATTR_ITEM_KEY:
		sanitize_xattr(md, eb, slot);
		break;
	default:
		break;
	}

	memcpy(dst, eb->data, eb->len);
	free(eb);
}
/*
 * zero inline extents and csum items
 */
static void zero_items(struct metadump_struct *md, u8 *dst,
		       struct extent_buffer *src)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_item *item;
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(src);
	size_t size;
	unsigned long ptr;
	int i, extent_type;

	for (i = 0; i < nritems; i++) {
		item = btrfs_item_nr(i);
		btrfs_item_key_to_cpu(src, &key, i);
		if (key.type == BTRFS_CSUM_ITEM_KEY) {
			size = btrfs_item_size_nr(src, i);
			memset(dst + btrfs_leaf_data(src) +
			       btrfs_item_offset_nr(src, i), 0, size);
			continue;
		}

		if (md->sanitize_names && has_name(&key)) {
			sanitize_name(md, dst, src, &key, i);
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(src, fi);
		if (extent_type != BTRFS_FILE_EXTENT_INLINE)
			continue;

		ptr = btrfs_file_extent_inline_start(fi);
		size = btrfs_file_extent_inline_item_len(src, item);
		memset(dst + ptr, 0, size);
	}
}
/*
 * copy buffer and zero useless data in the buffer
 */
static void copy_buffer(struct metadump_struct *md, u8 *dst,
			struct extent_buffer *src)
{
	int level;
	size_t size;
	u32 nritems;

	memcpy(dst, src->data, src->len);
	if (src->start == BTRFS_SUPER_INFO_OFFSET)
		return;

	level = btrfs_header_level(src);
	nritems = btrfs_header_nritems(src);

	if (nritems == 0) {
		size = sizeof(struct btrfs_header);
		memset(dst + size, 0, src->len - size);
	} else if (level == 0) {
		size = btrfs_leaf_data(src) +
			btrfs_item_offset_nr(src, nritems - 1) -
			btrfs_item_nr_offset(nritems);
		memset(dst + btrfs_item_nr_offset(nritems), 0, size);
		zero_items(md, dst, src);
	} else {
		size = offsetof(struct btrfs_node, ptrs) +
			sizeof(struct btrfs_key_ptr) * nritems;
		memset(dst + size, 0, src->len - size);
	}
	csum_block(dst, src->len);
}
static void *dump_worker(void *data)
{
	struct metadump_struct *md = (struct metadump_struct *)data;
	struct async_work *async;
	int ret;

	while (1) {
		pthread_mutex_lock(&md->mutex);
		while (list_empty(&md->list)) {
			if (md->done) {
				pthread_mutex_unlock(&md->mutex);
				goto out;
			}
			pthread_cond_wait(&md->cond, &md->mutex);
		}
		async = list_entry(md->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&md->mutex);

		if (md->compress_level > 0) {
			u8 *orig = async->buffer;

			async->bufsize = compressBound(async->size);
			async->buffer = malloc(async->bufsize);
			if (!async->buffer) {
				fprintf(stderr, "Error allocating buffer\n");
				pthread_mutex_lock(&md->mutex);
				if (!md->error)
					md->error = -ENOMEM;
				pthread_mutex_unlock(&md->mutex);
				pthread_exit(NULL);
			}

			ret = compress2(async->buffer,
					(unsigned long *)&async->bufsize,
					orig, async->size, md->compress_level);

			if (ret != Z_OK)
				async->error = 1;

			free(orig);
		}

		pthread_mutex_lock(&md->mutex);
		md->num_ready++;
		pthread_mutex_unlock(&md->mutex);
	}
out:
	pthread_exit(NULL);
}
static void meta_cluster_init(struct metadump_struct *md, u64 start)
{
	struct meta_cluster_header *header;

	md->num_items = 0;
	md->num_ready = 0;
	header = &md->cluster->header;
	header->magic = cpu_to_le64(HEADER_MAGIC);
	header->bytenr = cpu_to_le64(start);
	header->nritems = cpu_to_le32(0);
	header->compress = md->compress_level > 0 ?
			   COMPRESS_ZLIB : COMPRESS_NONE;
}
static void metadump_destroy(struct metadump_struct *md, int num_threads)
{
	int i;
	struct rb_node *n;

	pthread_mutex_lock(&md->mutex);
	md->done = 1;
	pthread_cond_broadcast(&md->cond);
	pthread_mutex_unlock(&md->mutex);

	for (i = 0; i < num_threads; i++)
		pthread_join(md->threads[i], NULL);

	pthread_cond_destroy(&md->cond);
	pthread_mutex_destroy(&md->mutex);

	while ((n = rb_first(&md->name_tree))) {
		struct name *name;

		name = rb_entry(n, struct name, n);
		rb_erase(n, &md->name_tree);
		free(name->val);
		free(name->sub);
		free(name);
	}
	free(md->threads);
	free(md->cluster);
}
static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
			 FILE *out, int num_threads, int compress_level,
			 int sanitize_names)
{
	int i, ret = 0;

	memset(md, 0, sizeof(*md));
	pthread_cond_init(&md->cond, NULL);
	pthread_mutex_init(&md->mutex, NULL);
	INIT_LIST_HEAD(&md->list);
	INIT_LIST_HEAD(&md->ordered);
	md->root = root;
	md->out = out;
	md->pending_start = (u64)-1;
	md->compress_level = compress_level;
	md->cluster = calloc(1, BLOCK_SIZE);
	md->sanitize_names = sanitize_names;
	if (sanitize_names > 1)
		crc32c_optimization_init();

	if (!md->cluster) {
		pthread_cond_destroy(&md->cond);
		pthread_mutex_destroy(&md->mutex);
		return -ENOMEM;
	}

	meta_cluster_init(md, 0);
	if (!num_threads)
		return 0;

	md->name_tree.rb_node = NULL;
	md->num_threads = num_threads;
	md->threads = calloc(num_threads, sizeof(pthread_t));
	if (!md->threads) {
		free(md->cluster);
		pthread_cond_destroy(&md->cond);
		pthread_mutex_destroy(&md->mutex);
		return -ENOMEM;
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(md->threads + i, NULL, dump_worker, md);
		if (ret)
			break;
	}

	if (ret)
		metadump_destroy(md, i + 1);

	return ret;
}
static int write_zero(FILE *out, size_t size)
{
	static char zero[BLOCK_SIZE];
	return fwrite(zero, size, 1, out);
}
static int write_buffers(struct metadump_struct *md, u64 *next)
{
	struct meta_cluster_header *header = &md->cluster->header;
	struct meta_cluster_item *item;
	struct async_work *async;
	u64 bytenr = 0;
	u32 nritems = 0;
	int ret;
	int err = 0;

	if (list_empty(&md->ordered))
		goto out;

	/* wait until all buffers are compressed */
	while (!err && md->num_items > md->num_ready) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&md->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&md->mutex);
		err = md->error;
	}

	if (err) {
		fprintf(stderr, "One of the threads errored out %s\n",
			strerror(err));
		goto out;
	}

	/* setup and write index block */
	list_for_each_entry(async, &md->ordered, ordered) {
		item = md->cluster->items + nritems;
		item->bytenr = cpu_to_le64(async->start);
		item->size = cpu_to_le32(async->bufsize);
		nritems++;
	}
	header->nritems = cpu_to_le32(nritems);

	ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
	if (ret != 1) {
		fprintf(stderr, "Error writing out cluster: %d\n", errno);
		return -EIO;
	}

	/* write buffers */
	bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	while (!list_empty(&md->ordered)) {
		async = list_entry(md->ordered.next, struct async_work,
				   ordered);
		list_del_init(&async->ordered);

		bytenr += async->bufsize;
		if (!err)
			ret = fwrite(async->buffer, async->bufsize, 1,
				     md->out);
		if (ret != 1) {
			err = -EIO;
			ret = 0;
			fprintf(stderr, "Error writing out cluster: %d\n",
				errno);
		}

		free(async->buffer);
		free(async);
	}

	/* zero unused space in the last block */
	if (!err && bytenr & BLOCK_MASK) {
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = write_zero(md->out, size);
		if (ret != 1) {
			fprintf(stderr, "Error zeroing out buffer: %d\n",
				errno);
			err = -EIO;
		}
	}
out:
	*next = bytenr;
	return err;
}
static int read_data_extent(struct metadump_struct *md,
			    struct async_work *async)
{
	struct btrfs_multi_bio *multi = NULL;
	struct btrfs_device *device;
	u64 bytes_left = async->size;
	u64 logical = async->start;
	u64 offset = 0;
	u64 bytenr;
	u64 read_len;
	ssize_t done;
	int fd;
	int ret;

	while (bytes_left) {
		read_len = bytes_left;
		ret = btrfs_map_block(&md->root->fs_info->mapping_tree, READ,
				      logical, &read_len, &multi, 0, NULL);
		if (ret) {
			fprintf(stderr, "Couldn't map data block %d\n", ret);
			return ret;
		}

		device = multi->stripes[0].dev;

		if (device->fd == 0) {
			fprintf(stderr,
				"Device we need to read from is not open\n");
			free(multi);
			return -EIO;
		}
		fd = device->fd;
		bytenr = multi->stripes[0].physical;
		free(multi);

		read_len = min(read_len, bytes_left);
		done = pread64(fd, async->buffer+offset, read_len, bytenr);
		if (done < read_len) {
			if (done < 0)
				fprintf(stderr, "Error reading extent %d\n",
					errno);
			else
				fprintf(stderr, "Short read\n");
			return -EIO;
		}

		bytes_left -= done;
		offset += done;
		logical += done;
	}

	return 0;
}
static int flush_pending(struct metadump_struct *md, int done)
{
	struct async_work *async = NULL;
	struct extent_buffer *eb;
	u64 blocksize = md->root->nodesize;
	u64 start;
	u64 size;
	size_t offset;
	int ret = 0;

	if (md->pending_size) {
		async = calloc(1, sizeof(*async));
		if (!async)
			return -ENOMEM;

		async->start = md->pending_start;
		async->size = md->pending_size;
		async->bufsize = async->size;
		async->buffer = malloc(async->bufsize);
		if (!async->buffer) {
			free(async);
			return -ENOMEM;
		}
		offset = 0;
		start = async->start;
		size = async->size;

		if (md->data) {
			ret = read_data_extent(md, async);
			if (ret) {
				free(async->buffer);
				free(async);
				return ret;
			}
		}

		while (!md->data && size > 0) {
			u64 this_read = min(blocksize, size);
			eb = read_tree_block(md->root, start, this_read, 0);
			if (!eb) {
				free(async->buffer);
				free(async);
				fprintf(stderr,
					"Error reading metadata block\n");
				return -EIO;
			}
			copy_buffer(md, async->buffer + offset, eb);
			free_extent_buffer(eb);
			start += this_read;
			offset += this_read;
			size -= this_read;
		}

		md->pending_start = (u64)-1;
		md->pending_size = 0;
	} else if (!done) {
		return 0;
	}

	pthread_mutex_lock(&md->mutex);
	if (async) {
		list_add_tail(&async->ordered, &md->ordered);
		md->num_items++;
		if (md->compress_level > 0) {
			list_add_tail(&async->list, &md->list);
			pthread_cond_signal(&md->cond);
		} else {
			md->num_ready++;
		}
	}
	if (md->num_items >= ITEMS_PER_CLUSTER || done) {
		ret = write_buffers(md, &start);
		if (ret)
			fprintf(stderr, "Error writing buffers %d\n",
				errno);
		else
			meta_cluster_init(md, start);
	}
	pthread_mutex_unlock(&md->mutex);
	return ret;
}
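
/*
 * Extents are coalesced before they are read: contiguous ranges of the
 * same kind (data vs. metadata) accumulate in pending_start/pending_size
 * until MAX_PENDING_SIZE is reached or contiguity breaks, and only then
 * does flush_pending() read the whole batch and queue it for the image.
 */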
static int add_extent(u64 start, u64 size, struct metadump_struct *md,
		      int data)
{
	int ret;

	if (md->data != data ||
	    md->pending_size + size > MAX_PENDING_SIZE ||
	    md->pending_start + md->pending_size != start) {
		ret = flush_pending(md, 0);
		if (ret)
			return ret;
		md->pending_start = start;
	}
	readahead_tree_block(md->root, start, size, 0);
	md->pending_size += size;
	md->data = data;
	return 0;
}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int is_tree_block(struct btrfs_root *extent_root,
			 struct btrfs_path *path, u64 bytenr)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 ref_objectid;
	int ret;

	leaf = path->nodes[0];
	while (1) {
		struct btrfs_extent_ref_v0 *ref_item;
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				return ret;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr)
			break;
		if (key.type != BTRFS_EXTENT_REF_V0_KEY)
			continue;
		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref_v0);
		ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
			return 1;
		break;
	}
	return 0;
}
#endif
static int copy_tree_blocks(struct btrfs_root *root, struct extent_buffer *eb,
			    struct metadump_struct *metadump, int root_tree)
{
	struct extent_buffer *tmp;
	struct btrfs_root_item *ri;
	struct btrfs_key key;
	u64 bytenr;
	int level;
	int nritems = 0;
	int i = 0;
	int ret;

	ret = add_extent(btrfs_header_bytenr(eb), root->leafsize, metadump, 0);
	if (ret) {
		fprintf(stderr, "Error adding metadata block\n");
		return ret;
	}

	if (btrfs_header_level(eb) == 0 && !root_tree)
		return 0;

	level = btrfs_header_level(eb);
	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(eb, &key, i);
			if (key.type != BTRFS_ROOT_ITEM_KEY)
				continue;
			ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
			bytenr = btrfs_disk_root_bytenr(eb, ri);
			tmp = read_tree_block(root, bytenr, root->leafsize, 0);
			if (!tmp) {
				fprintf(stderr,
					"Error reading log root block\n");
				return -EIO;
			}
			ret = copy_tree_blocks(root, tmp, metadump, 0);
			free_extent_buffer(tmp);
			if (ret)
				return ret;
		} else {
			bytenr = btrfs_node_blockptr(eb, i);
			tmp = read_tree_block(root, bytenr, root->leafsize, 0);
			if (!tmp) {
				fprintf(stderr, "Error reading log block\n");
				return -EIO;
			}
			ret = copy_tree_blocks(root, tmp, metadump, root_tree);
			free_extent_buffer(tmp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int copy_log_trees(struct btrfs_root *root,
			  struct metadump_struct *metadump,
			  struct btrfs_path *path)
{
	u64 blocknr = btrfs_super_log_root(root->fs_info->super_copy);

	if (blocknr == 0)
		return 0;

	if (!root->fs_info->log_root_tree ||
	    !root->fs_info->log_root_tree->node) {
		fprintf(stderr, "Error copying tree log, it wasn't set up\n");
		return -EIO;
	}

	return copy_tree_blocks(root, root->fs_info->log_root_tree->node,
				metadump, 1);
}
static int copy_space_cache(struct btrfs_root *root,
			    struct metadump_struct *metadump,
			    struct btrfs_path *path)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr, num_bytes;
	int ret;

	root = root->fs_info->tree_root;

	key.objectid = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Error searching for free space inode %d\n",
			ret);
		return ret;
	}

	leaf = path->nodes[0];

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				fprintf(stderr, "Error going to next leaf "
					"%d\n", ret);
				return ret;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			continue;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) !=
		    BTRFS_FILE_EXTENT_REG) {
			path->slots[0]++;
			continue;
		}

		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		ret = add_extent(bytenr, num_bytes, metadump, 1);
		if (ret) {
			fprintf(stderr, "Error adding space cache blocks %d\n",
				ret);
			btrfs_release_path(path);
			return ret;
		}
		path->slots[0]++;
	}

	return 0;
}
static int copy_from_extent_tree(struct metadump_struct *metadump,
				 struct btrfs_path *path)
{
	struct btrfs_root *extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	int ret;

	extent_root = metadump->root->fs_info->extent_root;
	bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Error searching extent root %d\n", ret);
		return ret;
	}
	ret = 0;

	leaf = path->nodes[0];

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0) {
				fprintf(stderr, "Error going to next leaf %d"
					"\n", ret);
				break;
			}
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid < bytenr ||
		    (key.type != BTRFS_EXTENT_ITEM_KEY &&
		     key.type != BTRFS_METADATA_ITEM_KEY)) {
			path->slots[0]++;
			continue;
		}

		bytenr = key.objectid;
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = extent_root->leafsize;
		else
			num_bytes = key.offset;

		if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			if (btrfs_extent_flags(leaf, ei) &
			    BTRFS_EXTENT_FLAG_TREE_BLOCK) {
				ret = add_extent(bytenr, num_bytes, metadump,
						 0);
				if (ret) {
					fprintf(stderr, "Error adding block "
						"%d\n", ret);
					break;
				}
			}
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			ret = is_tree_block(extent_root, path, bytenr);
			if (ret < 0) {
				fprintf(stderr, "Error checking tree block "
					"%d\n", ret);
				break;
			}

			if (ret) {
				ret = add_extent(bytenr, num_bytes, metadump,
						 0);
				if (ret) {
					fprintf(stderr, "Error adding block "
						"%d\n", ret);
					break;
				}
			}
			ret = 0;
#else
			fprintf(stderr, "Either extent tree corruption or "
				"you haven't built with V0 support\n");
			ret = -EIO;
			break;
#endif
		}
		bytenr += num_bytes;
		path->slots[0]++;
	}

	btrfs_release_path(path);

	return ret;
}
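
/*
 * Dump pipeline, as wired up below: queue the super block first, then
 * either walk every tree explicitly (-w) or harvest tree block addresses
 * from the extent tree, then add the log trees and the free space cache
 * payload, and finally flush whatever is still pending to the image.
 */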
static int create_metadump(const char *input, FILE *out, int num_threads,
			   int compress_level, int sanitize, int walk_trees)
{
	struct btrfs_root *root;
	struct btrfs_path *path = NULL;
	struct metadump_struct metadump;
	int ret;
	int err = 0;

	root = open_ctree(input, 0, 0);
	if (!root) {
		fprintf(stderr, "Open ctree failed\n");
		return -EIO;
	}

	BUG_ON(root->nodesize != root->leafsize);

	ret = metadump_init(&metadump, root, out, num_threads,
			    compress_level, sanitize);
	if (ret) {
		fprintf(stderr, "Error initializing metadump %d\n", ret);
		close_ctree(root);
		return ret;
	}

	ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
			 &metadump, 0);
	if (ret) {
		fprintf(stderr, "Error adding metadata %d\n", ret);
		err = ret;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		fprintf(stderr, "Out of memory allocating path\n");
		err = -ENOMEM;
		goto out;
	}

	if (walk_trees) {
		ret = copy_tree_blocks(root, root->fs_info->chunk_root->node,
				       &metadump, 1);
		if (ret) {
			err = ret;
			goto out;
		}

		ret = copy_tree_blocks(root, root->fs_info->tree_root->node,
				       &metadump, 1);
		if (ret) {
			err = ret;
			goto out;
		}
	} else {
		ret = copy_from_extent_tree(&metadump, path);
		if (ret) {
			err = ret;
			goto out;
		}
	}

	ret = copy_log_trees(root, &metadump, path);
	if (ret) {
		err = ret;
		goto out;
	}

	ret = copy_space_cache(root, &metadump, path);
out:
	ret = flush_pending(&metadump, 1);
	if (ret) {
		if (!err)
			err = ret;
		fprintf(stderr, "Error flushing pending %d\n", ret);
	}

	metadump_destroy(&metadump, num_threads);

	btrfs_free_path(path);
	ret = close_ctree(root);
	return err ? err : ret;
}
static void update_super_old(u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *key;
	u32 sectorsize = btrfs_super_sectorsize(super);
	u64 flags = btrfs_super_flags(super);

	flags |= BTRFS_SUPER_FLAG_METADUMP;
	btrfs_set_super_flags(super, flags);

	key = (struct btrfs_disk_key *)(super->sys_chunk_array);
	chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
				       sizeof(struct btrfs_disk_key));

	btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
	btrfs_set_disk_key_offset(key, 0);

	btrfs_set_stack_chunk_length(chunk, (u64)-1);
	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
	btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
	btrfs_set_stack_chunk_io_align(chunk, sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
	btrfs_set_stack_chunk_num_stripes(chunk, 1);
	btrfs_set_stack_chunk_sub_stripes(chunk, 0);
	chunk->stripe.devid = super->dev_item.devid;
	btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
	memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
	btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
	csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
}
static int update_super(u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *disk_key;
	struct btrfs_key key;
	u32 new_array_size = 0;
	u32 array_size;
	u32 cur = 0;
	u8 *ptr, *write_ptr;
	int old_num_stripes;

	write_ptr = ptr = super->sys_chunk_array;
	array_size = btrfs_super_sys_array_size(super);

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		new_array_size += sizeof(*disk_key);
		memmove(write_ptr, ptr, sizeof(*disk_key));

		write_ptr += sizeof(*disk_key);
		ptr += sizeof(*disk_key);
		cur += sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)ptr;
			old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			chunk = (struct btrfs_chunk *)write_ptr;

			memmove(write_ptr, ptr, sizeof(*chunk));
			btrfs_set_stack_chunk_num_stripes(chunk, 1);
			btrfs_set_stack_chunk_sub_stripes(chunk, 0);
			btrfs_set_stack_chunk_type(chunk,
						   BTRFS_BLOCK_GROUP_SYSTEM);
			chunk->stripe.devid = super->dev_item.devid;
			memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
			       BTRFS_UUID_SIZE);
			new_array_size += sizeof(*chunk);
		} else {
			fprintf(stderr, "Bogus key in the sys chunk array "
				"%d\n", key.type);
			return -EIO;
		}
		write_ptr += sizeof(*chunk);
		ptr += btrfs_chunk_item_size(old_num_stripes);
		cur += btrfs_chunk_item_size(old_num_stripes);
	}

	btrfs_set_super_sys_array_size(super, new_array_size);
	csum_block(buffer, BTRFS_SUPER_INFO_SIZE);

	return 0;
}
static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size)
{
	struct extent_buffer *eb;

	eb = malloc(sizeof(struct extent_buffer) + size);
	if (!eb)
		return NULL;
	memset(eb, 0, sizeof(struct extent_buffer) + size);

	eb->start = bytenr;
	eb->len = size;
	return eb;
}
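
/*
 * In a btrfs leaf, item headers grow forward from the leaf header while
 * item data grows backward from the end of the block, so shrinking an
 * item means sliding the data below it toward the end and raising the
 * offsets of every item from this slot on.
 */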
static void truncate_item(struct extent_buffer *eb, int slot, u32 new_size)
{
	struct btrfs_item *item;
	u32 nritems;
	u32 old_size;
	u32 old_data_start;
	u32 size_diff;
	u32 data_end;
	int i;

	old_size = btrfs_item_size_nr(eb, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(eb);
	data_end = btrfs_item_offset_nr(eb, nritems - 1);

	old_data_start = btrfs_item_offset_nr(eb, slot);
	size_diff = old_size - new_size;

	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(i);
		ioff = btrfs_item_offset(eb, item);
		btrfs_set_item_offset(eb, item, ioff + size_diff);
	}

	memmove_extent_buffer(eb, btrfs_leaf_data(eb) + data_end + size_diff,
			      btrfs_leaf_data(eb) + data_end,
			      old_data_start + new_size - data_end);
	item = btrfs_item_nr(slot);
	btrfs_set_item_size(eb, item, new_size);
}
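
/*
 * A restored image lives on a single device, so every chunk item gets
 * rewritten in place: the stripe array is truncated to one entry, RAID
 * profile bits are masked out, and the remaining stripe is pointed at
 * the target device before the block is re-checksummed.
 */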
static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
				  struct async_work *async, u8 *buffer,
				  size_t size)
{
	struct extent_buffer *eb;
	size_t size_left = size;
	u64 bytenr = async->start;
	int i;

	if (size_left % mdres->leafsize)
		return 0;

	eb = alloc_dummy_eb(bytenr, mdres->leafsize);
	if (!eb)
		return -ENOMEM;

	while (size_left) {
		eb->start = bytenr;
		memcpy(eb->data, buffer, mdres->leafsize);

		if (btrfs_header_bytenr(eb) != bytenr)
			break;
		if (memcmp(mdres->fsid,
			   eb->data + offsetof(struct btrfs_header, fsid),
			   BTRFS_FSID_SIZE))
			break;

		if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID)
			goto next;

		if (btrfs_header_level(eb) != 0)
			goto next;

		for (i = 0; i < btrfs_header_nritems(eb); i++) {
			struct btrfs_chunk chunk;
			struct btrfs_key key;
			u64 type;

			btrfs_item_key_to_cpu(eb, &key, i);
			if (key.type != BTRFS_CHUNK_ITEM_KEY)
				continue;
			truncate_item(eb, i, sizeof(chunk));
			read_extent_buffer(eb, &chunk,
					   btrfs_item_ptr_offset(eb, i),
					   sizeof(chunk));

			/* Zero out the RAID profile */
			type = btrfs_stack_chunk_type(&chunk);
			type &= (BTRFS_BLOCK_GROUP_DATA |
				 BTRFS_BLOCK_GROUP_SYSTEM |
				 BTRFS_BLOCK_GROUP_METADATA |
				 BTRFS_BLOCK_GROUP_DUP);
			btrfs_set_stack_chunk_type(&chunk, type);

			btrfs_set_stack_chunk_num_stripes(&chunk, 1);
			btrfs_set_stack_chunk_sub_stripes(&chunk, 0);
			btrfs_set_stack_stripe_devid(&chunk.stripe, mdres->devid);
			memcpy(chunk.stripe.dev_uuid, mdres->uuid,
			       BTRFS_UUID_SIZE);
			write_extent_buffer(eb, &chunk,
					    btrfs_item_ptr_offset(eb, i),
					    sizeof(chunk));
		}
		memcpy(buffer, eb->data, eb->len);
		csum_block(buffer, eb->len);
next:
		size_left -= mdres->leafsize;
		buffer += mdres->leafsize;
		bytenr += mdres->leafsize;
	}

	free(eb);

	return 0;
}
static void write_backup_supers(int fd, u8 *buf)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buf;
	struct stat st;
	u64 size;
	u64 bytenr;
	int i;
	int ret;

	if (fstat(fd, &st)) {
		fprintf(stderr, "Couldn't stat restore point, won't be able "
			"to write backup supers: %d\n", errno);
		return;
	}

	size = btrfs_device_size(fd, &st);

	for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
			break;
		btrfs_set_super_bytenr(super, bytenr);
		csum_block(buf, BTRFS_SUPER_INFO_SIZE);
		ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
		if (ret < BTRFS_SUPER_INFO_SIZE) {
			if (ret < 0)
				fprintf(stderr, "Problem writing out backup "
					"super block %d, err %d\n", i, errno);
			else
				fprintf(stderr, "Short write writing out "
					"backup super block\n");
			break;
		}
	}
}
static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical, u64 *size)
{
	struct fs_chunk *fs_chunk;
	struct rb_node *entry;
	struct fs_chunk search;
	u64 offset;

	if (logical == BTRFS_SUPER_INFO_OFFSET)
		return logical;

	search.logical = logical;
	entry = tree_search(&mdres->chunk_tree, &search.n, chunk_cmp, 1);
	if (!entry) {
		if (mdres->in != stdin)
			printf("Couldn't find a chunk, using logical\n");
		return logical;
	}
	fs_chunk = rb_entry(entry, struct fs_chunk, n);
	if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
		BUG();
	offset = search.logical - fs_chunk->logical;

	*size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
	return fs_chunk->physical + offset;
}
static void *restore_worker(void *data)
{
	struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
	struct async_work *async;
	size_t size;
	u8 *buffer;
	u8 *outbuf;
	int outfd;
	int ret;
	int compress_size = MAX_PENDING_SIZE * 4;

	outfd = fileno(mdres->out);
	buffer = malloc(compress_size);
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		pthread_mutex_lock(&mdres->mutex);
		if (!mdres->error)
			mdres->error = -ENOMEM;
		pthread_mutex_unlock(&mdres->mutex);
		pthread_exit(NULL);
	}

	while (1) {
		u64 bytenr;
		off_t offset = 0;
		int err = 0;

		pthread_mutex_lock(&mdres->mutex);
		while (!mdres->leafsize || list_empty(&mdres->list)) {
			if (mdres->done) {
				pthread_mutex_unlock(&mdres->mutex);
				goto out;
			}
			pthread_cond_wait(&mdres->cond, &mdres->mutex);
		}
		async = list_entry(mdres->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&mdres->mutex);

		if (mdres->compress_method == COMPRESS_ZLIB) {
			size = compress_size;
			ret = uncompress(buffer, (unsigned long *)&size,
					 async->buffer, async->bufsize);
			if (ret != Z_OK) {
				fprintf(stderr, "Error decompressing %d\n",
					ret);
				err = -EIO;
			}
			outbuf = buffer;
		} else {
			outbuf = async->buffer;
			size = async->bufsize;
		}

		if (!mdres->multi_devices) {
			if (async->start == BTRFS_SUPER_INFO_OFFSET) {
				if (mdres->old_restore) {
					update_super_old(outbuf);
				} else {
					ret = update_super(outbuf);
					if (ret)
						err = ret;
				}
			} else if (!mdres->old_restore) {
				ret = fixup_chunk_tree_block(mdres, async, outbuf, size);
				if (ret)
					err = ret;
			}
		}

		if (!mdres->fixup_offset) {
			while (size) {
				u64 chunk_size = size;
				if (!mdres->multi_devices && !mdres->old_restore)
					bytenr = logical_to_physical(mdres,
								     async->start + offset,
								     &chunk_size);
				else
					bytenr = async->start + offset;

				ret = pwrite64(outfd, outbuf+offset, chunk_size,
					       bytenr);
				if (ret != chunk_size) {
					if (ret < 0) {
						fprintf(stderr, "Error writing to "
							"device %d\n", errno);
						err = errno;
						break;
					} else {
						fprintf(stderr, "Short write\n");
						err = -EIO;
						break;
					}
				}
				size -= chunk_size;
				offset += chunk_size;
			}
		} else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
			ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
			if (ret) {
				printk("Error writing data\n");
				exit(1);
			}
		}

		/* backup super blocks are already there at fixup_offset stage */
		if (!mdres->multi_devices && async->start == BTRFS_SUPER_INFO_OFFSET)
			write_backup_supers(outfd, outbuf);

		pthread_mutex_lock(&mdres->mutex);
		if (err && !mdres->error)
			mdres->error = err;
		mdres->num_items--;
		pthread_mutex_unlock(&mdres->mutex);

		free(async->buffer);
		free(async);
	}
out:
	free(buffer);
	pthread_exit(NULL);
}
static void mdrestore_destroy(struct mdrestore_struct *mdres, int num_threads)
{
	struct rb_node *n;
	int i;

	while ((n = rb_first(&mdres->chunk_tree))) {
		struct fs_chunk *entry;

		entry = rb_entry(n, struct fs_chunk, n);
		rb_erase(n, &mdres->chunk_tree);
		free(entry);
	}
	pthread_mutex_lock(&mdres->mutex);
	mdres->done = 1;
	pthread_cond_broadcast(&mdres->cond);
	pthread_mutex_unlock(&mdres->mutex);

	for (i = 0; i < num_threads; i++)
		pthread_join(mdres->threads[i], NULL);

	pthread_cond_destroy(&mdres->cond);
	pthread_mutex_destroy(&mdres->mutex);
	free(mdres->threads);
}
static int mdrestore_init(struct mdrestore_struct *mdres,
			  FILE *in, FILE *out, int old_restore,
			  int num_threads, int fixup_offset,
			  struct btrfs_fs_info *info, int multi_devices)
{
	int i, ret = 0;

	memset(mdres, 0, sizeof(*mdres));
	pthread_cond_init(&mdres->cond, NULL);
	pthread_mutex_init(&mdres->mutex, NULL);
	INIT_LIST_HEAD(&mdres->list);
	mdres->in = in;
	mdres->out = out;
	mdres->old_restore = old_restore;
	mdres->chunk_tree.rb_node = NULL;
	mdres->fixup_offset = fixup_offset;
	mdres->info = info;
	mdres->multi_devices = multi_devices;

	if (!num_threads)
		return 0;

	mdres->num_threads = num_threads;
	mdres->threads = calloc(num_threads, sizeof(pthread_t));
	if (!mdres->threads)
		return -ENOMEM;
	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(mdres->threads + i, NULL, restore_worker,
				     mdres);
		if (ret)
			break;
	}
	if (ret)
		mdrestore_destroy(mdres, i + 1);
	return ret;
}
static int fill_mdres_info(struct mdrestore_struct *mdres,
			   struct async_work *async)
{
	struct btrfs_super_block *super;
	u8 *buffer = NULL;
	u8 *outbuf;
	int ret;

	/* We've already been initialized */
	if (mdres->leafsize)
		return 0;

	if (mdres->compress_method == COMPRESS_ZLIB) {
		size_t size = MAX_PENDING_SIZE * 2;

		buffer = malloc(MAX_PENDING_SIZE * 2);
		if (!buffer)
			return -ENOMEM;
		ret = uncompress(buffer, (unsigned long *)&size,
				 async->buffer, async->bufsize);
		if (ret != Z_OK) {
			fprintf(stderr, "Error decompressing %d\n", ret);
			free(buffer);
			return -EIO;
		}
		outbuf = buffer;
	} else {
		outbuf = async->buffer;
	}

	super = (struct btrfs_super_block *)outbuf;
	mdres->leafsize = btrfs_super_leafsize(super);
	memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
	memcpy(mdres->uuid, super->dev_item.uuid,
	       BTRFS_UUID_SIZE);
	mdres->devid = le64_to_cpu(super->dev_item.devid);
	free(buffer);
	return 0;
}
static int add_cluster(struct meta_cluster *cluster,
		       struct mdrestore_struct *mdres, u64 *next)
{
	struct meta_cluster_item *item;
	struct meta_cluster_header *header = &cluster->header;
	struct async_work *async;
	u64 bytenr;
	u32 i, nritems;
	int ret;

	BUG_ON(mdres->num_items);
	mdres->compress_method = header->compress;

	bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	nritems = le32_to_cpu(header->nritems);
	for (i = 0; i < nritems; i++) {
		item = &cluster->items[i];
		async = calloc(1, sizeof(*async));
		if (!async) {
			fprintf(stderr, "Error allocating async\n");
			return -ENOMEM;
		}
		async->start = le64_to_cpu(item->bytenr);
		async->bufsize = le32_to_cpu(item->size);
		async->buffer = malloc(async->bufsize);
		if (!async->buffer) {
			fprintf(stderr, "Error allocating async buffer\n");
			free(async);
			return -ENOMEM;
		}
		ret = fread(async->buffer, async->bufsize, 1, mdres->in);
		if (ret != 1) {
			fprintf(stderr, "Error reading buffer %d\n", errno);
			free(async->buffer);
			free(async);
			return -EIO;
		}
		bytenr += async->bufsize;

		pthread_mutex_lock(&mdres->mutex);
		if (async->start == BTRFS_SUPER_INFO_OFFSET) {
			ret = fill_mdres_info(mdres, async);
			if (ret) {
				fprintf(stderr, "Error setting up restore\n");
				pthread_mutex_unlock(&mdres->mutex);
				free(async->buffer);
				free(async);
				return ret;
			}
		}
		list_add_tail(&async->list, &mdres->list);
		mdres->num_items++;
		pthread_cond_signal(&mdres->cond);
		pthread_mutex_unlock(&mdres->mutex);
	}
	if (bytenr & BLOCK_MASK) {
		char buffer[BLOCK_MASK];
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = fread(buffer, size, 1, mdres->in);
		if (ret != 1) {
			fprintf(stderr, "Error reading in buffer %d\n", errno);
			return -EIO;
		}
	}
	*next = bytenr;
	return 0;
}
static int wait_for_worker(struct mdrestore_struct *mdres)
{
	int ret = 0;

	pthread_mutex_lock(&mdres->mutex);
	ret = mdres->error;
	while (!ret && mdres->num_items > 0) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&mdres->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&mdres->mutex);
		ret = mdres->error;
	}
	pthread_mutex_unlock(&mdres->mutex);
	return ret;
}
static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
			    u64 bytenr, u64 item_bytenr, u32 bufsize,
			    u64 cluster_bytenr)
{
	struct extent_buffer *eb;
	int ret = 0;
	int i;

	eb = alloc_dummy_eb(bytenr, mdres->leafsize);
	if (!eb) {
		ret = -ENOMEM;
		goto out;
	}

	while (item_bytenr != bytenr) {
		buffer += mdres->leafsize;
		item_bytenr += mdres->leafsize;
	}

	memcpy(eb->data, buffer, mdres->leafsize);
	if (btrfs_header_bytenr(eb) != bytenr) {
		fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
		ret = -EIO;
		goto out;
	}

	if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
		   BTRFS_FSID_SIZE)) {
		fprintf(stderr, "Fsid doesn't match\n");
		ret = -EIO;
		goto out;
	}

	if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
		fprintf(stderr, "Does not belong to the chunk tree\n");
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		struct btrfs_chunk chunk;
		struct fs_chunk *fs_chunk;
		struct btrfs_key key;

		if (btrfs_header_level(eb)) {
			u64 blockptr = btrfs_node_blockptr(eb, i);

			ret = search_for_chunk_blocks(mdres, blockptr,
						      cluster_bytenr);
			if (ret)
				break;
			continue;
		}

		/* Yay a leaf!  We loves leafs! */
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_CHUNK_ITEM_KEY)
			continue;

		fs_chunk = malloc(sizeof(struct fs_chunk));
		if (!fs_chunk) {
			fprintf(stderr, "Error allocating chunk\n");
			ret = -ENOMEM;
			break;
		}
		memset(fs_chunk, 0, sizeof(*fs_chunk));
		read_extent_buffer(eb, &chunk, btrfs_item_ptr_offset(eb, i),
				   sizeof(chunk));

		fs_chunk->logical = key.offset;
		fs_chunk->physical = btrfs_stack_stripe_offset(&chunk.stripe);
		fs_chunk->bytes = btrfs_stack_chunk_length(&chunk);
		tree_insert(&mdres->chunk_tree, &fs_chunk->n, chunk_cmp);
	}
out:
	free(eb);
	return ret;
}
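
/*
 * Restore needs the logical->physical mapping before it can place any
 * block, so the image is scanned cluster by cluster for the blocks of
 * the chunk tree itself, starting from the chunk root recorded in the
 * super block and recursing through node pointers via read_chunk_block().
 */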
/* If you have to ask you aren't worthy */
static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
				   u64 search, u64 cluster_bytenr)
{
	struct meta_cluster *cluster;
	struct meta_cluster_header *header;
	struct meta_cluster_item *item;
	u64 current_cluster = cluster_bytenr, bytenr;
	u64 item_bytenr;
	u32 bufsize, nritems, i;
	u32 max_size = MAX_PENDING_SIZE * 2;
	u8 *buffer, *tmp = NULL;
	int ret = 0;

	cluster = malloc(BLOCK_SIZE);
	if (!cluster) {
		fprintf(stderr, "Error allocating cluster\n");
		return -ENOMEM;
	}

	buffer = malloc(max_size);
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		free(cluster);
		return -ENOMEM;
	}

	if (mdres->compress_method == COMPRESS_ZLIB) {
		tmp = malloc(max_size);
		if (!tmp) {
			fprintf(stderr, "Error allocating tmp buffer\n");
			free(cluster);
			free(buffer);
			return -ENOMEM;
		}
	}

	bytenr = current_cluster;
	while (1) {
		if (fseek(mdres->in, current_cluster, SEEK_SET)) {
			fprintf(stderr, "Error seeking: %d\n", errno);
			ret = -EIO;
			break;
		}

		ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
		if (ret == 0) {
			if (cluster_bytenr != 0) {
				cluster_bytenr = 0;
				current_cluster = 0;
				bytenr = 0;
				continue;
			}
			printf("ok this is where we screwed up?\n");
			ret = -EIO;
			break;
		} else if (ret < 0) {
			fprintf(stderr, "Error reading image\n");
			break;
		}
		ret = 0;

		header = &cluster->header;
		if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
		    le64_to_cpu(header->bytenr) != current_cluster) {
			fprintf(stderr, "bad header in metadump image\n");
			ret = -EIO;
			break;
		}

		bytenr += BLOCK_SIZE;
		nritems = le32_to_cpu(header->nritems);
		for (i = 0; i < nritems; i++) {
			size_t size;

			item = &cluster->items[i];
			bufsize = le32_to_cpu(item->size);
			item_bytenr = le64_to_cpu(item->bytenr);

			if (bufsize > max_size) {
				fprintf(stderr, "item %u size %u too big\n",
					i, bufsize);
				ret = -EIO;
				break;
			}

			if (mdres->compress_method == COMPRESS_ZLIB) {
				ret = fread(tmp, bufsize, 1, mdres->in);
				if (ret != 1) {
					fprintf(stderr, "Error reading: %d\n",
						errno);
					ret = -EIO;
					break;
				}

				size = max_size;
				ret = uncompress(buffer,
						 (unsigned long *)&size, tmp,
						 bufsize);
				if (ret != Z_OK) {
					fprintf(stderr, "Error decompressing "
						"%d\n", ret);
					ret = -EIO;
					break;
				}
			} else {
				ret = fread(buffer, bufsize, 1, mdres->in);
				if (ret != 1) {
					fprintf(stderr, "Error reading: %d\n",
						errno);
					ret = -EIO;
					break;
				}
				size = bufsize;
			}
			ret = 0;

			if (item_bytenr <= search &&
			    item_bytenr + size > search) {
				ret = read_chunk_block(mdres, buffer, search,
						       item_bytenr, size,
						       current_cluster);
				if (!ret)
					ret = 1;
				break;
			}
			bytenr += bufsize;
		}
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}
		if (bytenr & BLOCK_MASK)
			bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
		current_cluster = bytenr;
	}

	free(tmp);
	free(buffer);
	free(cluster);
	return ret;
}
static int build_chunk_tree(struct mdrestore_struct *mdres,
			    struct meta_cluster *cluster)
{
	struct btrfs_super_block *super;
	struct meta_cluster_header *header;
	struct meta_cluster_item *item = NULL;
	u64 chunk_root_bytenr = 0;
	u32 i, nritems;
	u64 bytenr = 0;
	u8 *buffer;
	int ret;

	/* We can't seek with stdin so don't bother doing this */
	if (mdres->in == stdin)
		return 0;

	ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
	if (ret <= 0) {
		fprintf(stderr, "Error reading in cluster: %d\n", errno);
		return -EIO;
	}
	ret = 0;

	header = &cluster->header;
	if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
	    le64_to_cpu(header->bytenr) != 0) {
		fprintf(stderr, "bad header in metadump image\n");
		return -EIO;
	}

	bytenr += BLOCK_SIZE;
	mdres->compress_method = header->compress;
	nritems = le32_to_cpu(header->nritems);
	for (i = 0; i < nritems; i++) {
		item = &cluster->items[i];

		if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
			break;
		bytenr += le32_to_cpu(item->size);
		if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
			fprintf(stderr, "Error seeking: %d\n", errno);
			return -EIO;
		}
	}

	if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
		fprintf(stderr, "Huh, didn't find the super?\n");
		return -EINVAL;
	}

	buffer = malloc(le32_to_cpu(item->size));
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		return -ENOMEM;
	}

	ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
	if (ret != 1) {
		fprintf(stderr, "Error reading buffer: %d\n", errno);
		free(buffer);
		return -EIO;
	}

	if (mdres->compress_method == COMPRESS_ZLIB) {
		size_t size = MAX_PENDING_SIZE * 2;
		u8 *tmp;

		tmp = malloc(MAX_PENDING_SIZE * 2);
		if (!tmp) {
			free(buffer);
			return -ENOMEM;
		}
		ret = uncompress(tmp, (unsigned long *)&size,
				 buffer, le32_to_cpu(item->size));
		if (ret != Z_OK) {
			fprintf(stderr, "Error decompressing %d\n", ret);
			free(buffer);
			free(tmp);
			return -EIO;
		}
		free(buffer);
		buffer = tmp;
	}

	pthread_mutex_lock(&mdres->mutex);
	super = (struct btrfs_super_block *)buffer;
	chunk_root_bytenr = btrfs_super_chunk_root(super);
	mdres->leafsize = btrfs_super_leafsize(super);
	memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
	memcpy(mdres->uuid, super->dev_item.uuid,
	       BTRFS_UUID_SIZE);
	mdres->devid = le64_to_cpu(super->dev_item.devid);
	free(buffer);
	pthread_mutex_unlock(&mdres->mutex);

	return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
}
static int __restore_metadump(const char *input, FILE *out, int old_restore,
			      int num_threads, int fixup_offset,
			      const char *target, int multi_devices)
{
	struct meta_cluster *cluster = NULL;
	struct meta_cluster_header *header;
	struct mdrestore_struct mdrestore;
	struct btrfs_fs_info *info = NULL;
	u64 bytenr = 0;
	FILE *in = NULL;
	int ret = 0;

	if (!strcmp(input, "-")) {
		in = stdin;
	} else {
		in = fopen(input, "r");
		if (!in) {
			perror("unable to open metadump image");
			return 1;
		}
	}

	/* NOTE: open with write mode */
	if (fixup_offset) {
		BUG_ON(!target);
		info = open_ctree_fs_info(target, 0, 0,
					  OPEN_CTREE_WRITES |
					  OPEN_CTREE_RESTORE |
					  OPEN_CTREE_PARTIAL);
		if (!info) {
			fprintf(stderr, "%s: open ctree failed\n", __func__);
			ret = -EIO;
			goto failed_open;
		}
	}

	cluster = malloc(BLOCK_SIZE);
	if (!cluster) {
		fprintf(stderr, "Error allocating cluster\n");
		ret = -ENOMEM;
		goto failed_info;
	}

	ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
			     fixup_offset, info, multi_devices);
	if (ret) {
		fprintf(stderr, "Error initializing mdrestore %d\n", ret);
		goto failed_cluster;
	}

	if (!multi_devices && !old_restore) {
		ret = build_chunk_tree(&mdrestore, cluster);
		if (ret)
			goto out;
	}

	if (in != stdin && fseek(in, 0, SEEK_SET)) {
		fprintf(stderr, "Error seeking %d\n", errno);
		goto out;
	}

	while (1) {
		ret = fread(cluster, BLOCK_SIZE, 1, in);
		if (!ret)
			break;

		header = &cluster->header;
		if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
		    le64_to_cpu(header->bytenr) != bytenr) {
			fprintf(stderr, "bad header in metadump image\n");
			ret = -EIO;
			break;
		}
		ret = add_cluster(cluster, &mdrestore, &bytenr);
		if (ret) {
			fprintf(stderr, "Error adding cluster\n");
			break;
		}

		ret = wait_for_worker(&mdrestore);
		if (ret) {
			fprintf(stderr, "One of the threads errored out %d\n",
				ret);
			break;
		}
	}
out:
	mdrestore_destroy(&mdrestore, num_threads);
failed_cluster:
	free(cluster);
failed_info:
	if (fixup_offset && info)
		close_ctree(info->chunk_root);
failed_open:
	if (in != stdin)
		fclose(in);
	return ret;
}
static int restore_metadump(const char *input, FILE *out, int old_restore,
			    int num_threads, int multi_devices)
{
	return __restore_metadump(input, out, old_restore, num_threads, 0, NULL,
				  multi_devices);
}

static int fixup_metadump(const char *input, FILE *out, int num_threads,
			  const char *target)
{
	return __restore_metadump(input, out, 0, num_threads, 1, target, 1);
}
static int update_disk_super_on_device(struct btrfs_fs_info *info,
				       const char *other_dev, u64 cur_devid)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_path path;
	struct btrfs_dev_item *dev_item;
	struct btrfs_super_block *disk_super;
	char dev_uuid[BTRFS_UUID_SIZE];
	char fs_uuid[BTRFS_UUID_SIZE];
	u64 devid, type, io_align, io_width;
	u64 sector_size, total_bytes, bytes_used;
	char *buf;
	int fp;
	int ret;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = cur_devid;

	btrfs_init_path(&path);
	ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
	if (ret) {
		fprintf(stderr, "search key fails\n");
		exit(1);
	}

	leaf = path.nodes[0];
	dev_item = btrfs_item_ptr(leaf, path.slots[0],
				  struct btrfs_dev_item);

	devid = btrfs_device_id(leaf, dev_item);
	if (devid != cur_devid) {
		printk("devid %llu mismatch with %llu\n", devid, cur_devid);
		exit(1);
	}

	type = btrfs_device_type(leaf, dev_item);
	io_align = btrfs_device_io_align(leaf, dev_item);
	io_width = btrfs_device_io_width(leaf, dev_item);
	sector_size = btrfs_device_sector_size(leaf, dev_item);
	total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	btrfs_release_path(&path);

	printk("update disk super on %s devid=%llu\n", other_dev, devid);

	/* update other devices' super block */
	fp = open(other_dev, O_CREAT | O_RDWR, 0600);
	if (fp < 0) {
		fprintf(stderr, "could not open %s\n", other_dev);
		exit(1);
	}

	buf = malloc(BTRFS_SUPER_INFO_SIZE);
	if (!buf) {
		ret = -ENOMEM;
		close(fp);
		return ret;
	}

	memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);

	disk_super = (struct btrfs_super_block *)buf;
	dev_item = &disk_super->dev_item;

	btrfs_set_stack_device_type(dev_item, type);
	btrfs_set_stack_device_id(dev_item, devid);
	btrfs_set_stack_device_total_bytes(dev_item, total_bytes);
	btrfs_set_stack_device_bytes_used(dev_item, bytes_used);
	btrfs_set_stack_device_io_align(dev_item, io_align);
	btrfs_set_stack_device_io_width(dev_item, io_width);
	btrfs_set_stack_device_sector_size(dev_item, sector_size);
	memcpy(dev_item->uuid, dev_uuid, BTRFS_UUID_SIZE);
	memcpy(dev_item->fsid, fs_uuid, BTRFS_UUID_SIZE);
	csum_block((u8 *)buf, BTRFS_SUPER_INFO_SIZE);

	ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
	if (ret != BTRFS_SUPER_INFO_SIZE) {
		ret = -EIO;
		goto out;
	}

	write_backup_supers(fp, (u8 *)buf);

out:
	free(buf);
	close(fp);
	return 0;
}
static void print_usage(void)
{
	fprintf(stderr, "usage: btrfs-image [options] source target\n");
	fprintf(stderr, "\t-r \trestore metadump image\n");
	fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
	fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
	fprintf(stderr, "\t-o \tdon't mess with the chunk tree when restoring\n");
	fprintf(stderr, "\t-s \tsanitize file names, use once to just use garbage, use twice if you want crc collisions\n");
	fprintf(stderr, "\t-w \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
	fprintf(stderr, "\t-m \trestore for multiple devices\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "\tIn the dump mode, source is the btrfs device and target is the output file (use '-' for stdout).\n");
	fprintf(stderr, "\tIn the restore mode, source is the dumped image and target is the btrfs device/file.\n");
	exit(1);
}
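
/*
 * Example invocations (illustrative, matching the options parsed below):
 *
 *	btrfs-image -c9 -t4 /dev/sda1 fs.image    (dump, zlib level 9)
 *	btrfs-image -ss /dev/sda1 fs.image        (dump, collide file names)
 *	btrfs-image -r fs.image /dev/sdb1         (restore onto a device)
 */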
int main(int argc, char *argv[])
{
	char *source;
	char *target;
	u64 num_threads = 0;
	u64 compress_level = 0;
	int create = 1;
	int old_restore = 0;
	int walk_trees = 0;
	int multi_devices = 0;
	int ret;
	int sanitize = 0;
	int dev_cnt = 0;
	int usage_error = 0;
	FILE *out;

	while (1) {
		int c = getopt(argc, argv, "rc:t:oswm");
		if (c < 0)
			break;
		switch (c) {
		case 'r':
			create = 0;
			break;
		case 't':
			num_threads = arg_strtou64(optarg);
			if (num_threads > 32)
				print_usage();
			break;
		case 'c':
			compress_level = arg_strtou64(optarg);
			if (compress_level > 9)
				print_usage();
			break;
		case 'o':
			old_restore = 1;
			break;
		case 's':
			sanitize++;
			break;
		case 'w':
			walk_trees = 1;
			break;
		case 'm':
			create = 0;
			multi_devices = 1;
			break;
		default:
			print_usage();
		}
	}

	argc = argc - optind;
	dev_cnt = argc - 1;

	if (check_argc_min(argc, 2))
		print_usage();

	if (create) {
		if (old_restore) {
			fprintf(stderr, "Usage error: create and restore cannot be used at the same time\n");
			usage_error++;
		}
	} else {
		if (walk_trees || sanitize || compress_level) {
			fprintf(stderr, "Usage error: the -w, -s and -c options make no sense for restore\n");
			usage_error++;
		}
		if (multi_devices && dev_cnt < 2) {
			fprintf(stderr, "Usage error: not enough devices specified for -m option\n");
			usage_error++;
		}
		if (!multi_devices && dev_cnt != 1) {
			fprintf(stderr, "Usage error: accepts only 1 device without -m option\n");
			usage_error++;
		}
	}

	if (usage_error)
		print_usage();

	source = argv[optind];
	target = argv[optind + 1];

	if (create && !strcmp(target, "-")) {
		out = stdout;
	} else {
		out = fopen(target, "w+");
		if (!out) {
			perror("unable to create target file");
			exit(1);
		}
	}

	if (num_threads == 0 && compress_level > 0) {
		num_threads = sysconf(_SC_NPROCESSORS_ONLN);
		if (num_threads <= 0)
			num_threads = 1;
	}

	if (create) {
		ret = check_mounted(source);
		if (ret < 0) {
			fprintf(stderr, "Could not check mount status: %s\n",
				strerror(-ret));
			exit(1);
		} else if (ret) {
			fprintf(stderr,
				"WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");
		}

		ret = create_metadump(source, out, num_threads,
				      compress_level, sanitize, walk_trees);
	} else {
		ret = restore_metadump(source, out, old_restore, 1,
				       multi_devices);
	}
	if (ret) {
		printk("%s failed (%s)\n", (create) ? "create" : "restore",
		       strerror(errno));
		goto out;
	}

	/* extended support for multiple devices */
	if (!create && multi_devices) {
		struct btrfs_fs_info *info;
		u64 total_devs;
		int i;

		info = open_ctree_fs_info(target, 0, 0,
					  OPEN_CTREE_PARTIAL |
					  OPEN_CTREE_RESTORE);
		if (!info) {
			int e = errno;
			fprintf(stderr, "unable to open %s error = %s\n",
				target, strerror(e));
			return 1;
		}

		total_devs = btrfs_super_num_devices(info->super_copy);
		if (total_devs != dev_cnt) {
			printk("it needs %llu devices but has only %d\n",
			       total_devs, dev_cnt);
			close_ctree(info->chunk_root);
			goto out;
		}

		/* update super block on other disks */
		for (i = 2; i <= dev_cnt; i++) {
			ret = update_disk_super_on_device(info,
					argv[optind + i], (u64)i);
			if (ret) {
				printk("update disk super failed devid=%d (error=%d)\n",
				       i, ret);
				close_ctree(info->chunk_root);
				exit(1);
			}
		}

		close_ctree(info->chunk_root);

		/* fix metadata block to map correct chunk */
		ret = fixup_metadump(source, out, 1, target);
		if (ret) {
			fprintf(stderr, "fix metadump failed (error=%d)\n",
				ret);
			exit(1);
		}
	}

out:
	if (out == stdout) {
		fflush(out);
	} else {
		fclose(out);
		if (ret && create) {
			int unlink_ret;

			unlink_ret = unlink(target);
			if (unlink_ret)
				fprintf(stderr,
					"unlink output file failed: %s\n",
					strerror(errno));
		}
	}

	return ret;
}