2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
22 #include <sys/types.h>
28 #include "kerncompat.h"
32 #include "transaction.h"
35 #include "extent_io.h"
37 #define HEADER_MAGIC 0xbd5c25e27295668bULL
38 #define MAX_PENDING_SIZE (256 * 1024)
39 #define BLOCK_SIZE 1024
40 #define BLOCK_MASK (BLOCK_SIZE - 1)
42 #define COMPRESS_NONE 0
43 #define COMPRESS_ZLIB 1
45 struct meta_cluster_item {
48 } __attribute__ ((__packed__));
50 struct meta_cluster_header {
55 } __attribute__ ((__packed__));
57 /* cluster header + index items + buffers */
59 struct meta_cluster_header header;
60 struct meta_cluster_item items[];
61 } __attribute__ ((__packed__));
63 #define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
64 sizeof(struct meta_cluster_item))
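/*
 * On-disk layout of a metadump image: a sequence of clusters, each aligned
 * to BLOCK_SIZE.  A cluster starts with one BLOCK_SIZE index block -- the
 * meta_cluster_header (magic, bytenr, nritems, compress) followed by up to
 * ITEMS_PER_CLUSTER meta_cluster_item entries -- and is followed by the
 * item buffers themselves, zero-padded out to the next BLOCK_SIZE boundary.
 */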
74 struct list_head list;
75 struct list_head ordered;
83 struct metadump_struct {
84 struct btrfs_root *root;
87 struct meta_cluster *cluster;
91 pthread_mutex_t mutex;
93 struct rb_root name_tree;
95 struct list_head list;
96 struct list_head ordered;
118 struct mdrestore_struct {
124 pthread_mutex_t mutex;
127 struct rb_root chunk_tree;
128 struct list_head list;
132 u8 uuid[BTRFS_UUID_SIZE];
133 u8 fsid[BTRFS_FSID_SIZE];
141 struct btrfs_fs_info *info;
144 static void print_usage(void) __attribute__((noreturn));
145 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
146 u64 search, u64 cluster_bytenr);
147 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);
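/*
 * Every copied block keeps its checksum in the first BTRFS_CSUM_SIZE bytes;
 * csum_block() recomputes the crc32c over everything after that field and
 * stores the result back at the start of the buffer.  It is called whenever
 * a block's contents have been modified (sanitizing, chunk fixups,
 * superblock updates).
 */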
149 static void csum_block(u8 *buf, size_t len)
151 char result[BTRFS_CRC32_SIZE];
153 crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
154 btrfs_csum_final(crc, result);
155 memcpy(buf, result, BTRFS_CRC32_SIZE);
158 static int has_name(struct btrfs_key *key)
161 case BTRFS_DIR_ITEM_KEY:
162 case BTRFS_DIR_INDEX_KEY:
163 case BTRFS_INODE_REF_KEY:
164 case BTRFS_INODE_EXTREF_KEY:
165 case BTRFS_XATTR_ITEM_KEY:
174 static char *generate_garbage(u32 name_len)
176 char *buf = malloc(name_len);
182 for (i = 0; i < name_len; i++) {
183 char c = rand() % 94 + 33;
193 static int name_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
195 struct name *entry = rb_entry(a, struct name, n);
196 struct name *ins = rb_entry(b, struct name, n);
199 len = min(ins->len, entry->len);
200 return memcmp(ins->val, entry->val, len);
203 static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
205 struct fs_chunk *entry = rb_entry(a, struct fs_chunk, n);
206 struct fs_chunk *ins = rb_entry(b, struct fs_chunk, n);
208 if (fuzz && ins->logical >= entry->logical &&
209 ins->logical < entry->logical + entry->bytes)
212 if (ins->logical < entry->logical)
214 else if (ins->logical > entry->logical)
219 static void tree_insert(struct rb_root *root, struct rb_node *ins,
220 int (*cmp)(struct rb_node *a, struct rb_node *b,
223 struct rb_node ** p = &root->rb_node;
224 struct rb_node * parent = NULL;
230 dir = cmp(*p, ins, 0);
239 rb_link_node(ins, parent, p);
240 rb_insert_color(ins, root);
243 static struct rb_node *tree_search(struct rb_root *root,
244 struct rb_node *search,
245 int (*cmp)(struct rb_node *a,
246 struct rb_node *b, int fuzz),
249 struct rb_node *n = root->rb_node;
253 dir = cmp(n, search, fuzz);
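/*
 * find_collision() backs the stronger sanitize mode (-s given twice): it
 * tries to build a replacement name of the same length whose crc32c matches
 * the original, so directory index hashes still line up after sanitizing.
 * If no collision is found it falls back to printable garbage and warns that
 * indexes will not match.  Results are cached in md->name_tree, keyed by the
 * original name.
 */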
265 static char *find_collision(struct metadump_struct *md, char *name,
269 struct rb_node *entry;
271 unsigned long checksum;
277 entry = tree_search(&md->name_tree, &tmp.n, name_cmp, 0);
279 val = rb_entry(entry, struct name, n);
284 val = malloc(sizeof(struct name));
286 fprintf(stderr, "Couldn't sanitize name, enomem\n");
291 memset(val, 0, sizeof(*val));
295 val->sub = malloc(name_len);
297 fprintf(stderr, "Couldn't sanitize name, enomem\n");
303 checksum = crc32c(~1, val->val, name_len);
304 memset(val->sub, ' ', name_len);
307 if (crc32c(~1, val->sub, name_len) == checksum &&
308 memcmp(val->sub, val->val, val->len)) {
313 if (val->sub[i] == 127) {
318 } while (val->sub[i] == 127);
323 if (val->sub[i] == '/')
325 memset(val->sub, ' ', i);
330 if (val->sub[i] == '/')
336 fprintf(stderr, "Couldn't find a collision for '%.*s', "
337 "generating normal garbage, it won't match indexes\n",
339 for (i = 0; i < name_len; i++) {
340 char c = rand() % 94 + 33;
348 tree_insert(&md->name_tree, &val->n, name_cmp);
352 static void sanitize_dir_item(struct metadump_struct *md, struct extent_buffer *eb,
355 struct btrfs_dir_item *dir_item;
358 unsigned long name_ptr;
363 int free_garbage = (md->sanitize_names == 1);
365 dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
366 total_len = btrfs_item_size_nr(eb, slot);
367 while (cur < total_len) {
368 this_len = sizeof(*dir_item) +
369 btrfs_dir_name_len(eb, dir_item) +
370 btrfs_dir_data_len(eb, dir_item);
371 name_ptr = (unsigned long)(dir_item + 1);
372 name_len = btrfs_dir_name_len(eb, dir_item);
374 if (md->sanitize_names > 1) {
375 buf = malloc(name_len);
377 fprintf(stderr, "Couldn't sanitize name, "
381 read_extent_buffer(eb, buf, name_ptr, name_len);
382 garbage = find_collision(md, buf, name_len);
384 garbage = generate_garbage(name_len);
387 fprintf(stderr, "Couldn't sanitize name, enomem\n");
390 write_extent_buffer(eb, garbage, name_ptr, name_len);
392 dir_item = (struct btrfs_dir_item *)((char *)dir_item +
399 static void sanitize_inode_ref(struct metadump_struct *md,
400 struct extent_buffer *eb, int slot, int ext)
402 struct btrfs_inode_extref *extref;
403 struct btrfs_inode_ref *ref;
406 unsigned long name_ptr;
410 int free_garbage = (md->sanitize_names == 1);
412 item_size = btrfs_item_size_nr(eb, slot);
413 ptr = btrfs_item_ptr_offset(eb, slot);
414 while (cur_offset < item_size) {
416 extref = (struct btrfs_inode_extref *)(ptr +
418 name_ptr = (unsigned long)(&extref->name);
419 len = btrfs_inode_extref_name_len(eb, extref);
420 cur_offset += sizeof(*extref);
422 ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
423 len = btrfs_inode_ref_name_len(eb, ref);
424 name_ptr = (unsigned long)(ref + 1);
425 cur_offset += sizeof(*ref);
429 if (md->sanitize_names > 1) {
432 fprintf(stderr, "Couldn't sanitize name, "
436 read_extent_buffer(eb, buf, name_ptr, len);
437 garbage = find_collision(md, buf, len);
439 garbage = generate_garbage(len);
443 fprintf(stderr, "Couldn't sanitize name, enomem\n");
446 write_extent_buffer(eb, garbage, name_ptr, len);
452 static void sanitize_xattr(struct metadump_struct *md,
453 struct extent_buffer *eb, int slot)
455 struct btrfs_dir_item *dir_item;
456 unsigned long data_ptr;
459 dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
460 data_len = btrfs_dir_data_len(eb, dir_item);
462 data_ptr = (unsigned long)((char *)(dir_item + 1) +
463 btrfs_dir_name_len(eb, dir_item));
464 memset_extent_buffer(eb, 0, data_ptr, data_len);
467 static void sanitize_name(struct metadump_struct *md, u8 *dst,
468 struct extent_buffer *src, struct btrfs_key *key,
471 struct extent_buffer *eb;
473 eb = alloc_dummy_eb(src->start, src->len);
475 fprintf(stderr, "Couldn't sanitize name, no memory\n");
479 memcpy(eb->data, dst, eb->len);
482 case BTRFS_DIR_ITEM_KEY:
483 case BTRFS_DIR_INDEX_KEY:
484 sanitize_dir_item(md, eb, slot);
486 case BTRFS_INODE_REF_KEY:
487 sanitize_inode_ref(md, eb, slot, 0);
489 case BTRFS_INODE_EXTREF_KEY:
490 sanitize_inode_ref(md, eb, slot, 1);
492 case BTRFS_XATTR_ITEM_KEY:
493 sanitize_xattr(md, eb, slot);
499 memcpy(dst, eb->data, eb->len);
504 * zero inline extents and csum items
506 static void zero_items(struct metadump_struct *md, u8 *dst,
507 struct extent_buffer *src)
509 struct btrfs_file_extent_item *fi;
510 struct btrfs_item *item;
511 struct btrfs_key key;
512 u32 nritems = btrfs_header_nritems(src);
517 for (i = 0; i < nritems; i++) {
518 item = btrfs_item_nr(i);
519 btrfs_item_key_to_cpu(src, &key, i);
520 if (key.type == BTRFS_CSUM_ITEM_KEY) {
521 size = btrfs_item_size_nr(src, i);
522 memset(dst + btrfs_leaf_data(src) +
523 btrfs_item_offset_nr(src, i), 0, size);
527 if (md->sanitize_names && has_name(&key)) {
528 sanitize_name(md, dst, src, &key, i);
532 if (key.type != BTRFS_EXTENT_DATA_KEY)
535 fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
536 extent_type = btrfs_file_extent_type(src, fi);
537 if (extent_type != BTRFS_FILE_EXTENT_INLINE)
540 ptr = btrfs_file_extent_inline_start(fi);
541 size = btrfs_file_extent_inline_item_len(src, item);
542 memset(dst + ptr, 0, size);
547 * copy buffer and zero useless data in the buffer
549 static void copy_buffer(struct metadump_struct *md, u8 *dst,
550 struct extent_buffer *src)
556 memcpy(dst, src->data, src->len);
557 if (src->start == BTRFS_SUPER_INFO_OFFSET)
560 level = btrfs_header_level(src);
561 nritems = btrfs_header_nritems(src);
564 size = sizeof(struct btrfs_header);
565 memset(dst + size, 0, src->len - size);
566 } else if (level == 0) {
567 size = btrfs_leaf_data(src) +
568 btrfs_item_offset_nr(src, nritems - 1) -
569 btrfs_item_nr_offset(nritems);
570 memset(dst + btrfs_item_nr_offset(nritems), 0, size);
571 zero_items(md, dst, src);
573 size = offsetof(struct btrfs_node, ptrs) +
574 sizeof(struct btrfs_key_ptr) * nritems;
575 memset(dst + size, 0, src->len - size);
577 csum_block(dst, src->len);
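/*
 * dump_worker() is the per-thread compression loop for dumping: it pulls
 * async_work items off md->list and, when a compression level was requested,
 * replaces each buffer with its zlib-compressed form via compress2().
 */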
580 static void *dump_worker(void *data)
582 struct metadump_struct *md = (struct metadump_struct *)data;
583 struct async_work *async;
587 pthread_mutex_lock(&md->mutex);
588 while (list_empty(&md->list)) {
590 pthread_mutex_unlock(&md->mutex);
593 pthread_cond_wait(&md->cond, &md->mutex);
595 async = list_entry(md->list.next, struct async_work, list);
596 list_del_init(&async->list);
597 pthread_mutex_unlock(&md->mutex);
599 if (md->compress_level > 0) {
600 u8 *orig = async->buffer;
602 async->bufsize = compressBound(async->size);
603 async->buffer = malloc(async->bufsize);
604 if (!async->buffer) {
605 fprintf(stderr, "Error allocating buffer\n");
606 pthread_mutex_lock(&md->mutex);
609 pthread_mutex_unlock(&md->mutex);
613 ret = compress2(async->buffer,
614 (unsigned long *)&async->bufsize,
615 orig, async->size, md->compress_level);
623 pthread_mutex_lock(&md->mutex);
625 pthread_mutex_unlock(&md->mutex);
631 static void meta_cluster_init(struct metadump_struct *md, u64 start)
633 struct meta_cluster_header *header;
637 header = &md->cluster->header;
638 header->magic = cpu_to_le64(HEADER_MAGIC);
639 header->bytenr = cpu_to_le64(start);
640 header->nritems = cpu_to_le32(0);
641 header->compress = md->compress_level > 0 ?
642 COMPRESS_ZLIB : COMPRESS_NONE;
645 static void metadump_destroy(struct metadump_struct *md, int num_threads)
650 pthread_mutex_lock(&md->mutex);
652 pthread_cond_broadcast(&md->cond);
653 pthread_mutex_unlock(&md->mutex);
655 for (i = 0; i < num_threads; i++)
656 pthread_join(md->threads[i], NULL);
658 pthread_cond_destroy(&md->cond);
659 pthread_mutex_destroy(&md->mutex);
661 while ((n = rb_first(&md->name_tree))) {
664 name = rb_entry(n, struct name, n);
665 rb_erase(n, &md->name_tree);
674 static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
675 FILE *out, int num_threads, int compress_level,
680 memset(md, 0, sizeof(*md));
681 pthread_cond_init(&md->cond, NULL);
682 pthread_mutex_init(&md->mutex, NULL);
683 INIT_LIST_HEAD(&md->list);
684 INIT_LIST_HEAD(&md->ordered);
687 md->pending_start = (u64)-1;
688 md->compress_level = compress_level;
689 md->cluster = calloc(1, BLOCK_SIZE);
690 md->sanitize_names = sanitize_names;
691 if (sanitize_names > 1)
692 crc32c_optimization_init();
695 pthread_cond_destroy(&md->cond);
696 pthread_mutex_destroy(&md->mutex);
700 meta_cluster_init(md, 0);
704 md->name_tree.rb_node = NULL;
705 md->num_threads = num_threads;
706 md->threads = calloc(num_threads, sizeof(pthread_t));
709 pthread_cond_destroy(&md->cond);
710 pthread_mutex_destroy(&md->mutex);
714 for (i = 0; i < num_threads; i++) {
715 ret = pthread_create(md->threads + i, NULL, dump_worker, md);
721 metadump_destroy(md, i + 1);
726 static int write_zero(FILE *out, size_t size)
728 static char zero[BLOCK_SIZE];
729 return fwrite(zero, size, 1, out);
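/*
 * write_buffers() flushes one complete cluster: wait until the worker
 * threads have compressed every queued item, fill the index block with the
 * (bytenr, size) pair of each buffer, write the index block followed by the
 * buffers in order, and zero-pad the output to the next BLOCK_SIZE boundary.
 * *next is set to the offset where the following cluster will start.
 */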
732 static int write_buffers(struct metadump_struct *md, u64 *next)
734 struct meta_cluster_header *header = &md->cluster->header;
735 struct meta_cluster_item *item;
736 struct async_work *async;
742 if (list_empty(&md->ordered))
745 /* wait until all buffers are compressed */
746 while (!err && md->num_items > md->num_ready) {
747 struct timespec ts = {
751 pthread_mutex_unlock(&md->mutex);
752 nanosleep(&ts, NULL);
753 pthread_mutex_lock(&md->mutex);
758 fprintf(stderr, "One of the threads errored out %s\n",
763 /* set up and write index block */
764 list_for_each_entry(async, &md->ordered, ordered) {
765 item = md->cluster->items + nritems;
766 item->bytenr = cpu_to_le64(async->start);
767 item->size = cpu_to_le32(async->bufsize);
770 header->nritems = cpu_to_le32(nritems);
772 ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
774 fprintf(stderr, "Error writing out cluster: %d\n", errno);
779 bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
780 while (!list_empty(&md->ordered)) {
781 async = list_entry(md->ordered.next, struct async_work,
783 list_del_init(&async->ordered);
785 bytenr += async->bufsize;
787 ret = fwrite(async->buffer, async->bufsize, 1,
792 fprintf(stderr, "Error writing out cluster: %d\n",
800 /* zero unused space in the last block */
801 if (!err && bytenr & BLOCK_MASK) {
802 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
805 ret = write_zero(md->out, size);
807 fprintf(stderr, "Error zeroing out buffer: %d\n",
817 static int read_data_extent(struct metadump_struct *md,
818 struct async_work *async)
820 struct btrfs_multi_bio *multi = NULL;
821 struct btrfs_device *device;
822 u64 bytes_left = async->size;
823 u64 logical = async->start;
832 read_len = bytes_left;
833 ret = btrfs_map_block(&md->root->fs_info->mapping_tree, READ,
834 logical, &read_len, &multi, 0, NULL);
836 fprintf(stderr, "Couldn't map data block %d\n", ret);
840 device = multi->stripes[0].dev;
842 if (device->fd == 0) {
844 "Device we need to read from is not open\n");
849 bytenr = multi->stripes[0].physical;
852 read_len = min(read_len, bytes_left);
853 done = pread64(fd, async->buffer+offset, read_len, bytenr);
854 if (done < read_len) {
856 fprintf(stderr, "Error reading extent %d\n",
859 fprintf(stderr, "Short read\n");
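/*
 * flush_pending() packages the currently accumulated extent range
 * (md->pending_start / md->pending_size) into an async_work item: data
 * extents are read raw from the device, metadata blocks go through
 * read_tree_block() and copy_buffer() so csums, inline extents and
 * (optionally) names are scrubbed.  The item is queued for the compression
 * threads, and once ITEMS_PER_CLUSTER items are pending or 'done' is set the
 * whole cluster is written out via write_buffers().
 */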
871 static int flush_pending(struct metadump_struct *md, int done)
873 struct async_work *async = NULL;
874 struct extent_buffer *eb;
875 u64 blocksize = md->root->nodesize;
881 if (md->pending_size) {
882 async = calloc(1, sizeof(*async));
886 async->start = md->pending_start;
887 async->size = md->pending_size;
888 async->bufsize = async->size;
889 async->buffer = malloc(async->bufsize);
890 if (!async->buffer) {
895 start = async->start;
899 ret = read_data_extent(md, async);
907 while (!md->data && size > 0) {
908 u64 this_read = min(blocksize, size);
909 eb = read_tree_block(md->root, start, this_read, 0);
910 if (!extent_buffer_uptodate(eb)) {
914 "Error reading metadata block\n");
917 copy_buffer(md, async->buffer + offset, eb);
918 free_extent_buffer(eb);
924 md->pending_start = (u64)-1;
925 md->pending_size = 0;
930 pthread_mutex_lock(&md->mutex);
932 list_add_tail(&async->ordered, &md->ordered);
934 if (md->compress_level > 0) {
935 list_add_tail(&async->list, &md->list);
936 pthread_cond_signal(&md->cond);
941 if (md->num_items >= ITEMS_PER_CLUSTER || done) {
942 ret = write_buffers(md, &start);
944 fprintf(stderr, "Error writing buffers %d\n",
947 meta_cluster_init(md, start);
949 pthread_mutex_unlock(&md->mutex);
953 static int add_extent(u64 start, u64 size, struct metadump_struct *md,
957 if (md->data != data ||
958 md->pending_size + size > MAX_PENDING_SIZE ||
959 md->pending_start + md->pending_size != start) {
960 ret = flush_pending(md, 0);
963 md->pending_start = start;
965 readahead_tree_block(md->root, start, size, 0);
966 md->pending_size += size;
971 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
972 static int is_tree_block(struct btrfs_root *extent_root,
973 struct btrfs_path *path, u64 bytenr)
975 struct extent_buffer *leaf;
976 struct btrfs_key key;
980 leaf = path->nodes[0];
982 struct btrfs_extent_ref_v0 *ref_item;
984 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
985 ret = btrfs_next_leaf(extent_root, path);
990 leaf = path->nodes[0];
992 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
993 if (key.objectid != bytenr)
995 if (key.type != BTRFS_EXTENT_REF_V0_KEY)
997 ref_item = btrfs_item_ptr(leaf, path->slots[0],
998 struct btrfs_extent_ref_v0);
999 ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
1000 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
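/*
 * copy_tree_blocks() queues a metadata tree recursively: every node and leaf
 * is added via add_extent(), and when walking the root tree it also descends
 * into the trees referenced by ROOT_ITEM entries.
 */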
1008 static int copy_tree_blocks(struct btrfs_root *root, struct extent_buffer *eb,
1009 struct metadump_struct *metadump, int root_tree)
1011 struct extent_buffer *tmp;
1012 struct btrfs_root_item *ri;
1013 struct btrfs_key key;
1020 ret = add_extent(btrfs_header_bytenr(eb), root->leafsize, metadump, 0);
1022 fprintf(stderr, "Error adding metadata block\n");
1026 if (btrfs_header_level(eb) == 0 && !root_tree)
1029 level = btrfs_header_level(eb);
1030 nritems = btrfs_header_nritems(eb);
1031 for (i = 0; i < nritems; i++) {
1033 btrfs_item_key_to_cpu(eb, &key, i);
1034 if (key.type != BTRFS_ROOT_ITEM_KEY)
1036 ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
1037 bytenr = btrfs_disk_root_bytenr(eb, ri);
1038 tmp = read_tree_block(root, bytenr, root->leafsize, 0);
1039 if (!extent_buffer_uptodate(tmp)) {
1041 "Error reading log root block\n");
1044 ret = copy_tree_blocks(root, tmp, metadump, 0);
1045 free_extent_buffer(tmp);
1049 bytenr = btrfs_node_blockptr(eb, i);
1050 tmp = read_tree_block(root, bytenr, root->leafsize, 0);
1051 if (!extent_buffer_uptodate(tmp)) {
1052 fprintf(stderr, "Error reading log block\n");
1055 ret = copy_tree_blocks(root, tmp, metadump, root_tree);
1056 free_extent_buffer(tmp);
1065 static int copy_log_trees(struct btrfs_root *root,
1066 struct metadump_struct *metadump,
1067 struct btrfs_path *path)
1069 u64 blocknr = btrfs_super_log_root(root->fs_info->super_copy);
1074 if (!root->fs_info->log_root_tree ||
1075 !root->fs_info->log_root_tree->node) {
1076 fprintf(stderr, "Error copying tree log, it wasn't set up\n");
1080 return copy_tree_blocks(root, root->fs_info->log_root_tree->node,
1084 static int copy_space_cache(struct btrfs_root *root,
1085 struct metadump_struct *metadump,
1086 struct btrfs_path *path)
1088 struct extent_buffer *leaf;
1089 struct btrfs_file_extent_item *fi;
1090 struct btrfs_key key;
1091 u64 bytenr, num_bytes;
1094 root = root->fs_info->tree_root;
1097 key.type = BTRFS_EXTENT_DATA_KEY;
1100 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1102 fprintf(stderr, "Error searching for free space inode %d\n",
1107 leaf = path->nodes[0];
1110 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1111 ret = btrfs_next_leaf(root, path);
1113 fprintf(stderr, "Error going to next leaf "
1119 leaf = path->nodes[0];
1122 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1123 if (key.type != BTRFS_EXTENT_DATA_KEY) {
1128 fi = btrfs_item_ptr(leaf, path->slots[0],
1129 struct btrfs_file_extent_item);
1130 if (btrfs_file_extent_type(leaf, fi) !=
1131 BTRFS_FILE_EXTENT_REG) {
1136 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1137 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1138 ret = add_extent(bytenr, num_bytes, metadump, 1);
1140 fprintf(stderr, "Error adding space cache blocks %d\n",
1142 btrfs_release_path(path);
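/*
 * copy_from_extent_tree() walks the extent tree starting just past the
 * primary superblock and queues every tree block it finds: METADATA_ITEMs,
 * EXTENT_ITEMs flagged TREE_BLOCK, or (with BTRFS_COMPAT_EXTENT_TREE_V0)
 * extents whose V0 back references identify them as tree blocks.
 */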
1151 static int copy_from_extent_tree(struct metadump_struct *metadump,
1152 struct btrfs_path *path)
1154 struct btrfs_root *extent_root;
1155 struct extent_buffer *leaf;
1156 struct btrfs_extent_item *ei;
1157 struct btrfs_key key;
1162 extent_root = metadump->root->fs_info->extent_root;
1163 bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
1164 key.objectid = bytenr;
1165 key.type = BTRFS_EXTENT_ITEM_KEY;
1168 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1170 fprintf(stderr, "Error searching extent root %d\n", ret);
1175 leaf = path->nodes[0];
1178 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1179 ret = btrfs_next_leaf(extent_root, path);
1181 fprintf(stderr, "Error going to next leaf %d"
1189 leaf = path->nodes[0];
1192 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1193 if (key.objectid < bytenr ||
1194 (key.type != BTRFS_EXTENT_ITEM_KEY &&
1195 key.type != BTRFS_METADATA_ITEM_KEY)) {
1200 bytenr = key.objectid;
1201 if (key.type == BTRFS_METADATA_ITEM_KEY)
1202 num_bytes = extent_root->leafsize;
1204 num_bytes = key.offset;
1206 if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
1207 ei = btrfs_item_ptr(leaf, path->slots[0],
1208 struct btrfs_extent_item);
1209 if (btrfs_extent_flags(leaf, ei) &
1210 BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1211 ret = add_extent(bytenr, num_bytes, metadump,
1214 fprintf(stderr, "Error adding block "
1220 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1221 ret = is_tree_block(extent_root, path, bytenr);
1223 fprintf(stderr, "Error checking tree block "
1229 ret = add_extent(bytenr, num_bytes, metadump,
1232 fprintf(stderr, "Error adding block "
1239 fprintf(stderr, "Either extent tree corruption or "
1240 "you haven't built with V0 support\n");
1245 bytenr += num_bytes;
1248 btrfs_release_path(path);
1253 static int create_metadump(const char *input, FILE *out, int num_threads,
1254 int compress_level, int sanitize, int walk_trees)
1256 struct btrfs_root *root;
1257 struct btrfs_path *path = NULL;
1258 struct metadump_struct metadump;
1262 root = open_ctree(input, 0, 0);
1264 fprintf(stderr, "Open ctree failed\n");
1268 BUG_ON(root->nodesize != root->leafsize);
1270 ret = metadump_init(&metadump, root, out, num_threads,
1271 compress_level, sanitize);
1273 fprintf(stderr, "Error initializing metadump %d\n", ret);
1278 ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
1281 fprintf(stderr, "Error adding metadata %d\n", ret);
1286 path = btrfs_alloc_path();
1288 fprintf(stderr, "Out of memory allocating path\n");
1294 ret = copy_tree_blocks(root, root->fs_info->chunk_root->node,
1301 ret = copy_tree_blocks(root, root->fs_info->tree_root->node,
1308 ret = copy_from_extent_tree(&metadump, path);
1315 ret = copy_log_trees(root, &metadump, path);
1321 ret = copy_space_cache(root, &metadump, path);
1323 ret = flush_pending(&metadump, 1);
1327 fprintf(stderr, "Error flushing pending %d\n", ret);
1330 metadump_destroy(&metadump, num_threads);
1332 btrfs_free_path(path);
1333 ret = close_ctree(root);
1334 return err ? err : ret;
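/*
 * update_super_old() serves the old (-o) restore path: it sets
 * BTRFS_SUPER_FLAG_METADUMP and replaces the sys_chunk_array with a single
 * SYSTEM chunk of unlimited length and one stripe at offset 0, i.e. an
 * identity mapping onto the restore device, so the image can be read without
 * rebuilding the original chunk layout.
 */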
1337 static void update_super_old(u8 *buffer)
1339 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1340 struct btrfs_chunk *chunk;
1341 struct btrfs_disk_key *key;
1342 u32 sectorsize = btrfs_super_sectorsize(super);
1343 u64 flags = btrfs_super_flags(super);
1345 flags |= BTRFS_SUPER_FLAG_METADUMP;
1346 btrfs_set_super_flags(super, flags);
1348 key = (struct btrfs_disk_key *)(super->sys_chunk_array);
1349 chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
1350 sizeof(struct btrfs_disk_key));
1352 btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1353 btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
1354 btrfs_set_disk_key_offset(key, 0);
1356 btrfs_set_stack_chunk_length(chunk, (u64)-1);
1357 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
1358 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
1359 btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
1360 btrfs_set_stack_chunk_io_align(chunk, sectorsize);
1361 btrfs_set_stack_chunk_io_width(chunk, sectorsize);
1362 btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
1363 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1364 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1365 chunk->stripe.devid = super->dev_item.devid;
1366 btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
1367 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
1368 btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
1369 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
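/*
 * update_super() rewrites the superblock's sys_chunk_array for the regular
 * restore path: every chunk item is reduced to a single stripe owned by the
 * superblock's dev_item, its type forced to SYSTEM, and the array compacted
 * before the new array size is stored and the block checksum recomputed.
 */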
1372 static int update_super(u8 *buffer)
1374 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1375 struct btrfs_chunk *chunk;
1376 struct btrfs_disk_key *disk_key;
1377 struct btrfs_key key;
1378 u32 new_array_size = 0;
1381 u8 *ptr, *write_ptr;
1382 int old_num_stripes;
1384 write_ptr = ptr = super->sys_chunk_array;
1385 array_size = btrfs_super_sys_array_size(super);
1387 while (cur < array_size) {
1388 disk_key = (struct btrfs_disk_key *)ptr;
1389 btrfs_disk_key_to_cpu(&key, disk_key);
1391 new_array_size += sizeof(*disk_key);
1392 memmove(write_ptr, ptr, sizeof(*disk_key));
1394 write_ptr += sizeof(*disk_key);
1395 ptr += sizeof(*disk_key);
1396 cur += sizeof(*disk_key);
1398 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1399 chunk = (struct btrfs_chunk *)ptr;
1400 old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1401 chunk = (struct btrfs_chunk *)write_ptr;
1403 memmove(write_ptr, ptr, sizeof(*chunk));
1404 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1405 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1406 btrfs_set_stack_chunk_type(chunk,
1407 BTRFS_BLOCK_GROUP_SYSTEM);
1408 chunk->stripe.devid = super->dev_item.devid;
1409 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
1411 new_array_size += sizeof(*chunk);
1413 fprintf(stderr, "Bogus key in the sys chunk array "
1417 write_ptr += sizeof(*chunk);
1418 ptr += btrfs_chunk_item_size(old_num_stripes);
1419 cur += btrfs_chunk_item_size(old_num_stripes);
1422 btrfs_set_super_sys_array_size(super, new_array_size);
1423 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
1428 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size)
1430 struct extent_buffer *eb;
1432 eb = malloc(sizeof(struct extent_buffer) + size);
1435 memset(eb, 0, sizeof(struct extent_buffer) + size);
1442 static void truncate_item(struct extent_buffer *eb, int slot, u32 new_size)
1444 struct btrfs_item *item;
1452 old_size = btrfs_item_size_nr(eb, slot);
1453 if (old_size == new_size)
1456 nritems = btrfs_header_nritems(eb);
1457 data_end = btrfs_item_offset_nr(eb, nritems - 1);
1459 old_data_start = btrfs_item_offset_nr(eb, slot);
1460 size_diff = old_size - new_size;
1462 for (i = slot; i < nritems; i++) {
1464 item = btrfs_item_nr(i);
1465 ioff = btrfs_item_offset(eb, item);
1466 btrfs_set_item_offset(eb, item, ioff + size_diff);
1469 memmove_extent_buffer(eb, btrfs_leaf_data(eb) + data_end + size_diff,
1470 btrfs_leaf_data(eb) + data_end,
1471 old_data_start + new_size - data_end);
1472 item = btrfs_item_nr(slot);
1473 btrfs_set_item_size(eb, item, new_size);
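/*
 * fixup_chunk_tree_block() adjusts restored chunk tree leaves to match the
 * single target device: each chunk item is truncated to one stripe, the RAID
 * profile bits are cleared from its type, and devid/uuid are pointed at the
 * restore device before the block checksum is recomputed.
 */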
1476 static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
1477 struct async_work *async, u8 *buffer,
1480 struct extent_buffer *eb;
1481 size_t size_left = size;
1482 u64 bytenr = async->start;
1485 if (size_left % mdres->leafsize)
1488 eb = alloc_dummy_eb(bytenr, mdres->leafsize);
1494 memcpy(eb->data, buffer, mdres->leafsize);
1496 if (btrfs_header_bytenr(eb) != bytenr)
1498 if (memcmp(mdres->fsid,
1499 eb->data + offsetof(struct btrfs_header, fsid),
1503 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID)
1506 if (btrfs_header_level(eb) != 0)
1509 for (i = 0; i < btrfs_header_nritems(eb); i++) {
1510 struct btrfs_chunk chunk;
1511 struct btrfs_key key;
1514 btrfs_item_key_to_cpu(eb, &key, i);
1515 if (key.type != BTRFS_CHUNK_ITEM_KEY)
1517 truncate_item(eb, i, sizeof(chunk));
1518 read_extent_buffer(eb, &chunk,
1519 btrfs_item_ptr_offset(eb, i),
1522 /* Zero out the RAID profile */
1523 type = btrfs_stack_chunk_type(&chunk);
1524 type &= (BTRFS_BLOCK_GROUP_DATA |
1525 BTRFS_BLOCK_GROUP_SYSTEM |
1526 BTRFS_BLOCK_GROUP_METADATA |
1527 BTRFS_BLOCK_GROUP_DUP);
1528 btrfs_set_stack_chunk_type(&chunk, type);
1530 btrfs_set_stack_chunk_num_stripes(&chunk, 1);
1531 btrfs_set_stack_chunk_sub_stripes(&chunk, 0);
1532 btrfs_set_stack_stripe_devid(&chunk.stripe, mdres->devid);
1533 memcpy(chunk.stripe.dev_uuid, mdres->uuid,
1535 write_extent_buffer(eb, &chunk,
1536 btrfs_item_ptr_offset(eb, i),
1539 memcpy(buffer, eb->data, eb->len);
1540 csum_block(buffer, eb->len);
1542 size_left -= mdres->leafsize;
1543 buffer += mdres->leafsize;
1544 bytenr += mdres->leafsize;
1551 static void write_backup_supers(int fd, u8 *buf)
1553 struct btrfs_super_block *super = (struct btrfs_super_block *)buf;
1560 if (fstat(fd, &st)) {
1561 fprintf(stderr, "Couldn't stat restore point, won't be able "
1562 "to write backup supers: %d\n", errno);
1566 size = btrfs_device_size(fd, &st);
1568 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1569 bytenr = btrfs_sb_offset(i);
1570 if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
1572 btrfs_set_super_bytenr(super, bytenr);
1573 csum_block(buf, BTRFS_SUPER_INFO_SIZE);
1574 ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
1575 if (ret < BTRFS_SUPER_INFO_SIZE) {
1577 fprintf(stderr, "Problem writing out backup "
1578 "super block %d, err %d\n", i, errno);
1580 fprintf(stderr, "Short write writing out "
1581 "backup super block\n");
1587 static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical, u64 *size)
1589 struct fs_chunk *fs_chunk;
1590 struct rb_node *entry;
1591 struct fs_chunk search;
1594 if (logical == BTRFS_SUPER_INFO_OFFSET)
1597 search.logical = logical;
1598 entry = tree_search(&mdres->chunk_tree, &search.n, chunk_cmp, 1);
1600 if (mdres->in != stdin)
1601 printf("Couldn't find a chunk, using logical\n");
1604 fs_chunk = rb_entry(entry, struct fs_chunk, n);
1605 if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
1607 offset = search.logical - fs_chunk->logical;
1609 *size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
1610 return fs_chunk->physical + offset;
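/*
 * restore_worker() is the per-thread restore loop: it takes async_work items
 * off mdres->list, decompresses them if the image is zlib-compressed, fixes
 * up superblock and chunk tree blocks, and writes the result out -- either
 * directly to the target, remapping offsets via logical_to_physical(), or
 * through write_data_to_disk() in the fixup_offset case.
 */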
1613 static void *restore_worker(void *data)
1615 struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
1616 struct async_work *async;
1622 int compress_size = MAX_PENDING_SIZE * 4;
1624 outfd = fileno(mdres->out);
1625 buffer = malloc(compress_size);
1627 fprintf(stderr, "Error allocating buffer\n");
1628 pthread_mutex_lock(&mdres->mutex);
1630 mdres->error = -ENOMEM;
1631 pthread_mutex_unlock(&mdres->mutex);
1640 pthread_mutex_lock(&mdres->mutex);
1641 while (!mdres->leafsize || list_empty(&mdres->list)) {
1643 pthread_mutex_unlock(&mdres->mutex);
1646 pthread_cond_wait(&mdres->cond, &mdres->mutex);
1648 async = list_entry(mdres->list.next, struct async_work, list);
1649 list_del_init(&async->list);
1650 pthread_mutex_unlock(&mdres->mutex);
1652 if (mdres->compress_method == COMPRESS_ZLIB) {
1653 size = compress_size;
1654 ret = uncompress(buffer, (unsigned long *)&size,
1655 async->buffer, async->bufsize);
1657 fprintf(stderr, "Error decompressing %d\n",
1663 outbuf = async->buffer;
1664 size = async->bufsize;
1667 if (!mdres->multi_devices) {
1668 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1669 if (mdres->old_restore) {
1670 update_super_old(outbuf);
1672 ret = update_super(outbuf);
1676 } else if (!mdres->old_restore) {
1677 ret = fixup_chunk_tree_block(mdres, async, outbuf, size);
1683 if (!mdres->fixup_offset) {
1685 u64 chunk_size = size;
1686 if (!mdres->multi_devices && !mdres->old_restore)
1687 bytenr = logical_to_physical(mdres,
1688 async->start + offset,
1691 bytenr = async->start + offset;
1693 ret = pwrite64(outfd, outbuf+offset, chunk_size,
1695 if (ret != chunk_size) {
1697 fprintf(stderr, "Error writing to "
1698 "device %d\n", errno);
1702 fprintf(stderr, "Short write\n");
1708 offset += chunk_size;
1710 } else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
1711 ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
1713 printk("Error writing data\n");
1719 /* backup super blocks are already there at fixup_offset stage */
1720 if (!mdres->multi_devices && async->start == BTRFS_SUPER_INFO_OFFSET)
1721 write_backup_supers(outfd, outbuf);
1723 pthread_mutex_lock(&mdres->mutex);
1724 if (err && !mdres->error)
1727 pthread_mutex_unlock(&mdres->mutex);
1729 free(async->buffer);
1737 static void mdrestore_destroy(struct mdrestore_struct *mdres, int num_threads)
1742 while ((n = rb_first(&mdres->chunk_tree))) {
1743 struct fs_chunk *entry;
1745 entry = rb_entry(n, struct fs_chunk, n);
1746 rb_erase(n, &mdres->chunk_tree);
1749 pthread_mutex_lock(&mdres->mutex);
1751 pthread_cond_broadcast(&mdres->cond);
1752 pthread_mutex_unlock(&mdres->mutex);
1754 for (i = 0; i < num_threads; i++)
1755 pthread_join(mdres->threads[i], NULL);
1757 pthread_cond_destroy(&mdres->cond);
1758 pthread_mutex_destroy(&mdres->mutex);
1759 free(mdres->threads);
1762 static int mdrestore_init(struct mdrestore_struct *mdres,
1763 FILE *in, FILE *out, int old_restore,
1764 int num_threads, int fixup_offset,
1765 struct btrfs_fs_info *info, int multi_devices)
1769 memset(mdres, 0, sizeof(*mdres));
1770 pthread_cond_init(&mdres->cond, NULL);
1771 pthread_mutex_init(&mdres->mutex, NULL);
1772 INIT_LIST_HEAD(&mdres->list);
1775 mdres->old_restore = old_restore;
1776 mdres->chunk_tree.rb_node = NULL;
1777 mdres->fixup_offset = fixup_offset;
1779 mdres->multi_devices = multi_devices;
1784 mdres->num_threads = num_threads;
1785 mdres->threads = calloc(num_threads, sizeof(pthread_t));
1786 if (!mdres->threads)
1788 for (i = 0; i < num_threads; i++) {
1789 ret = pthread_create(mdres->threads + i, NULL, restore_worker,
1795 mdrestore_destroy(mdres, i + 1);
1799 static int fill_mdres_info(struct mdrestore_struct *mdres,
1800 struct async_work *async)
1802 struct btrfs_super_block *super;
1807 /* We've already been initialized */
1808 if (mdres->leafsize)
1811 if (mdres->compress_method == COMPRESS_ZLIB) {
1812 size_t size = MAX_PENDING_SIZE * 2;
1814 buffer = malloc(MAX_PENDING_SIZE * 2);
1817 ret = uncompress(buffer, (unsigned long *)&size,
1818 async->buffer, async->bufsize);
1820 fprintf(stderr, "Error decompressing %d\n", ret);
1826 outbuf = async->buffer;
1829 super = (struct btrfs_super_block *)outbuf;
1830 mdres->leafsize = btrfs_super_leafsize(super);
1831 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
1832 memcpy(mdres->uuid, super->dev_item.uuid,
1834 mdres->devid = le64_to_cpu(super->dev_item.devid);
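/*
 * add_cluster() consumes one cluster from the image: for every index entry
 * it allocates an async_work, reads the (possibly compressed) buffer, seeds
 * mdres from the superblock item when it sees it, and queues the work for
 * the restore threads.  It then skips the padding so *next points at the
 * following cluster.
 */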
1839 static int add_cluster(struct meta_cluster *cluster,
1840 struct mdrestore_struct *mdres, u64 *next)
1842 struct meta_cluster_item *item;
1843 struct meta_cluster_header *header = &cluster->header;
1844 struct async_work *async;
1849 BUG_ON(mdres->num_items);
1850 mdres->compress_method = header->compress;
1852 bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
1853 nritems = le32_to_cpu(header->nritems);
1854 for (i = 0; i < nritems; i++) {
1855 item = &cluster->items[i];
1856 async = calloc(1, sizeof(*async));
1858 fprintf(stderr, "Error allocating async\n");
1861 async->start = le64_to_cpu(item->bytenr);
1862 async->bufsize = le32_to_cpu(item->size);
1863 async->buffer = malloc(async->bufsize);
1864 if (!async->buffer) {
1865 fprintf(stderr, "Error allocating async buffer\n");
1869 ret = fread(async->buffer, async->bufsize, 1, mdres->in);
1871 fprintf(stderr, "Error reading buffer %d\n", errno);
1872 free(async->buffer);
1876 bytenr += async->bufsize;
1878 pthread_mutex_lock(&mdres->mutex);
1879 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1880 ret = fill_mdres_info(mdres, async);
1882 fprintf(stderr, "Error setting up restore\n");
1883 pthread_mutex_unlock(&mdres->mutex);
1884 free(async->buffer);
1889 list_add_tail(&async->list, &mdres->list);
1891 pthread_cond_signal(&mdres->cond);
1892 pthread_mutex_unlock(&mdres->mutex);
1894 if (bytenr & BLOCK_MASK) {
1895 char buffer[BLOCK_MASK];
1896 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
1899 ret = fread(buffer, size, 1, mdres->in);
1901 fprintf(stderr, "Error reading in buffer %d\n", errno);
1909 static int wait_for_worker(struct mdrestore_struct *mdres)
1913 pthread_mutex_lock(&mdres->mutex);
1915 while (!ret && mdres->num_items > 0) {
1916 struct timespec ts = {
1918 .tv_nsec = 10000000,
1920 pthread_mutex_unlock(&mdres->mutex);
1921 nanosleep(&ts, NULL);
1922 pthread_mutex_lock(&mdres->mutex);
1925 pthread_mutex_unlock(&mdres->mutex);
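/*
 * read_chunk_block() parses one chunk tree block found in an image buffer:
 * internal nodes recurse via search_for_chunk_blocks(), while leaf blocks
 * have their chunk items recorded as fs_chunk entries (logical, physical,
 * bytes) in mdres->chunk_tree for later logical_to_physical() lookups.
 */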
1929 static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
1930 u64 bytenr, u64 item_bytenr, u32 bufsize,
1933 struct extent_buffer *eb;
1937 eb = alloc_dummy_eb(bytenr, mdres->leafsize);
1943 while (item_bytenr != bytenr) {
1944 buffer += mdres->leafsize;
1945 item_bytenr += mdres->leafsize;
1948 memcpy(eb->data, buffer, mdres->leafsize);
1949 if (btrfs_header_bytenr(eb) != bytenr) {
1950 fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
1955 if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
1957 fprintf(stderr, "Fsid doesn't match\n");
1962 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
1963 fprintf(stderr, "Does not belong to the chunk tree\n");
1968 for (i = 0; i < btrfs_header_nritems(eb); i++) {
1969 struct btrfs_chunk chunk;
1970 struct fs_chunk *fs_chunk;
1971 struct btrfs_key key;
1973 if (btrfs_header_level(eb)) {
1974 u64 blockptr = btrfs_node_blockptr(eb, i);
1976 ret = search_for_chunk_blocks(mdres, blockptr,
1983 /* Yay a leaf! We loves leafs! */
1984 btrfs_item_key_to_cpu(eb, &key, i);
1985 if (key.type != BTRFS_CHUNK_ITEM_KEY)
1988 fs_chunk = malloc(sizeof(struct fs_chunk));
1990 fprintf(stderr, "Error allocating chunk\n");
1994 memset(fs_chunk, 0, sizeof(*fs_chunk));
1995 read_extent_buffer(eb, &chunk, btrfs_item_ptr_offset(eb, i),
1998 fs_chunk->logical = key.offset;
1999 fs_chunk->physical = btrfs_stack_stripe_offset(&chunk.stripe);
2000 fs_chunk->bytes = btrfs_stack_chunk_length(&chunk);
2001 tree_insert(&mdres->chunk_tree, &fs_chunk->n, chunk_cmp);
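/*
 * search_for_chunk_blocks() scans the image cluster by cluster, starting at
 * cluster_bytenr, until it finds the metadata block at logical address
 * 'search' and hands it to read_chunk_block().  This is how the chunk tree
 * is reconstructed before anything is written to the restore target.
 */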
2008 /* If you have to ask you aren't worthy */
2009 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
2010 u64 search, u64 cluster_bytenr)
2012 struct meta_cluster *cluster;
2013 struct meta_cluster_header *header;
2014 struct meta_cluster_item *item;
2015 u64 current_cluster = cluster_bytenr, bytenr;
2017 u32 bufsize, nritems, i;
2018 u32 max_size = MAX_PENDING_SIZE * 2;
2019 u8 *buffer, *tmp = NULL;
2022 cluster = malloc(BLOCK_SIZE);
2024 fprintf(stderr, "Error allocating cluster\n");
2028 buffer = malloc(max_size);
2030 fprintf(stderr, "Error allocating buffer\n");
2035 if (mdres->compress_method == COMPRESS_ZLIB) {
2036 tmp = malloc(max_size);
2038 fprintf(stderr, "Error allocating tmp buffer\n");
2045 bytenr = current_cluster;
2047 if (fseek(mdres->in, current_cluster, SEEK_SET)) {
2048 fprintf(stderr, "Error seeking: %d\n", errno);
2053 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
2055 if (cluster_bytenr != 0) {
2057 current_cluster = 0;
2061 printf("ok this is where we screwed up?\n");
2064 } else if (ret < 0) {
2065 fprintf(stderr, "Error reading image\n");
2070 header = &cluster->header;
2071 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2072 le64_to_cpu(header->bytenr) != current_cluster) {
2073 fprintf(stderr, "bad header in metadump image\n");
2078 bytenr += BLOCK_SIZE;
2079 nritems = le32_to_cpu(header->nritems);
2080 for (i = 0; i < nritems; i++) {
2083 item = &cluster->items[i];
2084 bufsize = le32_to_cpu(item->size);
2085 item_bytenr = le64_to_cpu(item->bytenr);
2087 if (bufsize > max_size) {
2088 fprintf(stderr, "item %u size %u too big\n",
2094 if (mdres->compress_method == COMPRESS_ZLIB) {
2095 ret = fread(tmp, bufsize, 1, mdres->in);
2097 fprintf(stderr, "Error reading: %d\n",
2104 ret = uncompress(buffer,
2105 (unsigned long *)&size, tmp,
2108 fprintf(stderr, "Error decompressing "
2114 ret = fread(buffer, bufsize, 1, mdres->in);
2116 fprintf(stderr, "Error reading: %d\n",
2125 if (item_bytenr <= search &&
2126 item_bytenr + size > search) {
2127 ret = read_chunk_block(mdres, buffer, search,
2141 if (bytenr & BLOCK_MASK)
2142 bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
2143 current_cluster = bytenr;
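/*
 * build_chunk_tree() locates the superblock item in the first cluster,
 * decompresses it if needed, records fsid, device uuid, devid and leafsize
 * in mdres, and then walks the chunk root via search_for_chunk_blocks() so
 * logical-to-physical translation is available before restoring blocks.
 */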
2152 static int build_chunk_tree(struct mdrestore_struct *mdres,
2153 struct meta_cluster *cluster)
2155 struct btrfs_super_block *super;
2156 struct meta_cluster_header *header;
2157 struct meta_cluster_item *item = NULL;
2158 u64 chunk_root_bytenr = 0;
2164 /* We can't seek with stdin so don't bother doing this */
2165 if (mdres->in == stdin)
2168 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
2170 fprintf(stderr, "Error reading in cluster: %d\n", errno);
2175 header = &cluster->header;
2176 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2177 le64_to_cpu(header->bytenr) != 0) {
2178 fprintf(stderr, "bad header in metadump image\n");
2182 bytenr += BLOCK_SIZE;
2183 mdres->compress_method = header->compress;
2184 nritems = le32_to_cpu(header->nritems);
2185 for (i = 0; i < nritems; i++) {
2186 item = &cluster->items[i];
2188 if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
2190 bytenr += le32_to_cpu(item->size);
2191 if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
2192 fprintf(stderr, "Error seeking: %d\n", errno);
2197 if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
2198 fprintf(stderr, "Huh, didn't find the super?\n");
2202 buffer = malloc(le32_to_cpu(item->size));
2204 fprintf(stderr, "Error allocating buffer\n");
2208 ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
2210 fprintf(stderr, "Error reading buffer: %d\n", errno);
2215 if (mdres->compress_method == COMPRESS_ZLIB) {
2216 size_t size = MAX_PENDING_SIZE * 2;
2219 tmp = malloc(MAX_PENDING_SIZE * 2);
2224 ret = uncompress(tmp, (unsigned long *)&size,
2225 buffer, le32_to_cpu(item->size));
2227 fprintf(stderr, "Error decompressing %d\n", ret);
2236 pthread_mutex_lock(&mdres->mutex);
2237 super = (struct btrfs_super_block *)buffer;
2238 chunk_root_bytenr = btrfs_super_chunk_root(super);
2239 mdres->leafsize = btrfs_super_leafsize(super);
2240 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
2241 memcpy(mdres->uuid, super->dev_item.uuid,
2243 mdres->devid = le64_to_cpu(super->dev_item.devid);
2245 pthread_mutex_unlock(&mdres->mutex);
2247 return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
2250 static int __restore_metadump(const char *input, FILE *out, int old_restore,
2251 int num_threads, int fixup_offset,
2252 const char *target, int multi_devices)
2254 struct meta_cluster *cluster = NULL;
2255 struct meta_cluster_header *header;
2256 struct mdrestore_struct mdrestore;
2257 struct btrfs_fs_info *info = NULL;
2262 if (!strcmp(input, "-")) {
2265 in = fopen(input, "r");
2267 perror("unable to open metadump image");
2272 /* NOTE: open with write mode */
2275 info = open_ctree_fs_info(target, 0, 0,
2277 OPEN_CTREE_RESTORE |
2278 OPEN_CTREE_PARTIAL);
2280 fprintf(stderr, "%s: open ctree failed\n", __func__);
2286 cluster = malloc(BLOCK_SIZE);
2288 fprintf(stderr, "Error allocating cluster\n");
2293 ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
2294 fixup_offset, info, multi_devices);
2296 fprintf(stderr, "Error initializing mdrestore %d\n", ret);
2297 goto failed_cluster;
2300 if (!multi_devices && !old_restore) {
2301 ret = build_chunk_tree(&mdrestore, cluster);
2306 if (in != stdin && fseek(in, 0, SEEK_SET)) {
2307 fprintf(stderr, "Error seeking %d\n", errno);
2312 ret = fread(cluster, BLOCK_SIZE, 1, in);
2316 header = &cluster->header;
2317 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2318 le64_to_cpu(header->bytenr) != bytenr) {
2319 fprintf(stderr, "bad header in metadump image\n");
2323 ret = add_cluster(cluster, &mdrestore, &bytenr);
2325 fprintf(stderr, "Error adding cluster\n");
2329 ret = wait_for_worker(&mdrestore);
2331 fprintf(stderr, "One of the threads errored out %d\n",
2337 mdrestore_destroy(&mdrestore, num_threads);
2341 if (fixup_offset && info)
2342 close_ctree(info->chunk_root);
2349 static int restore_metadump(const char *input, FILE *out, int old_restore,
2350 int num_threads, int multi_devices)
2352 return __restore_metadump(input, out, old_restore, num_threads, 0, NULL,
2356 static int fixup_metadump(const char *input, FILE *out, int num_threads,
2359 return __restore_metadump(input, out, 0, num_threads, 1, target, 1);
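/*
 * update_disk_super_on_device() supports -m restores: it looks up the dev
 * item for cur_devid in the restored chunk tree, copies the current
 * superblock to the given secondary device, patches in that device's id,
 * uuid and size fields, and writes the primary and backup super copies.
 */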
2362 static int update_disk_super_on_device(struct btrfs_fs_info *info,
2363 const char *other_dev, u64 cur_devid)
2365 struct btrfs_key key;
2366 struct extent_buffer *leaf;
2367 struct btrfs_path path;
2368 struct btrfs_dev_item *dev_item;
2369 struct btrfs_super_block *disk_super;
2370 char dev_uuid[BTRFS_UUID_SIZE];
2371 char fs_uuid[BTRFS_UUID_SIZE];
2372 u64 devid, type, io_align, io_width;
2373 u64 sector_size, total_bytes, bytes_used;
2378 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2379 key.type = BTRFS_DEV_ITEM_KEY;
2380 key.offset = cur_devid;
2382 btrfs_init_path(&path);
2383 ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
2385 fprintf(stderr, "search key failed\n");
2389 leaf = path.nodes[0];
2390 dev_item = btrfs_item_ptr(leaf, path.slots[0],
2391 struct btrfs_dev_item);
2393 devid = btrfs_device_id(leaf, dev_item);
2394 if (devid != cur_devid) {
2395 printk("devid %llu does not match %llu\n", devid, cur_devid);
2399 type = btrfs_device_type(leaf, dev_item);
2400 io_align = btrfs_device_io_align(leaf, dev_item);
2401 io_width = btrfs_device_io_width(leaf, dev_item);
2402 sector_size = btrfs_device_sector_size(leaf, dev_item);
2403 total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2404 bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2405 read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE);
2406 read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE);
2408 btrfs_release_path(&path);
2410 printk("update disk super on %s devid=%llu\n", other_dev, devid);
2412 /* update other devices' super block */
2413 fp = open(other_dev, O_CREAT | O_RDWR, 0600);
2415 fprintf(stderr, "could not open %s\n", other_dev);
2419 buf = malloc(BTRFS_SUPER_INFO_SIZE);
2426 memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);
2428 disk_super = (struct btrfs_super_block *)buf;
2429 dev_item = &disk_super->dev_item;
2431 btrfs_set_stack_device_type(dev_item, type);
2432 btrfs_set_stack_device_id(dev_item, devid);
2433 btrfs_set_stack_device_total_bytes(dev_item, total_bytes);
2434 btrfs_set_stack_device_bytes_used(dev_item, bytes_used);
2435 btrfs_set_stack_device_io_align(dev_item, io_align);
2436 btrfs_set_stack_device_io_width(dev_item, io_width);
2437 btrfs_set_stack_device_sector_size(dev_item, sector_size);
2438 memcpy(dev_item->uuid, dev_uuid, BTRFS_UUID_SIZE);
2439 memcpy(dev_item->fsid, fs_uuid, BTRFS_UUID_SIZE);
2440 csum_block((u8 *)buf, BTRFS_SUPER_INFO_SIZE);
2442 ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
2443 if (ret != BTRFS_SUPER_INFO_SIZE) {
2448 write_backup_supers(fp, (u8 *)buf);
2456 static void print_usage(void)
2458 fprintf(stderr, "usage: btrfs-image [options] source target\n");
2459 fprintf(stderr, "\t-r \trestore metadump image\n");
2460 fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
2461 fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
2462 fprintf(stderr, "\t-o \tdon't mess with the chunk tree when restoring\n");
2463 fprintf(stderr, "\t-s \tsanitize file names, use once to just use garbage, use twice if you want crc collisions\n");
2464 fprintf(stderr, "\t-w \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
2465 fprintf(stderr, "\t-m \trestore for multiple devices\n");
2466 fprintf(stderr, "\n");
2467 fprintf(stderr, "\tIn the dump mode, source is the btrfs device and target is the output file (use '-' for stdout).\n");
2468 fprintf(stderr, "\tIn the restore mode, source is the dumped image and target is the btrfs device/file.\n");
2472 int main(int argc, char *argv[])
2476 u64 num_threads = 0;
2477 u64 compress_level = 0;
2479 int old_restore = 0;
2481 int multi_devices = 0;
2485 int usage_error = 0;
2489 int c = getopt(argc, argv, "rc:t:oswm");
2497 num_threads = arg_strtou64(optarg);
2498 if (num_threads > 32)
2502 compress_level = arg_strtou64(optarg);
2503 if (compress_level > 9)
2524 argc = argc - optind;
2526 if (check_argc_min(argc, 2))
2533 fprintf(stderr, "Usage error: create and restore cannot be used at the same time\n");
2537 if (walk_trees || sanitize || compress_level) {
2538 fprintf(stderr, "Usage error: the -w, -s and -c options cannot be used with restore\n");
2541 if (multi_devices && dev_cnt < 2) {
2542 fprintf(stderr, "Usage error: not enough devices specified for -m option\n");
2545 if (!multi_devices && dev_cnt != 1) {
2546 fprintf(stderr, "Usage error: exactly one device must be specified without the -m option\n");
2554 source = argv[optind];
2555 target = argv[optind + 1];
2557 if (create && !strcmp(target, "-")) {
2560 out = fopen(target, "w+");
2562 perror("unable to create target file");
2567 if (num_threads == 0 && compress_level > 0) {
2568 num_threads = sysconf(_SC_NPROCESSORS_ONLN);
2569 if (num_threads <= 0)
2574 ret = check_mounted(source);
2576 fprintf(stderr, "Could not check mount status: %s\n",
2581 "WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");
2583 ret = create_metadump(source, out, num_threads,
2584 compress_level, sanitize, walk_trees);
2586 ret = restore_metadump(source, out, old_restore, 1,
2590 printk("%s failed (%s)\n", (create) ? "create" : "restore",
2595 /* extended support for multiple devices */
2596 if (!create && multi_devices) {
2597 struct btrfs_fs_info *info;
2601 info = open_ctree_fs_info(target, 0, 0,
2602 OPEN_CTREE_PARTIAL |
2603 OPEN_CTREE_RESTORE);
2606 fprintf(stderr, "unable to open %s error = %s\n",
2607 target, strerror(e));
2611 total_devs = btrfs_super_num_devices(info->super_copy);
2612 if (total_devs != dev_cnt) {
2613 printk("it needs %llu devices but has only %d\n",
2614 total_devs, dev_cnt);
2615 close_ctree(info->chunk_root);
2619 /* update super block on other disks */
2620 for (i = 2; i <= dev_cnt; i++) {
2621 ret = update_disk_super_on_device(info,
2622 argv[optind + i], (u64)i);
2624 printk("update disk super failed devid=%d (error=%d)\n",
2626 close_ctree(info->chunk_root);
2631 close_ctree(info->chunk_root);
2633 /* fix metadata block to map correct chunk */
2634 ret = fixup_metadump(source, out, 1, target);
2636 fprintf(stderr, "fix metadump failed (error=%d)\n",
2643 if (out == stdout) {
2647 if (ret && create) {
2650 unlink_ret = unlink(target);
2653 "unlink output file failed : %s\n",