2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
22 #include <sys/types.h>
28 #include "kerncompat.h"
32 #include "transaction.h"
35 #include "extent_io.h"
37 #define HEADER_MAGIC 0xbd5c25e27295668bULL
38 #define MAX_PENDING_SIZE (256 * 1024)
39 #define BLOCK_SIZE 1024
40 #define BLOCK_MASK (BLOCK_SIZE - 1)
42 #define COMPRESS_NONE 0
43 #define COMPRESS_ZLIB 1
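/*
 * Rough sketch of the metadump image layout, as implied by write_buffers()
 * and add_cluster() below: the image is a sequence of clusters.  Each
 * cluster starts with one BLOCK_SIZE block (struct meta_cluster) holding a
 * meta_cluster_header plus an index of meta_cluster_item entries, followed
 * by the item payloads (zlib-compressed when header->compress is
 * COMPRESS_ZLIB) and padded up to the next BLOCK_SIZE boundary.
 */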
45 struct meta_cluster_item {
48 } __attribute__ ((__packed__));
50 struct meta_cluster_header {
55 } __attribute__ ((__packed__));
57 /* cluster header + index items + buffers */
59 struct meta_cluster_header header;
60 struct meta_cluster_item items[];
61 } __attribute__ ((__packed__));
63 #define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
64 sizeof(struct meta_cluster_item))
72 struct list_head list;
76 struct list_head list;
77 struct list_head ordered;
85 struct metadump_struct {
86 struct btrfs_root *root;
89 struct meta_cluster *cluster;
93 pthread_mutex_t mutex;
95 struct rb_root name_tree;
97 struct list_head list;
98 struct list_head ordered;
120 struct mdrestore_struct {
126 pthread_mutex_t mutex;
129 struct rb_root chunk_tree;
130 struct rb_root physical_tree;
131 struct list_head list;
132 struct list_head overlapping_chunks;
137 u64 last_physical_offset;
138 u8 uuid[BTRFS_UUID_SIZE];
139 u8 fsid[BTRFS_FSID_SIZE];
147 int clear_space_cache;
148 struct btrfs_fs_info *info;
151 static void print_usage(void) __attribute__((noreturn));
152 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
153 u64 search, u64 cluster_bytenr);
154 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);
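/*
 * Recompute the checksum of a metadata or super block in place: crc32c
 * everything past the reserved csum area and store the result at the
 * start of the buffer.
 */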
156 static void csum_block(u8 *buf, size_t len)
158 char result[BTRFS_CRC32_SIZE];
160 crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
161 btrfs_csum_final(crc, result);
162 memcpy(buf, result, BTRFS_CRC32_SIZE);
165 static int has_name(struct btrfs_key *key)
168 case BTRFS_DIR_ITEM_KEY:
169 case BTRFS_DIR_INDEX_KEY:
170 case BTRFS_INODE_REF_KEY:
171 case BTRFS_INODE_EXTREF_KEY:
172 case BTRFS_XATTR_ITEM_KEY:
181 static char *generate_garbage(u32 name_len)
183 char *buf = malloc(name_len);
189 for (i = 0; i < name_len; i++) {
190 char c = rand() % 94 + 33;
200 static int name_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
202 struct name *entry = rb_entry(a, struct name, n);
203 struct name *ins = rb_entry(b, struct name, n);
206 len = min(ins->len, entry->len);
207 return memcmp(ins->val, entry->val, len);
210 static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
212 struct fs_chunk *entry = rb_entry(a, struct fs_chunk, l);
213 struct fs_chunk *ins = rb_entry(b, struct fs_chunk, l);
215 if (fuzz && ins->logical >= entry->logical &&
216 ins->logical < entry->logical + entry->bytes)
219 if (ins->logical < entry->logical)
221 else if (ins->logical > entry->logical)
226 static int physical_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
228 struct fs_chunk *entry = rb_entry(a, struct fs_chunk, p);
229 struct fs_chunk *ins = rb_entry(b, struct fs_chunk, p);
231 if (fuzz && ins->physical >= entry->physical &&
232 ins->physical < entry->physical + entry->bytes)
235 if (fuzz && entry->physical >= ins->physical &&
236 entry->physical < ins->physical + ins->bytes)
239 if (ins->physical < entry->physical)
241 else if (ins->physical > entry->physical)
246 static void tree_insert(struct rb_root *root, struct rb_node *ins,
247 int (*cmp)(struct rb_node *a, struct rb_node *b,
250 struct rb_node ** p = &root->rb_node;
251 struct rb_node * parent = NULL;
257 dir = cmp(*p, ins, 1);
266 rb_link_node(ins, parent, p);
267 rb_insert_color(ins, root);
270 static struct rb_node *tree_search(struct rb_root *root,
271 struct rb_node *search,
272 int (*cmp)(struct rb_node *a,
273 struct rb_node *b, int fuzz),
276 struct rb_node *n = root->rb_node;
280 dir = cmp(n, search, fuzz);
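/*
 * logical_to_physical(): map a logical bytenr onto the single restore
 * target using the chunk rb-tree built from the image; *size is clamped to
 * the end of the containing chunk, and the logical address is returned
 * unchanged when no chunk covers it.
 */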
292 static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical, u64 *size)
294 struct fs_chunk *fs_chunk;
295 struct rb_node *entry;
296 struct fs_chunk search;
299 if (logical == BTRFS_SUPER_INFO_OFFSET)
302 search.logical = logical;
303 entry = tree_search(&mdres->chunk_tree, &search.l, chunk_cmp, 1);
305 if (mdres->in != stdin)
306 printf("Couldn't find a chunk, using logical\n");
309 fs_chunk = rb_entry(entry, struct fs_chunk, l);
310 if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
312 offset = search.logical - fs_chunk->logical;
314 *size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
315 return fs_chunk->physical + offset;
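/*
 * Used by -ss sanitization: try to build a replacement name of the same
 * length whose crc32c matches the original so dir index hashes still line
 * up; fall back to plain garbage when no collision is found.
 */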
319 static char *find_collision(struct metadump_struct *md, char *name,
323 struct rb_node *entry;
325 unsigned long checksum;
331 entry = tree_search(&md->name_tree, &tmp.n, name_cmp, 0);
333 val = rb_entry(entry, struct name, n);
338 val = malloc(sizeof(struct name));
340 fprintf(stderr, "Couldn't sanitize name, enomem\n");
345 memset(val, 0, sizeof(*val));
349 val->sub = malloc(name_len);
351 fprintf(stderr, "Couldn't sanitize name, enomem\n");
357 checksum = crc32c(~1, val->val, name_len);
358 memset(val->sub, ' ', name_len);
361 if (crc32c(~1, val->sub, name_len) == checksum &&
362 memcmp(val->sub, val->val, val->len)) {
367 if (val->sub[i] == 127) {
372 } while (val->sub[i] == 127);
377 if (val->sub[i] == '/')
379 memset(val->sub, ' ', i);
384 if (val->sub[i] == '/')
390 fprintf(stderr, "Couldn't find a collision for '%.*s', "
391 "generating normal garbage, it won't match indexes\n",
393 for (i = 0; i < name_len; i++) {
394 char c = rand() % 94 + 33;
402 tree_insert(&md->name_tree, &val->n, name_cmp);
406 static void sanitize_dir_item(struct metadump_struct *md, struct extent_buffer *eb,
409 struct btrfs_dir_item *dir_item;
412 unsigned long name_ptr;
417 int free_garbage = (md->sanitize_names == 1);
419 dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
420 total_len = btrfs_item_size_nr(eb, slot);
421 while (cur < total_len) {
422 this_len = sizeof(*dir_item) +
423 btrfs_dir_name_len(eb, dir_item) +
424 btrfs_dir_data_len(eb, dir_item);
425 name_ptr = (unsigned long)(dir_item + 1);
426 name_len = btrfs_dir_name_len(eb, dir_item);
428 if (md->sanitize_names > 1) {
429 buf = malloc(name_len);
431 fprintf(stderr, "Couldn't sanitize name, "
435 read_extent_buffer(eb, buf, name_ptr, name_len);
436 garbage = find_collision(md, buf, name_len);
438 garbage = generate_garbage(name_len);
441 fprintf(stderr, "Couldn't sanitize name, enomem\n");
444 write_extent_buffer(eb, garbage, name_ptr, name_len);
446 dir_item = (struct btrfs_dir_item *)((char *)dir_item +
453 static void sanitize_inode_ref(struct metadump_struct *md,
454 struct extent_buffer *eb, int slot, int ext)
456 struct btrfs_inode_extref *extref;
457 struct btrfs_inode_ref *ref;
460 unsigned long name_ptr;
464 int free_garbage = (md->sanitize_names == 1);
466 item_size = btrfs_item_size_nr(eb, slot);
467 ptr = btrfs_item_ptr_offset(eb, slot);
468 while (cur_offset < item_size) {
470 extref = (struct btrfs_inode_extref *)(ptr +
472 name_ptr = (unsigned long)(&extref->name);
473 len = btrfs_inode_extref_name_len(eb, extref);
474 cur_offset += sizeof(*extref);
476 ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
477 len = btrfs_inode_ref_name_len(eb, ref);
478 name_ptr = (unsigned long)(ref + 1);
479 cur_offset += sizeof(*ref);
483 if (md->sanitize_names > 1) {
486 fprintf(stderr, "Couldn't sanitize name, "
490 read_extent_buffer(eb, buf, name_ptr, len);
491 garbage = find_collision(md, buf, len);
493 garbage = generate_garbage(len);
497 fprintf(stderr, "Couldn't sanitize name, enomem\n");
500 write_extent_buffer(eb, garbage, name_ptr, len);
506 static void sanitize_xattr(struct metadump_struct *md,
507 struct extent_buffer *eb, int slot)
509 struct btrfs_dir_item *dir_item;
510 unsigned long data_ptr;
513 dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
514 data_len = btrfs_dir_data_len(eb, dir_item);
516 data_ptr = (unsigned long)((char *)(dir_item + 1) +
517 btrfs_dir_name_len(eb, dir_item));
518 memset_extent_buffer(eb, 0, data_ptr, data_len);
521 static void sanitize_name(struct metadump_struct *md, u8 *dst,
522 struct extent_buffer *src, struct btrfs_key *key,
525 struct extent_buffer *eb;
527 eb = alloc_dummy_eb(src->start, src->len);
529 fprintf(stderr, "Couldn't sanitize name, no memory\n");
533 memcpy(eb->data, dst, eb->len);
536 case BTRFS_DIR_ITEM_KEY:
537 case BTRFS_DIR_INDEX_KEY:
538 sanitize_dir_item(md, eb, slot);
540 case BTRFS_INODE_REF_KEY:
541 sanitize_inode_ref(md, eb, slot, 0);
543 case BTRFS_INODE_EXTREF_KEY:
544 sanitize_inode_ref(md, eb, slot, 1);
546 case BTRFS_XATTR_ITEM_KEY:
547 sanitize_xattr(md, eb, slot);
553 memcpy(dst, eb->data, eb->len);
558 * zero inline extents and csum items
560 static void zero_items(struct metadump_struct *md, u8 *dst,
561 struct extent_buffer *src)
563 struct btrfs_file_extent_item *fi;
564 struct btrfs_item *item;
565 struct btrfs_key key;
566 u32 nritems = btrfs_header_nritems(src);
571 for (i = 0; i < nritems; i++) {
572 item = btrfs_item_nr(i);
573 btrfs_item_key_to_cpu(src, &key, i);
574 if (key.type == BTRFS_CSUM_ITEM_KEY) {
575 size = btrfs_item_size_nr(src, i);
576 memset(dst + btrfs_leaf_data(src) +
577 btrfs_item_offset_nr(src, i), 0, size);
581 if (md->sanitize_names && has_name(&key)) {
582 sanitize_name(md, dst, src, &key, i);
586 if (key.type != BTRFS_EXTENT_DATA_KEY)
589 fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
590 extent_type = btrfs_file_extent_type(src, fi);
591 if (extent_type != BTRFS_FILE_EXTENT_INLINE)
594 ptr = btrfs_file_extent_inline_start(fi);
595 size = btrfs_file_extent_inline_item_len(src, item);
596 memset(dst + ptr, 0, size);
601 * copy buffer and zero useless data in the buffer
603 static void copy_buffer(struct metadump_struct *md, u8 *dst,
604 struct extent_buffer *src)
610 memcpy(dst, src->data, src->len);
611 if (src->start == BTRFS_SUPER_INFO_OFFSET)
614 level = btrfs_header_level(src);
615 nritems = btrfs_header_nritems(src);
618 size = sizeof(struct btrfs_header);
619 memset(dst + size, 0, src->len - size);
620 } else if (level == 0) {
621 size = btrfs_leaf_data(src) +
622 btrfs_item_offset_nr(src, nritems - 1) -
623 btrfs_item_nr_offset(nritems);
624 memset(dst + btrfs_item_nr_offset(nritems), 0, size);
625 zero_items(md, dst, src);
627 size = offsetof(struct btrfs_node, ptrs) +
628 sizeof(struct btrfs_key_ptr) * nritems;
629 memset(dst + size, 0, src->len - size);
631 csum_block(dst, src->len);
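/*
 * Dump-side worker thread: pull async_work items off md->list and, when a
 * compression level is set, replace the raw buffer with its zlib
 * (compress2) output.
 */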
634 static void *dump_worker(void *data)
636 struct metadump_struct *md = (struct metadump_struct *)data;
637 struct async_work *async;
641 pthread_mutex_lock(&md->mutex);
642 while (list_empty(&md->list)) {
644 pthread_mutex_unlock(&md->mutex);
647 pthread_cond_wait(&md->cond, &md->mutex);
649 async = list_entry(md->list.next, struct async_work, list);
650 list_del_init(&async->list);
651 pthread_mutex_unlock(&md->mutex);
653 if (md->compress_level > 0) {
654 u8 *orig = async->buffer;
656 async->bufsize = compressBound(async->size);
657 async->buffer = malloc(async->bufsize);
658 if (!async->buffer) {
659 fprintf(stderr, "Error allocating buffer\n");
660 pthread_mutex_lock(&md->mutex);
663 pthread_mutex_unlock(&md->mutex);
667 ret = compress2(async->buffer,
668 (unsigned long *)&async->bufsize,
669 orig, async->size, md->compress_level);
677 pthread_mutex_lock(&md->mutex);
679 pthread_mutex_unlock(&md->mutex);
685 static void meta_cluster_init(struct metadump_struct *md, u64 start)
687 struct meta_cluster_header *header;
691 header = &md->cluster->header;
692 header->magic = cpu_to_le64(HEADER_MAGIC);
693 header->bytenr = cpu_to_le64(start);
694 header->nritems = cpu_to_le32(0);
695 header->compress = md->compress_level > 0 ?
696 COMPRESS_ZLIB : COMPRESS_NONE;
699 static void metadump_destroy(struct metadump_struct *md, int num_threads)
704 pthread_mutex_lock(&md->mutex);
706 pthread_cond_broadcast(&md->cond);
707 pthread_mutex_unlock(&md->mutex);
709 for (i = 0; i < num_threads; i++)
710 pthread_join(md->threads[i], NULL);
712 pthread_cond_destroy(&md->cond);
713 pthread_mutex_destroy(&md->mutex);
715 while ((n = rb_first(&md->name_tree))) {
718 name = rb_entry(n, struct name, n);
719 rb_erase(n, &md->name_tree);
728 static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
729 FILE *out, int num_threads, int compress_level,
734 memset(md, 0, sizeof(*md));
735 pthread_cond_init(&md->cond, NULL);
736 pthread_mutex_init(&md->mutex, NULL);
737 INIT_LIST_HEAD(&md->list);
738 INIT_LIST_HEAD(&md->ordered);
741 md->pending_start = (u64)-1;
742 md->compress_level = compress_level;
743 md->cluster = calloc(1, BLOCK_SIZE);
744 md->sanitize_names = sanitize_names;
745 if (sanitize_names > 1)
746 crc32c_optimization_init();
749 pthread_cond_destroy(&md->cond);
750 pthread_mutex_destroy(&md->mutex);
754 meta_cluster_init(md, 0);
758 md->name_tree.rb_node = NULL;
759 md->num_threads = num_threads;
760 md->threads = calloc(num_threads, sizeof(pthread_t));
763 pthread_cond_destroy(&md->cond);
764 pthread_mutex_destroy(&md->mutex);
768 for (i = 0; i < num_threads; i++) {
769 ret = pthread_create(md->threads + i, NULL, dump_worker, md);
775 metadump_destroy(md, i + 1);
780 static int write_zero(FILE *out, size_t size)
782 static char zero[BLOCK_SIZE];
783 return fwrite(zero, size, 1, out);
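/*
 * Flush one cluster to the output: wait for the workers to finish every
 * queued buffer, write the BLOCK_SIZE index block, then the buffers in
 * submission order, and pad the tail of the cluster to BLOCK_SIZE.  *next
 * is advanced to where the following cluster will start.
 */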
786 static int write_buffers(struct metadump_struct *md, u64 *next)
788 struct meta_cluster_header *header = &md->cluster->header;
789 struct meta_cluster_item *item;
790 struct async_work *async;
796 if (list_empty(&md->ordered))
799 /* wait until all buffers are compressed */
800 while (!err && md->num_items > md->num_ready) {
801 struct timespec ts = {
805 pthread_mutex_unlock(&md->mutex);
806 nanosleep(&ts, NULL);
807 pthread_mutex_lock(&md->mutex);
812 fprintf(stderr, "One of the threads failed: %s\n",
817 /* setup and write index block */
818 list_for_each_entry(async, &md->ordered, ordered) {
819 item = md->cluster->items + nritems;
820 item->bytenr = cpu_to_le64(async->start);
821 item->size = cpu_to_le32(async->bufsize);
824 header->nritems = cpu_to_le32(nritems);
826 ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
828 fprintf(stderr, "Error writing out cluster: %d\n", errno);
833 bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
834 while (!list_empty(&md->ordered)) {
835 async = list_entry(md->ordered.next, struct async_work,
837 list_del_init(&async->ordered);
839 bytenr += async->bufsize;
841 ret = fwrite(async->buffer, async->bufsize, 1,
846 fprintf(stderr, "Error writing out cluster: %d\n",
854 /* zero unused space in the last block */
855 if (!err && bytenr & BLOCK_MASK) {
856 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
859 ret = write_zero(md->out, size);
861 fprintf(stderr, "Error zeroing out buffer: %d\n",
871 static int read_data_extent(struct metadump_struct *md,
872 struct async_work *async)
874 struct btrfs_multi_bio *multi = NULL;
875 struct btrfs_device *device;
876 u64 bytes_left = async->size;
877 u64 logical = async->start;
886 read_len = bytes_left;
887 ret = btrfs_map_block(&md->root->fs_info->mapping_tree, READ,
888 logical, &read_len, &multi, 0, NULL);
890 fprintf(stderr, "Couldn't map data block %d\n", ret);
894 device = multi->stripes[0].dev;
896 if (device->fd == 0) {
898 "Device we need to read from is not open\n");
903 bytenr = multi->stripes[0].physical;
906 read_len = min(read_len, bytes_left);
907 done = pread64(fd, async->buffer+offset, read_len, bytenr);
908 if (done < read_len) {
910 fprintf(stderr, "Error reading extent %d\n",
913 fprintf(stderr, "Short read\n");
925 static int get_dev_fd(struct btrfs_root *root)
927 struct btrfs_device *dev;
929 dev = list_first_entry(&root->fs_info->fs_devices->devices,
930 struct btrfs_device, dev_list);
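/*
 * Turn the pending extent range (md->pending_start/pending_size) into an
 * async_work item: read data extents directly, or copy tree blocks through
 * copy_buffer(), queue the result for compression if enabled, and start a
 * new cluster once ITEMS_PER_CLUSTER items have accumulated or we're done.
 */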
934 static int flush_pending(struct metadump_struct *md, int done)
936 struct async_work *async = NULL;
937 struct extent_buffer *eb;
938 u64 blocksize = md->root->nodesize;
944 if (md->pending_size) {
945 async = calloc(1, sizeof(*async));
949 async->start = md->pending_start;
950 async->size = md->pending_size;
951 async->bufsize = async->size;
952 async->buffer = malloc(async->bufsize);
953 if (!async->buffer) {
958 start = async->start;
962 ret = read_data_extent(md, async);
971 * Balance can make the mapping not cover the super block, so
972 * just copy directly from one of the devices.
974 if (start == BTRFS_SUPER_INFO_OFFSET) {
975 int fd = get_dev_fd(md->root);
977 ret = pread64(fd, async->buffer, size, start);
981 fprintf(stderr, "Error reading superblock\n");
988 while (!md->data && size > 0) {
989 u64 this_read = min(blocksize, size);
990 eb = read_tree_block(md->root, start, this_read, 0);
991 if (!extent_buffer_uptodate(eb)) {
995 "Error reading metadata block\n");
998 copy_buffer(md, async->buffer + offset, eb);
999 free_extent_buffer(eb);
1001 offset += this_read;
1005 md->pending_start = (u64)-1;
1006 md->pending_size = 0;
1011 pthread_mutex_lock(&md->mutex);
1013 list_add_tail(&async->ordered, &md->ordered);
1015 if (md->compress_level > 0) {
1016 list_add_tail(&async->list, &md->list);
1017 pthread_cond_signal(&md->cond);
1022 if (md->num_items >= ITEMS_PER_CLUSTER || done) {
1023 ret = write_buffers(md, &start);
1025 fprintf(stderr, "Error writing buffers %d\n",
1028 meta_cluster_init(md, start);
1030 pthread_mutex_unlock(&md->mutex);
1034 static int add_extent(u64 start, u64 size, struct metadump_struct *md,
1038 if (md->data != data ||
1039 md->pending_size + size > MAX_PENDING_SIZE ||
1040 md->pending_start + md->pending_size != start) {
1041 ret = flush_pending(md, 0);
1044 md->pending_start = start;
1046 readahead_tree_block(md->root, start, size, 0);
1047 md->pending_size += size;
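/*
 * V0 compat: old extent trees have no TREE_BLOCK flag, so decide whether
 * an extent is a tree block by walking its EXTENT_REF_V0 items and
 * checking the referencing objectid.
 */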
1052 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1053 static int is_tree_block(struct btrfs_root *extent_root,
1054 struct btrfs_path *path, u64 bytenr)
1056 struct extent_buffer *leaf;
1057 struct btrfs_key key;
1061 leaf = path->nodes[0];
1063 struct btrfs_extent_ref_v0 *ref_item;
1065 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1066 ret = btrfs_next_leaf(extent_root, path);
1071 leaf = path->nodes[0];
1073 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1074 if (key.objectid != bytenr)
1076 if (key.type != BTRFS_EXTENT_REF_V0_KEY)
1078 ref_item = btrfs_item_ptr(leaf, path->slots[0],
1079 struct btrfs_extent_ref_v0);
1080 ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
1081 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
1089 static int copy_tree_blocks(struct btrfs_root *root, struct extent_buffer *eb,
1090 struct metadump_struct *metadump, int root_tree)
1092 struct extent_buffer *tmp;
1093 struct btrfs_root_item *ri;
1094 struct btrfs_key key;
1101 ret = add_extent(btrfs_header_bytenr(eb), root->leafsize, metadump, 0);
1103 fprintf(stderr, "Error adding metadata block\n");
1107 if (btrfs_header_level(eb) == 0 && !root_tree)
1110 level = btrfs_header_level(eb);
1111 nritems = btrfs_header_nritems(eb);
1112 for (i = 0; i < nritems; i++) {
1114 btrfs_item_key_to_cpu(eb, &key, i);
1115 if (key.type != BTRFS_ROOT_ITEM_KEY)
1117 ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
1118 bytenr = btrfs_disk_root_bytenr(eb, ri);
1119 tmp = read_tree_block(root, bytenr, root->leafsize, 0);
1120 if (!extent_buffer_uptodate(tmp)) {
1122 "Error reading log root block\n");
1125 ret = copy_tree_blocks(root, tmp, metadump, 0);
1126 free_extent_buffer(tmp);
1130 bytenr = btrfs_node_blockptr(eb, i);
1131 tmp = read_tree_block(root, bytenr, root->leafsize, 0);
1132 if (!extent_buffer_uptodate(tmp)) {
1133 fprintf(stderr, "Error reading log block\n");
1136 ret = copy_tree_blocks(root, tmp, metadump, root_tree);
1137 free_extent_buffer(tmp);
1146 static int copy_log_trees(struct btrfs_root *root,
1147 struct metadump_struct *metadump,
1148 struct btrfs_path *path)
1150 u64 blocknr = btrfs_super_log_root(root->fs_info->super_copy);
1155 if (!root->fs_info->log_root_tree ||
1156 !root->fs_info->log_root_tree->node) {
1157 fprintf(stderr, "Error copying tree log, it wasn't setup\n");
1161 return copy_tree_blocks(root, root->fs_info->log_root_tree->node,
1165 static int copy_space_cache(struct btrfs_root *root,
1166 struct metadump_struct *metadump,
1167 struct btrfs_path *path)
1169 struct extent_buffer *leaf;
1170 struct btrfs_file_extent_item *fi;
1171 struct btrfs_key key;
1172 u64 bytenr, num_bytes;
1175 root = root->fs_info->tree_root;
1178 key.type = BTRFS_EXTENT_DATA_KEY;
1181 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1183 fprintf(stderr, "Error searching for free space inode %d\n",
1188 leaf = path->nodes[0];
1191 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1192 ret = btrfs_next_leaf(root, path);
1194 fprintf(stderr, "Error going to next leaf "
1200 leaf = path->nodes[0];
1203 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1204 if (key.type != BTRFS_EXTENT_DATA_KEY) {
1209 fi = btrfs_item_ptr(leaf, path->slots[0],
1210 struct btrfs_file_extent_item);
1211 if (btrfs_file_extent_type(leaf, fi) !=
1212 BTRFS_FILE_EXTENT_REG) {
1217 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1218 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1219 ret = add_extent(bytenr, num_bytes, metadump, 1);
1221 fprintf(stderr, "Error adding space cache blocks %d\n",
1223 btrfs_release_path(path);
1232 static int copy_from_extent_tree(struct metadump_struct *metadump,
1233 struct btrfs_path *path)
1235 struct btrfs_root *extent_root;
1236 struct extent_buffer *leaf;
1237 struct btrfs_extent_item *ei;
1238 struct btrfs_key key;
1243 extent_root = metadump->root->fs_info->extent_root;
1244 bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
1245 key.objectid = bytenr;
1246 key.type = BTRFS_EXTENT_ITEM_KEY;
1249 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1251 fprintf(stderr, "Error searching extent root %d\n", ret);
1256 leaf = path->nodes[0];
1259 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1260 ret = btrfs_next_leaf(extent_root, path);
1262 fprintf(stderr, "Error going to next leaf %d"
1270 leaf = path->nodes[0];
1273 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1274 if (key.objectid < bytenr ||
1275 (key.type != BTRFS_EXTENT_ITEM_KEY &&
1276 key.type != BTRFS_METADATA_ITEM_KEY)) {
1281 bytenr = key.objectid;
1282 if (key.type == BTRFS_METADATA_ITEM_KEY)
1283 num_bytes = extent_root->leafsize;
1285 num_bytes = key.offset;
1287 if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
1288 ei = btrfs_item_ptr(leaf, path->slots[0],
1289 struct btrfs_extent_item);
1290 if (btrfs_extent_flags(leaf, ei) &
1291 BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1292 ret = add_extent(bytenr, num_bytes, metadump,
1295 fprintf(stderr, "Error adding block "
1301 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1302 ret = is_tree_block(extent_root, path, bytenr);
1304 fprintf(stderr, "Error checking tree block "
1310 ret = add_extent(bytenr, num_bytes, metadump,
1313 fprintf(stderr, "Error adding block "
1320 fprintf(stderr, "Either extent tree corruption or "
1321 "you haven't built with V0 support\n");
1326 bytenr += num_bytes;
1329 btrfs_release_path(path);
1334 static int create_metadump(const char *input, FILE *out, int num_threads,
1335 int compress_level, int sanitize, int walk_trees)
1337 struct btrfs_root *root;
1338 struct btrfs_path *path = NULL;
1339 struct metadump_struct metadump;
1343 root = open_ctree(input, 0, 0);
1345 fprintf(stderr, "Open ctree failed\n");
1349 BUG_ON(root->nodesize != root->leafsize);
1351 ret = metadump_init(&metadump, root, out, num_threads,
1352 compress_level, sanitize);
1354 fprintf(stderr, "Error initializing metadump %d\n", ret);
1359 ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
1362 fprintf(stderr, "Error adding metadata %d\n", ret);
1367 path = btrfs_alloc_path();
1369 fprintf(stderr, "Out of memory allocating path\n");
1375 ret = copy_tree_blocks(root, root->fs_info->chunk_root->node,
1382 ret = copy_tree_blocks(root, root->fs_info->tree_root->node,
1389 ret = copy_from_extent_tree(&metadump, path);
1396 ret = copy_log_trees(root, &metadump, path);
1402 ret = copy_space_cache(root, &metadump, path);
1404 ret = flush_pending(&metadump, 1);
1408 fprintf(stderr, "Error flushing pending %d\n", ret);
1411 metadump_destroy(&metadump, num_threads);
1413 btrfs_free_path(path);
1414 ret = close_ctree(root);
1415 return err ? err : ret;
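/*
 * Old-style (-o) restore: flag the super block as a metadump and replace
 * the sys_chunk_array with a single SYSTEM chunk of unlimited length that
 * maps logical addresses 1:1 onto the target device.
 */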
1418 static void update_super_old(u8 *buffer)
1420 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1421 struct btrfs_chunk *chunk;
1422 struct btrfs_disk_key *key;
1423 u32 sectorsize = btrfs_super_sectorsize(super);
1424 u64 flags = btrfs_super_flags(super);
1426 flags |= BTRFS_SUPER_FLAG_METADUMP;
1427 btrfs_set_super_flags(super, flags);
1429 key = (struct btrfs_disk_key *)(super->sys_chunk_array);
1430 chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
1431 sizeof(struct btrfs_disk_key));
1433 btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1434 btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
1435 btrfs_set_disk_key_offset(key, 0);
1437 btrfs_set_stack_chunk_length(chunk, (u64)-1);
1438 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
1439 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
1440 btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
1441 btrfs_set_stack_chunk_io_align(chunk, sectorsize);
1442 btrfs_set_stack_chunk_io_width(chunk, sectorsize);
1443 btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
1444 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1445 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1446 chunk->stripe.devid = super->dev_item.devid;
1447 btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
1448 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
1449 btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
1450 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
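/*
 * Regular restore: rewrite every chunk in the super's sys_chunk_array to a
 * single-stripe SYSTEM chunk whose stripe offset comes from
 * logical_to_physical(), shrink the array accordingly and set the
 * METADUMP_V2 flag.
 */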
1453 static int update_super(struct mdrestore_struct *mdres, u8 *buffer)
1455 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1456 struct btrfs_chunk *chunk;
1457 struct btrfs_disk_key *disk_key;
1458 struct btrfs_key key;
1459 u64 flags = btrfs_super_flags(super);
1460 u32 new_array_size = 0;
1463 u8 *ptr, *write_ptr;
1464 int old_num_stripes;
1466 write_ptr = ptr = super->sys_chunk_array;
1467 array_size = btrfs_super_sys_array_size(super);
1469 while (cur < array_size) {
1470 disk_key = (struct btrfs_disk_key *)ptr;
1471 btrfs_disk_key_to_cpu(&key, disk_key);
1473 new_array_size += sizeof(*disk_key);
1474 memmove(write_ptr, ptr, sizeof(*disk_key));
1476 write_ptr += sizeof(*disk_key);
1477 ptr += sizeof(*disk_key);
1478 cur += sizeof(*disk_key);
1480 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1481 u64 physical, size = 0;
1483 chunk = (struct btrfs_chunk *)ptr;
1484 old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1485 chunk = (struct btrfs_chunk *)write_ptr;
1487 memmove(write_ptr, ptr, sizeof(*chunk));
1488 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1489 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1490 btrfs_set_stack_chunk_type(chunk,
1491 BTRFS_BLOCK_GROUP_SYSTEM);
1492 btrfs_set_stack_stripe_devid(&chunk->stripe,
1493 super->dev_item.devid);
1494 physical = logical_to_physical(mdres, key.offset,
1496 if (size != (u64)-1)
1497 btrfs_set_stack_stripe_offset(&chunk->stripe,
1499 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
1501 new_array_size += sizeof(*chunk);
1503 fprintf(stderr, "Bogus key in the sys chunk array "
1507 write_ptr += sizeof(*chunk);
1508 ptr += btrfs_chunk_item_size(old_num_stripes);
1509 cur += btrfs_chunk_item_size(old_num_stripes);
1512 if (mdres->clear_space_cache)
1513 btrfs_set_super_cache_generation(super, 0);
1515 flags |= BTRFS_SUPER_FLAG_METADUMP_V2;
1516 btrfs_set_super_flags(super, flags);
1517 btrfs_set_super_sys_array_size(super, new_array_size);
1518 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
1523 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size)
1525 struct extent_buffer *eb;
1527 eb = malloc(sizeof(struct extent_buffer) + size);
1530 memset(eb, 0, sizeof(struct extent_buffer) + size);
1537 static void truncate_item(struct extent_buffer *eb, int slot, u32 new_size)
1539 struct btrfs_item *item;
1547 old_size = btrfs_item_size_nr(eb, slot);
1548 if (old_size == new_size)
1551 nritems = btrfs_header_nritems(eb);
1552 data_end = btrfs_item_offset_nr(eb, nritems - 1);
1554 old_data_start = btrfs_item_offset_nr(eb, slot);
1555 size_diff = old_size - new_size;
1557 for (i = slot; i < nritems; i++) {
1559 item = btrfs_item_nr(i);
1560 ioff = btrfs_item_offset(eb, item);
1561 btrfs_set_item_offset(eb, item, ioff + size_diff);
1564 memmove_extent_buffer(eb, btrfs_leaf_data(eb) + data_end + size_diff,
1565 btrfs_leaf_data(eb) + data_end,
1566 old_data_start + new_size - data_end);
1567 item = btrfs_item_nr(slot);
1568 btrfs_set_item_size(eb, item, new_size);
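/*
 * Rewrite chunk tree leaves on their way to the target: each CHUNK_ITEM is
 * truncated to a single stripe, its RAID profile bits are cleared, and the
 * stripe is pointed at the physical offset picked for the restored chunk;
 * the block checksum is then recomputed.
 */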
1571 static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
1572 struct async_work *async, u8 *buffer,
1575 struct extent_buffer *eb;
1576 size_t size_left = size;
1577 u64 bytenr = async->start;
1580 if (size_left % mdres->leafsize)
1583 eb = alloc_dummy_eb(bytenr, mdres->leafsize);
1589 memcpy(eb->data, buffer, mdres->leafsize);
1591 if (btrfs_header_bytenr(eb) != bytenr)
1593 if (memcmp(mdres->fsid,
1594 eb->data + offsetof(struct btrfs_header, fsid),
1598 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID)
1601 if (btrfs_header_level(eb) != 0)
1604 for (i = 0; i < btrfs_header_nritems(eb); i++) {
1605 struct btrfs_chunk chunk;
1606 struct btrfs_key key;
1607 u64 type, physical, size = (u64)-1;
1609 btrfs_item_key_to_cpu(eb, &key, i);
1610 if (key.type != BTRFS_CHUNK_ITEM_KEY)
1612 truncate_item(eb, i, sizeof(chunk));
1613 read_extent_buffer(eb, &chunk,
1614 btrfs_item_ptr_offset(eb, i),
1618 physical = logical_to_physical(mdres, key.offset,
1621 /* Zero out the RAID profile */
1622 type = btrfs_stack_chunk_type(&chunk);
1623 type &= (BTRFS_BLOCK_GROUP_DATA |
1624 BTRFS_BLOCK_GROUP_SYSTEM |
1625 BTRFS_BLOCK_GROUP_METADATA |
1626 BTRFS_BLOCK_GROUP_DUP);
1627 btrfs_set_stack_chunk_type(&chunk, type);
1629 btrfs_set_stack_chunk_num_stripes(&chunk, 1);
1630 btrfs_set_stack_chunk_sub_stripes(&chunk, 0);
1631 btrfs_set_stack_stripe_devid(&chunk.stripe, mdres->devid);
1632 if (size != (u64)-1)
1633 btrfs_set_stack_stripe_offset(&chunk.stripe,
1635 memcpy(chunk.stripe.dev_uuid, mdres->uuid,
1637 write_extent_buffer(eb, &chunk,
1638 btrfs_item_ptr_offset(eb, i),
1641 memcpy(buffer, eb->data, eb->len);
1642 csum_block(buffer, eb->len);
1644 size_left -= mdres->leafsize;
1645 buffer += mdres->leafsize;
1646 bytenr += mdres->leafsize;
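/*
 * Copy the (already fixed up) primary super block to every backup super
 * location that fits on the target, adjusting the bytenr field and
 * checksum of each copy.
 */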
1653 static void write_backup_supers(int fd, u8 *buf)
1655 struct btrfs_super_block *super = (struct btrfs_super_block *)buf;
1662 if (fstat(fd, &st)) {
1663 fprintf(stderr, "Couldn't stat restore point, won't be able "
1664 "to write backup supers: %d\n", errno);
1668 size = btrfs_device_size(fd, &st);
1670 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1671 bytenr = btrfs_sb_offset(i);
1672 if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
1674 btrfs_set_super_bytenr(super, bytenr);
1675 csum_block(buf, BTRFS_SUPER_INFO_SIZE);
1676 ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
1677 if (ret < BTRFS_SUPER_INFO_SIZE) {
1679 fprintf(stderr, "Problem writing out backup "
1680 "super block %d, err %d\n", i, errno);
1682 fprintf(stderr, "Short write writing out "
1683 "backup super block\n");
1689 static void *restore_worker(void *data)
1691 struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
1692 struct async_work *async;
1698 int compress_size = MAX_PENDING_SIZE * 4;
1700 outfd = fileno(mdres->out);
1701 buffer = malloc(compress_size);
1703 fprintf(stderr, "Error allocating buffer\n");
1704 pthread_mutex_lock(&mdres->mutex);
1706 mdres->error = -ENOMEM;
1707 pthread_mutex_unlock(&mdres->mutex);
1716 pthread_mutex_lock(&mdres->mutex);
1717 while (!mdres->leafsize || list_empty(&mdres->list)) {
1719 pthread_mutex_unlock(&mdres->mutex);
1722 pthread_cond_wait(&mdres->cond, &mdres->mutex);
1724 async = list_entry(mdres->list.next, struct async_work, list);
1725 list_del_init(&async->list);
1726 pthread_mutex_unlock(&mdres->mutex);
1728 if (mdres->compress_method == COMPRESS_ZLIB) {
1729 size = compress_size;
1730 ret = uncompress(buffer, (unsigned long *)&size,
1731 async->buffer, async->bufsize);
1733 fprintf(stderr, "Error decompressing %d\n",
1739 outbuf = async->buffer;
1740 size = async->bufsize;
1743 if (!mdres->multi_devices) {
1744 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1745 if (mdres->old_restore) {
1746 update_super_old(outbuf);
1748 ret = update_super(mdres, outbuf);
1752 } else if (!mdres->old_restore) {
1753 ret = fixup_chunk_tree_block(mdres, async, outbuf, size);
1759 if (!mdres->fixup_offset) {
1761 u64 chunk_size = size;
1762 if (!mdres->multi_devices && !mdres->old_restore)
1763 bytenr = logical_to_physical(mdres,
1764 async->start + offset,
1767 bytenr = async->start + offset;
1769 ret = pwrite64(outfd, outbuf+offset, chunk_size,
1771 if (ret != chunk_size) {
1773 fprintf(stderr, "Error writing to "
1774 "device %d\n", errno);
1778 fprintf(stderr, "Short write\n");
1784 offset += chunk_size;
1786 } else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
1787 ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
1789 printk("Error write data\n");
1795 /* backup super blocks are already there at fixup_offset stage */
1796 if (!mdres->multi_devices && async->start == BTRFS_SUPER_INFO_OFFSET)
1797 write_backup_supers(outfd, outbuf);
1799 pthread_mutex_lock(&mdres->mutex);
1800 if (err && !mdres->error)
1803 pthread_mutex_unlock(&mdres->mutex);
1805 free(async->buffer);
1813 static void mdrestore_destroy(struct mdrestore_struct *mdres, int num_threads)
1818 while ((n = rb_first(&mdres->chunk_tree))) {
1819 struct fs_chunk *entry;
1821 entry = rb_entry(n, struct fs_chunk, l);
1822 rb_erase(n, &mdres->chunk_tree);
1823 rb_erase(&entry->p, &mdres->physical_tree);
1826 pthread_mutex_lock(&mdres->mutex);
1828 pthread_cond_broadcast(&mdres->cond);
1829 pthread_mutex_unlock(&mdres->mutex);
1831 for (i = 0; i < num_threads; i++)
1832 pthread_join(mdres->threads[i], NULL);
1834 pthread_cond_destroy(&mdres->cond);
1835 pthread_mutex_destroy(&mdres->mutex);
1836 free(mdres->threads);
1839 static int mdrestore_init(struct mdrestore_struct *mdres,
1840 FILE *in, FILE *out, int old_restore,
1841 int num_threads, int fixup_offset,
1842 struct btrfs_fs_info *info, int multi_devices)
1846 memset(mdres, 0, sizeof(*mdres));
1847 pthread_cond_init(&mdres->cond, NULL);
1848 pthread_mutex_init(&mdres->mutex, NULL);
1849 INIT_LIST_HEAD(&mdres->list);
1850 INIT_LIST_HEAD(&mdres->overlapping_chunks);
1853 mdres->old_restore = old_restore;
1854 mdres->chunk_tree.rb_node = NULL;
1855 mdres->fixup_offset = fixup_offset;
1857 mdres->multi_devices = multi_devices;
1858 mdres->clear_space_cache = 0;
1859 mdres->last_physical_offset = 0;
1860 mdres->alloced_chunks = 0;
1865 mdres->num_threads = num_threads;
1866 mdres->threads = calloc(num_threads, sizeof(pthread_t));
1867 if (!mdres->threads)
1869 for (i = 0; i < num_threads; i++) {
1870 ret = pthread_create(mdres->threads + i, NULL, restore_worker,
1876 mdrestore_destroy(mdres, i + 1);
1880 static int fill_mdres_info(struct mdrestore_struct *mdres,
1881 struct async_work *async)
1883 struct btrfs_super_block *super;
1888 /* We've already been initialized */
1889 if (mdres->leafsize)
1892 if (mdres->compress_method == COMPRESS_ZLIB) {
1893 size_t size = MAX_PENDING_SIZE * 2;
1895 buffer = malloc(MAX_PENDING_SIZE * 2);
1898 ret = uncompress(buffer, (unsigned long *)&size,
1899 async->buffer, async->bufsize);
1901 fprintf(stderr, "Error decompressing %d\n", ret);
1907 outbuf = async->buffer;
1910 super = (struct btrfs_super_block *)outbuf;
1911 mdres->leafsize = btrfs_super_leafsize(super);
1912 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
1913 memcpy(mdres->uuid, super->dev_item.uuid,
1915 mdres->devid = le64_to_cpu(super->dev_item.devid);
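/*
 * Queue every item of one cluster for the restore workers: read each
 * payload from the image, pick up the fs geometry from the super block
 * item when it appears, skip the padding at the end of the cluster and
 * advance *next past it.
 */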
1920 static int add_cluster(struct meta_cluster *cluster,
1921 struct mdrestore_struct *mdres, u64 *next)
1923 struct meta_cluster_item *item;
1924 struct meta_cluster_header *header = &cluster->header;
1925 struct async_work *async;
1930 mdres->compress_method = header->compress;
1932 bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
1933 nritems = le32_to_cpu(header->nritems);
1934 for (i = 0; i < nritems; i++) {
1935 item = &cluster->items[i];
1936 async = calloc(1, sizeof(*async));
1938 fprintf(stderr, "Error allocating async\n");
1941 async->start = le64_to_cpu(item->bytenr);
1942 async->bufsize = le32_to_cpu(item->size);
1943 async->buffer = malloc(async->bufsize);
1944 if (!async->buffer) {
1945 fprintf(stderr, "Error allocating async buffer\n");
1949 ret = fread(async->buffer, async->bufsize, 1, mdres->in);
1951 fprintf(stderr, "Error reading buffer %d\n", errno);
1952 free(async->buffer);
1956 bytenr += async->bufsize;
1958 pthread_mutex_lock(&mdres->mutex);
1959 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1960 ret = fill_mdres_info(mdres, async);
1962 fprintf(stderr, "Error setting up restore\n");
1963 pthread_mutex_unlock(&mdres->mutex);
1964 free(async->buffer);
1969 list_add_tail(&async->list, &mdres->list);
1971 pthread_cond_signal(&mdres->cond);
1972 pthread_mutex_unlock(&mdres->mutex);
1974 if (bytenr & BLOCK_MASK) {
1975 char buffer[BLOCK_MASK];
1976 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
1979 ret = fread(buffer, size, 1, mdres->in);
1981 fprintf(stderr, "Error reading in buffer %d\n", errno);
1989 static int wait_for_worker(struct mdrestore_struct *mdres)
1993 pthread_mutex_lock(&mdres->mutex);
1995 while (!ret && mdres->num_items > 0) {
1996 struct timespec ts = {
1998 .tv_nsec = 10000000,
2000 pthread_mutex_unlock(&mdres->mutex);
2001 nanosleep(&ts, NULL);
2002 pthread_mutex_lock(&mdres->mutex);
2005 pthread_mutex_unlock(&mdres->mutex);
2009 static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
2010 u64 bytenr, u64 item_bytenr, u32 bufsize,
2013 struct extent_buffer *eb;
2017 eb = alloc_dummy_eb(bytenr, mdres->leafsize);
2023 while (item_bytenr != bytenr) {
2024 buffer += mdres->leafsize;
2025 item_bytenr += mdres->leafsize;
2028 memcpy(eb->data, buffer, mdres->leafsize);
2029 if (btrfs_header_bytenr(eb) != bytenr) {
2030 fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
2035 if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
2037 fprintf(stderr, "Fsid doesn't match\n");
2042 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
2043 fprintf(stderr, "Does not belong to the chunk tree\n");
2048 for (i = 0; i < btrfs_header_nritems(eb); i++) {
2049 struct btrfs_chunk chunk;
2050 struct fs_chunk *fs_chunk;
2051 struct btrfs_key key;
2053 if (btrfs_header_level(eb)) {
2054 u64 blockptr = btrfs_node_blockptr(eb, i);
2056 ret = search_for_chunk_blocks(mdres, blockptr,
2063 /* Yay a leaf! We loves leafs! */
2064 btrfs_item_key_to_cpu(eb, &key, i);
2065 if (key.type != BTRFS_CHUNK_ITEM_KEY)
2068 fs_chunk = malloc(sizeof(struct fs_chunk));
2070 fprintf(stderr, "Error allocating chunk\n");
2074 memset(fs_chunk, 0, sizeof(*fs_chunk));
2075 read_extent_buffer(eb, &chunk, btrfs_item_ptr_offset(eb, i),
2078 fs_chunk->logical = key.offset;
2079 fs_chunk->physical = btrfs_stack_stripe_offset(&chunk.stripe);
2080 fs_chunk->bytes = btrfs_stack_chunk_length(&chunk);
2081 INIT_LIST_HEAD(&fs_chunk->list);
2082 if (tree_search(&mdres->physical_tree, &fs_chunk->p,
2083 physical_cmp, 1) != NULL)
2084 list_add(&fs_chunk->list, &mdres->overlapping_chunks);
2086 tree_insert(&mdres->physical_tree, &fs_chunk->p,
2088 if (fs_chunk->physical + fs_chunk->bytes >
2089 mdres->last_physical_offset)
2090 mdres->last_physical_offset = fs_chunk->physical +
2092 mdres->alloced_chunks += fs_chunk->bytes;
2093 tree_insert(&mdres->chunk_tree, &fs_chunk->l, chunk_cmp);
2100 /* If you have to ask you aren't worthy */
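/*
 * Starting at cluster_bytenr, walk the image cluster by cluster,
 * decompress each item, and when an item covers the block we are looking
 * for hand it to read_chunk_block() (which recurses back here for lower
 * levels of the chunk tree).  Wraps around to the start of the image if
 * the block isn't found before EOF.
 */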
2101 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
2102 u64 search, u64 cluster_bytenr)
2104 struct meta_cluster *cluster;
2105 struct meta_cluster_header *header;
2106 struct meta_cluster_item *item;
2107 u64 current_cluster = cluster_bytenr, bytenr;
2109 u32 bufsize, nritems, i;
2110 u32 max_size = MAX_PENDING_SIZE * 2;
2111 u8 *buffer, *tmp = NULL;
2114 cluster = malloc(BLOCK_SIZE);
2116 fprintf(stderr, "Error allocating cluster\n");
2120 buffer = malloc(max_size);
2122 fprintf(stderr, "Error allocating buffer\n");
2127 if (mdres->compress_method == COMPRESS_ZLIB) {
2128 tmp = malloc(max_size);
2130 fprintf(stderr, "Error allocating tmp buffer\n");
2137 bytenr = current_cluster;
2139 if (fseek(mdres->in, current_cluster, SEEK_SET)) {
2140 fprintf(stderr, "Error seeking: %d\n", errno);
2145 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
2147 if (cluster_bytenr != 0) {
2149 current_cluster = 0;
2153 fprintf(stderr, "Error: hit end of image while searching for the chunk block\n");
2156 } else if (ret < 0) {
2157 fprintf(stderr, "Error reading image\n");
2162 header = &cluster->header;
2163 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2164 le64_to_cpu(header->bytenr) != current_cluster) {
2165 fprintf(stderr, "bad header in metadump image\n");
2170 bytenr += BLOCK_SIZE;
2171 nritems = le32_to_cpu(header->nritems);
2172 for (i = 0; i < nritems; i++) {
2175 item = &cluster->items[i];
2176 bufsize = le32_to_cpu(item->size);
2177 item_bytenr = le64_to_cpu(item->bytenr);
2179 if (bufsize > max_size) {
2180 fprintf(stderr, "item %u size %u too big\n",
2186 if (mdres->compress_method == COMPRESS_ZLIB) {
2187 ret = fread(tmp, bufsize, 1, mdres->in);
2189 fprintf(stderr, "Error reading: %d\n",
2196 ret = uncompress(buffer,
2197 (unsigned long *)&size, tmp,
2200 fprintf(stderr, "Error decompressing "
2206 ret = fread(buffer, bufsize, 1, mdres->in);
2208 fprintf(stderr, "Error reading: %d\n",
2217 if (item_bytenr <= search &&
2218 item_bytenr + size > search) {
2219 ret = read_chunk_block(mdres, buffer, search,
2233 if (bytenr & BLOCK_MASK)
2234 bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
2235 current_cluster = bytenr;
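/*
 * Bootstrap the restore: read the first cluster, find the super block
 * item, take leafsize/fsid/dev uuid/devid from it, and walk the chunk tree
 * via search_for_chunk_blocks() so logical->physical remapping works
 * before anything is written.
 */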
2244 static int build_chunk_tree(struct mdrestore_struct *mdres,
2245 struct meta_cluster *cluster)
2247 struct btrfs_super_block *super;
2248 struct meta_cluster_header *header;
2249 struct meta_cluster_item *item = NULL;
2250 u64 chunk_root_bytenr = 0;
2256 /* We can't seek with stdin so don't bother doing this */
2257 if (mdres->in == stdin)
2260 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
2262 fprintf(stderr, "Error reading in cluster: %d\n", errno);
2267 header = &cluster->header;
2268 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2269 le64_to_cpu(header->bytenr) != 0) {
2270 fprintf(stderr, "bad header in metadump image\n");
2274 bytenr += BLOCK_SIZE;
2275 mdres->compress_method = header->compress;
2276 nritems = le32_to_cpu(header->nritems);
2277 for (i = 0; i < nritems; i++) {
2278 item = &cluster->items[i];
2280 if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
2282 bytenr += le32_to_cpu(item->size);
2283 if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
2284 fprintf(stderr, "Error seeking: %d\n", errno);
2289 if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
2290 fprintf(stderr, "Error: did not find the super block item in the first cluster\n");
2294 buffer = malloc(le32_to_cpu(item->size));
2296 fprintf(stderr, "Error allocating buffer\n");
2300 ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
2302 fprintf(stderr, "Error reading buffer: %d\n", errno);
2307 if (mdres->compress_method == COMPRESS_ZLIB) {
2308 size_t size = MAX_PENDING_SIZE * 2;
2311 tmp = malloc(MAX_PENDING_SIZE * 2);
2316 ret = uncompress(tmp, (unsigned long *)&size,
2317 buffer, le32_to_cpu(item->size));
2319 fprintf(stderr, "Error decompressing %d\n", ret);
2328 pthread_mutex_lock(&mdres->mutex);
2329 super = (struct btrfs_super_block *)buffer;
2330 chunk_root_bytenr = btrfs_super_chunk_root(super);
2331 mdres->leafsize = btrfs_super_leafsize(super);
2332 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
2333 memcpy(mdres->uuid, super->dev_item.uuid,
2335 mdres->devid = le64_to_cpu(super->dev_item.devid);
2337 pthread_mutex_unlock(&mdres->mutex);
2339 return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
2342 static int range_contains_super(u64 physical, u64 bytes)
2347 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2348 super_bytenr = btrfs_sb_offset(i);
2349 if (super_bytenr >= physical &&
2350 super_bytenr < physical + bytes)
2357 static void remap_overlapping_chunks(struct mdrestore_struct *mdres)
2359 struct fs_chunk *fs_chunk;
2361 while (!list_empty(&mdres->overlapping_chunks)) {
2362 fs_chunk = list_first_entry(&mdres->overlapping_chunks,
2363 struct fs_chunk, list);
2364 list_del_init(&fs_chunk->list);
2365 if (range_contains_super(fs_chunk->physical,
2367 fprintf(stderr, "Remapping a chunk that had a super "
2368 "mirror inside of it, clearing space cache "
2369 "so we don't end up with corruption\n");
2370 mdres->clear_space_cache = 1;
2372 fs_chunk->physical = mdres->last_physical_offset;
2373 tree_insert(&mdres->physical_tree, &fs_chunk->p, physical_cmp);
2374 mdres->last_physical_offset += fs_chunk->bytes;
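/*
 * After the image has been written, reopen the target and shrink it to a
 * single device: size the dev item to the real file/device size, account
 * only the chunks we actually laid out, and delete the dev items of any
 * other devices.
 */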
2378 static int fixup_devices(struct btrfs_fs_info *fs_info,
2379 struct mdrestore_struct *mdres, off_t dev_size)
2381 struct btrfs_trans_handle *trans;
2382 struct btrfs_dev_item *dev_item;
2383 struct btrfs_path *path;
2384 struct extent_buffer *leaf;
2385 struct btrfs_root *root = fs_info->chunk_root;
2386 struct btrfs_key key;
2387 u64 devid, cur_devid;
2390 path = btrfs_alloc_path();
2392 fprintf(stderr, "Error allocating path\n");
2396 trans = btrfs_start_transaction(fs_info->tree_root, 1);
2397 if (IS_ERR(trans)) {
2398 fprintf(stderr, "Error starting transaction %ld\n",
2400 btrfs_free_path(path);
2401 return PTR_ERR(trans);
2404 dev_item = &fs_info->super_copy->dev_item;
2406 devid = btrfs_stack_device_id(dev_item);
2408 btrfs_set_stack_device_total_bytes(dev_item, dev_size);
2409 btrfs_set_stack_device_bytes_used(dev_item, mdres->alloced_chunks);
2411 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2412 key.type = BTRFS_DEV_ITEM_KEY;
2416 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2418 fprintf(stderr, "search failed %d\n", ret);
2423 leaf = path->nodes[0];
2424 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2425 ret = btrfs_next_leaf(root, path);
2427 fprintf(stderr, "Error going to next leaf "
2435 leaf = path->nodes[0];
2438 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2439 if (key.type > BTRFS_DEV_ITEM_KEY)
2441 if (key.type != BTRFS_DEV_ITEM_KEY) {
2446 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2447 struct btrfs_dev_item);
2448 cur_devid = btrfs_device_id(leaf, dev_item);
2449 if (devid != cur_devid) {
2450 ret = btrfs_del_item(trans, root, path);
2452 fprintf(stderr, "Error deleting item %d\n",
2456 btrfs_release_path(path);
2460 btrfs_set_device_total_bytes(leaf, dev_item, dev_size);
2461 btrfs_set_device_bytes_used(leaf, dev_item,
2462 mdres->alloced_chunks);
2463 btrfs_mark_buffer_dirty(leaf);
2467 btrfs_free_path(path);
2468 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
2470 fprintf(stderr, "Commit failed %d\n", ret);
2476 static int restore_metadump(const char *input, FILE *out, int old_restore,
2477 int num_threads, int fixup_offset,
2478 const char *target, int multi_devices)
2480 struct meta_cluster *cluster = NULL;
2481 struct meta_cluster_header *header;
2482 struct mdrestore_struct mdrestore;
2483 struct btrfs_fs_info *info = NULL;
2488 if (!strcmp(input, "-")) {
2491 in = fopen(input, "r");
2493 perror("unable to open metadump image");
2498 /* NOTE: open with write mode */
2501 info = open_ctree_fs_info(target, 0, 0,
2503 OPEN_CTREE_RESTORE |
2504 OPEN_CTREE_PARTIAL);
2506 fprintf(stderr, "%s: open ctree failed\n", __func__);
2512 cluster = malloc(BLOCK_SIZE);
2514 fprintf(stderr, "Error allocating cluster\n");
2519 ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
2520 fixup_offset, info, multi_devices);
2522 fprintf(stderr, "Error initializing mdrestore %d\n", ret);
2523 goto failed_cluster;
2526 if (!multi_devices && !old_restore) {
2527 ret = build_chunk_tree(&mdrestore, cluster);
2530 if (!list_empty(&mdrestore.overlapping_chunks))
2531 remap_overlapping_chunks(&mdrestore);
2534 if (in != stdin && fseek(in, 0, SEEK_SET)) {
2535 fprintf(stderr, "Error seeking %d\n", errno);
2539 while (!mdrestore.error) {
2540 ret = fread(cluster, BLOCK_SIZE, 1, in);
2544 header = &cluster->header;
2545 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2546 le64_to_cpu(header->bytenr) != bytenr) {
2547 fprintf(stderr, "bad header in metadump image\n");
2551 ret = add_cluster(cluster, &mdrestore, &bytenr);
2553 fprintf(stderr, "Error adding cluster\n");
2557 ret = wait_for_worker(&mdrestore);
2559 if (!ret && !multi_devices && !old_restore) {
2560 struct btrfs_root *root;
2563 root = open_ctree_fd(fileno(out), target, 0,
2564 OPEN_CTREE_PARTIAL |
2566 OPEN_CTREE_NO_DEVICES);
2568 fprintf(stderr, "unable to open %s\n", target);
2572 info = root->fs_info;
2574 if (stat(target, &st)) {
2575 fprintf(stderr, "statting %s failed\n", target);
2576 close_ctree(info->chunk_root);
2580 ret = fixup_devices(info, &mdrestore, st.st_size);
2581 close_ctree(info->chunk_root);
2586 mdrestore_destroy(&mdrestore, num_threads);
2590 if (fixup_offset && info)
2591 close_ctree(info->chunk_root);
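/*
 * Multi-device restore (-m): copy the device item for cur_devid out of the
 * restored chunk tree into a fresh super block and write it, plus its
 * backup copies, onto other_dev so every member device carries a matching
 * super block.
 */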
2598 static int update_disk_super_on_device(struct btrfs_fs_info *info,
2599 const char *other_dev, u64 cur_devid)
2601 struct btrfs_key key;
2602 struct extent_buffer *leaf;
2603 struct btrfs_path path;
2604 struct btrfs_dev_item *dev_item;
2605 struct btrfs_super_block *disk_super;
2606 char dev_uuid[BTRFS_UUID_SIZE];
2607 char fs_uuid[BTRFS_UUID_SIZE];
2608 u64 devid, type, io_align, io_width;
2609 u64 sector_size, total_bytes, bytes_used;
2614 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2615 key.type = BTRFS_DEV_ITEM_KEY;
2616 key.offset = cur_devid;
2618 btrfs_init_path(&path);
2619 ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
2621 fprintf(stderr, "Error: searching for the device item failed\n");
2625 leaf = path.nodes[0];
2626 dev_item = btrfs_item_ptr(leaf, path.slots[0],
2627 struct btrfs_dev_item);
2629 devid = btrfs_device_id(leaf, dev_item);
2630 if (devid != cur_devid) {
2631 printk("devid %llu mismatch with %llu\n", devid, cur_devid);
2635 type = btrfs_device_type(leaf, dev_item);
2636 io_align = btrfs_device_io_align(leaf, dev_item);
2637 io_width = btrfs_device_io_width(leaf, dev_item);
2638 sector_size = btrfs_device_sector_size(leaf, dev_item);
2639 total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2640 bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2641 read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE);
2642 read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE);
2644 btrfs_release_path(&path);
2646 printk("update disk super on %s devid=%llu\n", other_dev, devid);
2648 /* update other devices' super block */
2649 fp = open(other_dev, O_CREAT | O_RDWR, 0600);
2651 fprintf(stderr, "could not open %s\n", other_dev);
2655 buf = malloc(BTRFS_SUPER_INFO_SIZE);
2662 memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);
2664 disk_super = (struct btrfs_super_block *)buf;
2665 dev_item = &disk_super->dev_item;
2667 btrfs_set_stack_device_type(dev_item, type);
2668 btrfs_set_stack_device_id(dev_item, devid);
2669 btrfs_set_stack_device_total_bytes(dev_item, total_bytes);
2670 btrfs_set_stack_device_bytes_used(dev_item, bytes_used);
2671 btrfs_set_stack_device_io_align(dev_item, io_align);
2672 btrfs_set_stack_device_io_width(dev_item, io_width);
2673 btrfs_set_stack_device_sector_size(dev_item, sector_size);
2674 memcpy(dev_item->uuid, dev_uuid, BTRFS_UUID_SIZE);
2675 memcpy(dev_item->fsid, fs_uuid, BTRFS_UUID_SIZE);
2676 csum_block((u8 *)buf, BTRFS_SUPER_INFO_SIZE);
2678 ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
2679 if (ret != BTRFS_SUPER_INFO_SIZE) {
2684 write_backup_supers(fp, (u8 *)buf);
2692 static void print_usage(void)
2694 fprintf(stderr, "usage: btrfs-image [options] source target\n");
2695 fprintf(stderr, "\t-r \trestore metadump image\n");
2696 fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
2697 fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
2698 fprintf(stderr, "\t-o \tdon't mess with the chunk tree when restoring\n");
2699 fprintf(stderr, "\t-s \tsanitize file names; use once to replace names with garbage, twice to generate crc32c collisions\n");
2700 fprintf(stderr, "\t-w \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
2701 fprintf(stderr, "\t-m \trestore for multiple devices\n");
2702 fprintf(stderr, "\n");
2703 fprintf(stderr, "\tIn the dump mode, source is the btrfs device and target is the output file (use '-' for stdout).\n");
2704 fprintf(stderr, "\tIn the restore mode, source is the dumped image and target is the btrfs device/file.\n");
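/*
 * Typical invocations (device and file names below are only examples):
 *
 *   create an image:      btrfs-image -c9 -t4 /dev/sda1 fs.image
 *   restore an image:     btrfs-image -r fs.image /dev/sdb1
 *   stream to stdout:     btrfs-image /dev/sda1 -
 */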
2708 int main(int argc, char *argv[])
2712 u64 num_threads = 1;
2713 u64 compress_level = 0;
2715 int old_restore = 0;
2717 int multi_devices = 0;
2721 int usage_error = 0;
2725 int c = getopt(argc, argv, "rc:t:oswm");
2733 num_threads = arg_strtou64(optarg);
2734 if (num_threads > 32)
2738 compress_level = arg_strtou64(optarg);
2739 if (compress_level > 9)
2760 argc = argc - optind;
2762 if (check_argc_min(argc, 2))
2769 fprintf(stderr, "Usage error: create and restore cannot be used at the same time\n");
2773 if (walk_trees || sanitize || compress_level) {
2774 fprintf(stderr, "Usage error: the -w, -s and -c options make no sense when restoring\n");
2777 if (multi_devices && dev_cnt < 2) {
2778 fprintf(stderr, "Usage error: not enough devices specified for -m option\n");
2781 if (!multi_devices && dev_cnt != 1) {
2782 fprintf(stderr, "Usage error: accepts only 1 device without -m option\n");
2790 source = argv[optind];
2791 target = argv[optind + 1];
2793 if (create && !strcmp(target, "-")) {
2796 out = fopen(target, "w+");
2798 perror("unable to create target file");
2803 if (num_threads == 1 && compress_level > 0) {
2804 num_threads = sysconf(_SC_NPROCESSORS_ONLN);
2805 if (num_threads <= 0)
2810 ret = check_mounted(source);
2812 fprintf(stderr, "Could not check mount status: %s\n",
2817 "WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");
2819 ret = create_metadump(source, out, num_threads,
2820 compress_level, sanitize, walk_trees);
2822 ret = restore_metadump(source, out, old_restore, num_threads,
2823 0, target, multi_devices);
2826 printk("%s failed (%s)\n", (create) ? "create" : "restore",
2831 /* extended support for multiple devices */
2832 if (!create && multi_devices) {
2833 struct btrfs_fs_info *info;
2837 info = open_ctree_fs_info(target, 0, 0,
2838 OPEN_CTREE_PARTIAL |
2839 OPEN_CTREE_RESTORE);
2842 fprintf(stderr, "unable to open %s error = %s\n",
2843 target, strerror(e));
2847 total_devs = btrfs_super_num_devices(info->super_copy);
2848 if (total_devs != dev_cnt) {
2849 printk("the image needs %llu devices, but only %d were given\n",
2850 total_devs, dev_cnt);
2851 close_ctree(info->chunk_root);
2855 /* update super block on other disks */
2856 for (i = 2; i <= dev_cnt; i++) {
2857 ret = update_disk_super_on_device(info,
2858 argv[optind + i], (u64)i);
2860 printk("update disk super failed devid=%d (error=%d)\n",
2862 close_ctree(info->chunk_root);
2867 close_ctree(info->chunk_root);
2869 /* fix metadata block to map correct chunk */
2870 ret = restore_metadump(source, out, 0, num_threads, 1,
2873 fprintf(stderr, "fix metadump failed (error=%d)\n",
2879 if (out == stdout) {
2883 if (ret && create) {
2886 unlink_ret = unlink(target);
2889 "unlink output file failed : %s\n",