2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
22 #include <sys/types.h>
28 #include "kerncompat.h"
32 #include "transaction.h"
35 #include "extent_io.h"
37 #define HEADER_MAGIC 0xbd5c25e27295668bULL
38 #define MAX_PENDING_SIZE (256 * 1024)
39 #define BLOCK_SIZE 1024
40 #define BLOCK_MASK (BLOCK_SIZE - 1)
42 #define COMPRESS_NONE 0
43 #define COMPRESS_ZLIB 1
45 struct meta_cluster_item {
48 } __attribute__ ((__packed__));
50 struct meta_cluster_header {
55 } __attribute__ ((__packed__));
57 /* cluster header + index items + buffers */
59 struct meta_cluster_header header;
60 struct meta_cluster_item items[];
61 } __attribute__ ((__packed__));
63 #define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
64 sizeof(struct meta_cluster_item))
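/*
 * Rough sketch of the resulting image layout (informal, derived from the
 * structures above and from write_buffers() below):
 *
 *   [ meta_cluster: header + up to ITEMS_PER_CLUSTER index items ]   one BLOCK_SIZE block
 *   [ payload of item 0 ][ payload of item 1 ] ...                   possibly zlib-compressed
 *   [ zero padding up to the next BLOCK_SIZE boundary ]
 *   [ next meta_cluster ] ...
 */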
72 struct list_head list;
76 struct list_head list;
77 struct list_head ordered;
85 struct metadump_struct {
86 struct btrfs_root *root;
89 struct meta_cluster *cluster;
93 pthread_mutex_t mutex;
95 struct rb_root name_tree;
97 struct list_head list;
98 struct list_head ordered;
120 struct mdrestore_struct {
126 pthread_mutex_t mutex;
129 struct rb_root chunk_tree;
130 struct rb_root physical_tree;
131 struct list_head list;
132 struct list_head overlapping_chunks;
136 u64 last_physical_offset;
137 u8 uuid[BTRFS_UUID_SIZE];
138 u8 fsid[BTRFS_FSID_SIZE];
146 int clear_space_cache;
147 struct btrfs_fs_info *info;
150 static void print_usage(void) __attribute__((noreturn));
151 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
152 u64 search, u64 cluster_bytenr);
153 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);
155 static void csum_block(u8 *buf, size_t len)
157 char result[BTRFS_CRC32_SIZE];
159 crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
160 btrfs_csum_final(crc, result);
161 memcpy(buf, result, BTRFS_CRC32_SIZE);
164 static int has_name(struct btrfs_key *key)
167 case BTRFS_DIR_ITEM_KEY:
168 case BTRFS_DIR_INDEX_KEY:
169 case BTRFS_INODE_REF_KEY:
170 case BTRFS_INODE_EXTREF_KEY:
171 case BTRFS_XATTR_ITEM_KEY:
180 static char *generate_garbage(u32 name_len)
182 char *buf = malloc(name_len);
188 for (i = 0; i < name_len; i++) {
189 char c = rand() % 94 + 33;
199 static int name_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
201 struct name *entry = rb_entry(a, struct name, n);
202 struct name *ins = rb_entry(b, struct name, n);
205 len = min(ins->len, entry->len);
206 return memcmp(ins->val, entry->val, len);
209 static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
211 struct fs_chunk *entry = rb_entry(a, struct fs_chunk, l);
212 struct fs_chunk *ins = rb_entry(b, struct fs_chunk, l);
214 if (fuzz && ins->logical >= entry->logical &&
215 ins->logical < entry->logical + entry->bytes)
218 if (ins->logical < entry->logical)
220 else if (ins->logical > entry->logical)
225 static int physical_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
227 struct fs_chunk *entry = rb_entry(a, struct fs_chunk, p);
228 struct fs_chunk *ins = rb_entry(b, struct fs_chunk, p);
230 if (fuzz && ins->physical >= entry->physical &&
231 ins->physical < entry->physical + entry->bytes)
234 if (fuzz && entry->physical >= ins->physical &&
235 entry->physical < ins->physical + ins->bytes)
238 if (ins->physical < entry->physical)
240 else if (ins->physical > entry->physical)
245 static void tree_insert(struct rb_root *root, struct rb_node *ins,
246 int (*cmp)(struct rb_node *a, struct rb_node *b,
249 struct rb_node ** p = &root->rb_node;
250 struct rb_node * parent = NULL;
256 dir = cmp(*p, ins, 1);
265 rb_link_node(ins, parent, p);
266 rb_insert_color(ins, root);
269 static struct rb_node *tree_search(struct rb_root *root,
270 struct rb_node *search,
271 int (*cmp)(struct rb_node *a,
272 struct rb_node *b, int fuzz),
275 struct rb_node *n = root->rb_node;
279 dir = cmp(n, search, fuzz);
291 static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical, u64 *size)
293 struct fs_chunk *fs_chunk;
294 struct rb_node *entry;
295 struct fs_chunk search;
298 if (logical == BTRFS_SUPER_INFO_OFFSET)
301 search.logical = logical;
302 entry = tree_search(&mdres->chunk_tree, &search.l, chunk_cmp, 1);
304 if (mdres->in != stdin)
305 printf("Couldn't find a chunk, using logical\n");
308 fs_chunk = rb_entry(entry, struct fs_chunk, l);
309 if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
311 offset = search.logical - fs_chunk->logical;
313 *size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
314 return fs_chunk->physical + offset;
318 static char *find_collision(struct metadump_struct *md, char *name,
322 struct rb_node *entry;
324 unsigned long checksum;
330 entry = tree_search(&md->name_tree, &tmp.n, name_cmp, 0);
332 val = rb_entry(entry, struct name, n);
337 val = malloc(sizeof(struct name));
339 fprintf(stderr, "Couldn't sanitize name, enomem\n");
344 memset(val, 0, sizeof(*val));
348 val->sub = malloc(name_len);
350 fprintf(stderr, "Couldn't sanitize name, enomem\n");
356 checksum = crc32c(~1, val->val, name_len);
357 memset(val->sub, ' ', name_len);
360 if (crc32c(~1, val->sub, name_len) == checksum &&
361 memcmp(val->sub, val->val, val->len)) {
366 if (val->sub[i] == 127) {
371 } while (val->sub[i] == 127);
376 if (val->sub[i] == '/')
378 memset(val->sub, ' ', i);
383 if (val->sub[i] == '/')
389 fprintf(stderr, "Couldn't find a collision for '%.*s', "
390 "generating normal garbage, it won't match indexes\n",
392 for (i = 0; i < name_len; i++) {
393 char c = rand() % 94 + 33;
401 tree_insert(&md->name_tree, &val->n, name_cmp);
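/*
 * Informal overview of name sanitization (-s): with a single -s the original
 * names are overwritten with random printable garbage of the same length;
 * with -ss, find_collision() above tries to build a same-length replacement
 * with the same crc32c so that directory index hash lookups in the dumped
 * metadata still resolve.  When no collision is found it falls back to plain
 * garbage, hence the warning above that indexes won't match.
 */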
405 static void sanitize_dir_item(struct metadump_struct *md, struct extent_buffer *eb,
408 struct btrfs_dir_item *dir_item;
411 unsigned long name_ptr;
416 int free_garbage = (md->sanitize_names == 1);
418 dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
419 total_len = btrfs_item_size_nr(eb, slot);
420 while (cur < total_len) {
421 this_len = sizeof(*dir_item) +
422 btrfs_dir_name_len(eb, dir_item) +
423 btrfs_dir_data_len(eb, dir_item);
424 name_ptr = (unsigned long)(dir_item + 1);
425 name_len = btrfs_dir_name_len(eb, dir_item);
427 if (md->sanitize_names > 1) {
428 buf = malloc(name_len);
430 fprintf(stderr, "Couldn't sanitize name, "
434 read_extent_buffer(eb, buf, name_ptr, name_len);
435 garbage = find_collision(md, buf, name_len);
437 garbage = generate_garbage(name_len);
440 fprintf(stderr, "Couldn't sanitize name, enomem\n");
443 write_extent_buffer(eb, garbage, name_ptr, name_len);
445 dir_item = (struct btrfs_dir_item *)((char *)dir_item +
452 static void sanitize_inode_ref(struct metadump_struct *md,
453 struct extent_buffer *eb, int slot, int ext)
455 struct btrfs_inode_extref *extref;
456 struct btrfs_inode_ref *ref;
459 unsigned long name_ptr;
463 int free_garbage = (md->sanitize_names == 1);
465 item_size = btrfs_item_size_nr(eb, slot);
466 ptr = btrfs_item_ptr_offset(eb, slot);
467 while (cur_offset < item_size) {
469 extref = (struct btrfs_inode_extref *)(ptr +
471 name_ptr = (unsigned long)(&extref->name);
472 len = btrfs_inode_extref_name_len(eb, extref);
473 cur_offset += sizeof(*extref);
475 ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
476 len = btrfs_inode_ref_name_len(eb, ref);
477 name_ptr = (unsigned long)(ref + 1);
478 cur_offset += sizeof(*ref);
482 if (md->sanitize_names > 1) {
485 fprintf(stderr, "Couldn't sanitize name, "
489 read_extent_buffer(eb, buf, name_ptr, len);
490 garbage = find_collision(md, buf, len);
492 garbage = generate_garbage(len);
496 fprintf(stderr, "Couldn't sanitize name, enomem\n");
499 write_extent_buffer(eb, garbage, name_ptr, len);
505 static void sanitize_xattr(struct metadump_struct *md,
506 struct extent_buffer *eb, int slot)
508 struct btrfs_dir_item *dir_item;
509 unsigned long data_ptr;
512 dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
513 data_len = btrfs_dir_data_len(eb, dir_item);
515 data_ptr = (unsigned long)((char *)(dir_item + 1) +
516 btrfs_dir_name_len(eb, dir_item));
517 memset_extent_buffer(eb, 0, data_ptr, data_len);
520 static void sanitize_name(struct metadump_struct *md, u8 *dst,
521 struct extent_buffer *src, struct btrfs_key *key,
524 struct extent_buffer *eb;
526 eb = alloc_dummy_eb(src->start, src->len);
528 fprintf(stderr, "Couldn't sanitize name, no memory\n");
532 memcpy(eb->data, dst, eb->len);
535 case BTRFS_DIR_ITEM_KEY:
536 case BTRFS_DIR_INDEX_KEY:
537 sanitize_dir_item(md, eb, slot);
539 case BTRFS_INODE_REF_KEY:
540 sanitize_inode_ref(md, eb, slot, 0);
542 case BTRFS_INODE_EXTREF_KEY:
543 sanitize_inode_ref(md, eb, slot, 1);
545 case BTRFS_XATTR_ITEM_KEY:
546 sanitize_xattr(md, eb, slot);
552 memcpy(dst, eb->data, eb->len);
557 * zero inline extents and csum items
559 static void zero_items(struct metadump_struct *md, u8 *dst,
560 struct extent_buffer *src)
562 struct btrfs_file_extent_item *fi;
563 struct btrfs_item *item;
564 struct btrfs_key key;
565 u32 nritems = btrfs_header_nritems(src);
570 for (i = 0; i < nritems; i++) {
571 item = btrfs_item_nr(i);
572 btrfs_item_key_to_cpu(src, &key, i);
573 if (key.type == BTRFS_CSUM_ITEM_KEY) {
574 size = btrfs_item_size_nr(src, i);
575 memset(dst + btrfs_leaf_data(src) +
576 btrfs_item_offset_nr(src, i), 0, size);
580 if (md->sanitize_names && has_name(&key)) {
581 sanitize_name(md, dst, src, &key, i);
585 if (key.type != BTRFS_EXTENT_DATA_KEY)
588 fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
589 extent_type = btrfs_file_extent_type(src, fi);
590 if (extent_type != BTRFS_FILE_EXTENT_INLINE)
593 ptr = btrfs_file_extent_inline_start(fi);
594 size = btrfs_file_extent_inline_item_len(src, item);
595 memset(dst + ptr, 0, size);
600 * copy buffer and zero useless data in the buffer
602 static void copy_buffer(struct metadump_struct *md, u8 *dst,
603 struct extent_buffer *src)
609 memcpy(dst, src->data, src->len);
610 if (src->start == BTRFS_SUPER_INFO_OFFSET)
613 level = btrfs_header_level(src);
614 nritems = btrfs_header_nritems(src);
617 size = sizeof(struct btrfs_header);
618 memset(dst + size, 0, src->len - size);
619 } else if (level == 0) {
620 size = btrfs_leaf_data(src) +
621 btrfs_item_offset_nr(src, nritems - 1) -
622 btrfs_item_nr_offset(nritems);
623 memset(dst + btrfs_item_nr_offset(nritems), 0, size);
624 zero_items(md, dst, src);
626 size = offsetof(struct btrfs_node, ptrs) +
627 sizeof(struct btrfs_key_ptr) * nritems;
628 memset(dst + size, 0, src->len - size);
630 csum_block(dst, src->len);
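/*
 * Dump side pipeline (informal): flush_pending() batches contiguous extents
 * into an async_work item, the dump_worker() threads below optionally
 * zlib-compress each item, and write_buffers() emits the cluster header plus
 * the (compressed) payloads in the original queueing order.
 */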
633 static void *dump_worker(void *data)
635 struct metadump_struct *md = (struct metadump_struct *)data;
636 struct async_work *async;
640 pthread_mutex_lock(&md->mutex);
641 while (list_empty(&md->list)) {
643 pthread_mutex_unlock(&md->mutex);
646 pthread_cond_wait(&md->cond, &md->mutex);
648 async = list_entry(md->list.next, struct async_work, list);
649 list_del_init(&async->list);
650 pthread_mutex_unlock(&md->mutex);
652 if (md->compress_level > 0) {
653 u8 *orig = async->buffer;
655 async->bufsize = compressBound(async->size);
656 async->buffer = malloc(async->bufsize);
657 if (!async->buffer) {
658 fprintf(stderr, "Error allocating buffer\n");
659 pthread_mutex_lock(&md->mutex);
662 pthread_mutex_unlock(&md->mutex);
666 ret = compress2(async->buffer,
667 (unsigned long *)&async->bufsize,
668 orig, async->size, md->compress_level);
676 pthread_mutex_lock(&md->mutex);
678 pthread_mutex_unlock(&md->mutex);
684 static void meta_cluster_init(struct metadump_struct *md, u64 start)
686 struct meta_cluster_header *header;
690 header = &md->cluster->header;
691 header->magic = cpu_to_le64(HEADER_MAGIC);
692 header->bytenr = cpu_to_le64(start);
693 header->nritems = cpu_to_le32(0);
694 header->compress = md->compress_level > 0 ?
695 COMPRESS_ZLIB : COMPRESS_NONE;
698 static void metadump_destroy(struct metadump_struct *md, int num_threads)
703 pthread_mutex_lock(&md->mutex);
705 pthread_cond_broadcast(&md->cond);
706 pthread_mutex_unlock(&md->mutex);
708 for (i = 0; i < num_threads; i++)
709 pthread_join(md->threads[i], NULL);
711 pthread_cond_destroy(&md->cond);
712 pthread_mutex_destroy(&md->mutex);
714 while ((n = rb_first(&md->name_tree))) {
717 name = rb_entry(n, struct name, n);
718 rb_erase(n, &md->name_tree);
727 static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
728 FILE *out, int num_threads, int compress_level,
733 memset(md, 0, sizeof(*md));
734 pthread_cond_init(&md->cond, NULL);
735 pthread_mutex_init(&md->mutex, NULL);
736 INIT_LIST_HEAD(&md->list);
737 INIT_LIST_HEAD(&md->ordered);
740 md->pending_start = (u64)-1;
741 md->compress_level = compress_level;
742 md->cluster = calloc(1, BLOCK_SIZE);
743 md->sanitize_names = sanitize_names;
744 if (sanitize_names > 1)
745 crc32c_optimization_init();
748 pthread_cond_destroy(&md->cond);
749 pthread_mutex_destroy(&md->mutex);
753 meta_cluster_init(md, 0);
757 md->name_tree.rb_node = NULL;
758 md->num_threads = num_threads;
759 md->threads = calloc(num_threads, sizeof(pthread_t));
762 pthread_cond_destroy(&md->cond);
763 pthread_mutex_destroy(&md->mutex);
767 for (i = 0; i < num_threads; i++) {
768 ret = pthread_create(md->threads + i, NULL, dump_worker, md);
774 metadump_destroy(md, i + 1);
779 static int write_zero(FILE *out, size_t size)
781 static char zero[BLOCK_SIZE];
782 return fwrite(zero, size, 1, out);
785 static int write_buffers(struct metadump_struct *md, u64 *next)
787 struct meta_cluster_header *header = &md->cluster->header;
788 struct meta_cluster_item *item;
789 struct async_work *async;
795 if (list_empty(&md->ordered))
798 /* wait until all buffers are compressed */
799 while (!err && md->num_items > md->num_ready) {
800 struct timespec ts = {
804 pthread_mutex_unlock(&md->mutex);
805 nanosleep(&ts, NULL);
806 pthread_mutex_lock(&md->mutex);
811 fprintf(stderr, "One of the threads errored out %s\n",
816 /* set up and write the index block */
817 list_for_each_entry(async, &md->ordered, ordered) {
818 item = md->cluster->items + nritems;
819 item->bytenr = cpu_to_le64(async->start);
820 item->size = cpu_to_le32(async->bufsize);
823 header->nritems = cpu_to_le32(nritems);
825 ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
827 fprintf(stderr, "Error writing out cluster: %d\n", errno);
832 bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
833 while (!list_empty(&md->ordered)) {
834 async = list_entry(md->ordered.next, struct async_work,
836 list_del_init(&async->ordered);
838 bytenr += async->bufsize;
840 ret = fwrite(async->buffer, async->bufsize, 1,
845 fprintf(stderr, "Error writing out cluster: %d\n",
853 /* zero unused space in the last block */
854 if (!err && bytenr & BLOCK_MASK) {
855 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
858 ret = write_zero(md->out, size);
860 fprintf(stderr, "Error zeroing out buffer: %d\n",
870 static int read_data_extent(struct metadump_struct *md,
871 struct async_work *async)
873 struct btrfs_multi_bio *multi = NULL;
874 struct btrfs_device *device;
875 u64 bytes_left = async->size;
876 u64 logical = async->start;
885 read_len = bytes_left;
886 ret = btrfs_map_block(&md->root->fs_info->mapping_tree, READ,
887 logical, &read_len, &multi, 0, NULL);
889 fprintf(stderr, "Couldn't map data block %d\n", ret);
893 device = multi->stripes[0].dev;
895 if (device->fd == 0) {
897 "Device we need to read from is not open\n");
902 bytenr = multi->stripes[0].physical;
905 read_len = min(read_len, bytes_left);
906 done = pread64(fd, async->buffer+offset, read_len, bytenr);
907 if (done < read_len) {
909 fprintf(stderr, "Error reading extent %d\n",
912 fprintf(stderr, "Short read\n");
924 static int get_dev_fd(struct btrfs_root *root)
926 struct btrfs_device *dev;
928 dev = list_first_entry(&root->fs_info->fs_devices->devices,
929 struct btrfs_device, dev_list);
933 static int flush_pending(struct metadump_struct *md, int done)
935 struct async_work *async = NULL;
936 struct extent_buffer *eb;
937 u64 blocksize = md->root->nodesize;
943 if (md->pending_size) {
944 async = calloc(1, sizeof(*async));
948 async->start = md->pending_start;
949 async->size = md->pending_size;
950 async->bufsize = async->size;
951 async->buffer = malloc(async->bufsize);
952 if (!async->buffer) {
957 start = async->start;
961 ret = read_data_extent(md, async);
970 * Balance can make the mapping not cover the super block, so
971 * just copy directly from one of the devices.
973 if (start == BTRFS_SUPER_INFO_OFFSET) {
974 int fd = get_dev_fd(md->root);
976 ret = pread64(fd, async->buffer, size, start);
980 fprintf(stderr, "Error reading superblock\n");
987 while (!md->data && size > 0) {
988 u64 this_read = min(blocksize, size);
989 eb = read_tree_block(md->root, start, this_read, 0);
990 if (!extent_buffer_uptodate(eb)) {
994 "Error reading metadata block\n");
997 copy_buffer(md, async->buffer + offset, eb);
998 free_extent_buffer(eb);
1000 offset += this_read;
1004 md->pending_start = (u64)-1;
1005 md->pending_size = 0;
1010 pthread_mutex_lock(&md->mutex);
1012 list_add_tail(&async->ordered, &md->ordered);
1014 if (md->compress_level > 0) {
1015 list_add_tail(&async->list, &md->list);
1016 pthread_cond_signal(&md->cond);
1021 if (md->num_items >= ITEMS_PER_CLUSTER || done) {
1022 ret = write_buffers(md, &start);
1024 fprintf(stderr, "Error writing buffers %d\n",
1027 meta_cluster_init(md, start);
1029 pthread_mutex_unlock(&md->mutex);
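/*
 * add_extent() below merges adjacent extents of the same kind (data vs.
 * metadata) into md->pending_* and only calls flush_pending() once the batch
 * would exceed MAX_PENDING_SIZE or stops being contiguous.
 */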
1033 static int add_extent(u64 start, u64 size, struct metadump_struct *md,
1037 if (md->data != data ||
1038 md->pending_size + size > MAX_PENDING_SIZE ||
1039 md->pending_start + md->pending_size != start) {
1040 ret = flush_pending(md, 0);
1043 md->pending_start = start;
1045 readahead_tree_block(md->root, start, size, 0);
1046 md->pending_size += size;
1051 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1052 static int is_tree_block(struct btrfs_root *extent_root,
1053 struct btrfs_path *path, u64 bytenr)
1055 struct extent_buffer *leaf;
1056 struct btrfs_key key;
1060 leaf = path->nodes[0];
1062 struct btrfs_extent_ref_v0 *ref_item;
1064 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1065 ret = btrfs_next_leaf(extent_root, path);
1070 leaf = path->nodes[0];
1072 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1073 if (key.objectid != bytenr)
1075 if (key.type != BTRFS_EXTENT_REF_V0_KEY)
1077 ref_item = btrfs_item_ptr(leaf, path->slots[0],
1078 struct btrfs_extent_ref_v0);
1079 ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
1080 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
1088 static int copy_tree_blocks(struct btrfs_root *root, struct extent_buffer *eb,
1089 struct metadump_struct *metadump, int root_tree)
1091 struct extent_buffer *tmp;
1092 struct btrfs_root_item *ri;
1093 struct btrfs_key key;
1100 ret = add_extent(btrfs_header_bytenr(eb), root->leafsize, metadump, 0);
1102 fprintf(stderr, "Error adding metadata block\n");
1106 if (btrfs_header_level(eb) == 0 && !root_tree)
1109 level = btrfs_header_level(eb);
1110 nritems = btrfs_header_nritems(eb);
1111 for (i = 0; i < nritems; i++) {
1113 btrfs_item_key_to_cpu(eb, &key, i);
1114 if (key.type != BTRFS_ROOT_ITEM_KEY)
1116 ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
1117 bytenr = btrfs_disk_root_bytenr(eb, ri);
1118 tmp = read_tree_block(root, bytenr, root->leafsize, 0);
1119 if (!extent_buffer_uptodate(tmp)) {
1121 "Error reading log root block\n");
1124 ret = copy_tree_blocks(root, tmp, metadump, 0);
1125 free_extent_buffer(tmp);
1129 bytenr = btrfs_node_blockptr(eb, i);
1130 tmp = read_tree_block(root, bytenr, root->leafsize, 0);
1131 if (!extent_buffer_uptodate(tmp)) {
1132 fprintf(stderr, "Error reading log block\n");
1135 ret = copy_tree_blocks(root, tmp, metadump, root_tree);
1136 free_extent_buffer(tmp);
1145 static int copy_log_trees(struct btrfs_root *root,
1146 struct metadump_struct *metadump,
1147 struct btrfs_path *path)
1149 u64 blocknr = btrfs_super_log_root(root->fs_info->super_copy);
1154 if (!root->fs_info->log_root_tree ||
1155 !root->fs_info->log_root_tree->node) {
1156 fprintf(stderr, "Error copying tree log, it wasn't set up\n");
1160 return copy_tree_blocks(root, root->fs_info->log_root_tree->node,
1164 static int copy_space_cache(struct btrfs_root *root,
1165 struct metadump_struct *metadump,
1166 struct btrfs_path *path)
1168 struct extent_buffer *leaf;
1169 struct btrfs_file_extent_item *fi;
1170 struct btrfs_key key;
1171 u64 bytenr, num_bytes;
1174 root = root->fs_info->tree_root;
1177 key.type = BTRFS_EXTENT_DATA_KEY;
1180 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1182 fprintf(stderr, "Error searching for free space inode %d\n",
1187 leaf = path->nodes[0];
1190 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1191 ret = btrfs_next_leaf(root, path);
1193 fprintf(stderr, "Error going to next leaf "
1199 leaf = path->nodes[0];
1202 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1203 if (key.type != BTRFS_EXTENT_DATA_KEY) {
1208 fi = btrfs_item_ptr(leaf, path->slots[0],
1209 struct btrfs_file_extent_item);
1210 if (btrfs_file_extent_type(leaf, fi) !=
1211 BTRFS_FILE_EXTENT_REG) {
1216 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1217 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1218 ret = add_extent(bytenr, num_bytes, metadump, 1);
1220 fprintf(stderr, "Error adding space cache blocks %d\n",
1222 btrfs_release_path(path);
1231 static int copy_from_extent_tree(struct metadump_struct *metadump,
1232 struct btrfs_path *path)
1234 struct btrfs_root *extent_root;
1235 struct extent_buffer *leaf;
1236 struct btrfs_extent_item *ei;
1237 struct btrfs_key key;
1242 extent_root = metadump->root->fs_info->extent_root;
1243 bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
1244 key.objectid = bytenr;
1245 key.type = BTRFS_EXTENT_ITEM_KEY;
1248 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1250 fprintf(stderr, "Error searching extent root %d\n", ret);
1255 leaf = path->nodes[0];
1258 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1259 ret = btrfs_next_leaf(extent_root, path);
1261 fprintf(stderr, "Error going to next leaf %d"
1269 leaf = path->nodes[0];
1272 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1273 if (key.objectid < bytenr ||
1274 (key.type != BTRFS_EXTENT_ITEM_KEY &&
1275 key.type != BTRFS_METADATA_ITEM_KEY)) {
1280 bytenr = key.objectid;
1281 if (key.type == BTRFS_METADATA_ITEM_KEY)
1282 num_bytes = extent_root->leafsize;
1284 num_bytes = key.offset;
1286 if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
1287 ei = btrfs_item_ptr(leaf, path->slots[0],
1288 struct btrfs_extent_item);
1289 if (btrfs_extent_flags(leaf, ei) &
1290 BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1291 ret = add_extent(bytenr, num_bytes, metadump,
1294 fprintf(stderr, "Error adding block "
1300 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1301 ret = is_tree_block(extent_root, path, bytenr);
1303 fprintf(stderr, "Error checking tree block "
1309 ret = add_extent(bytenr, num_bytes, metadump,
1312 fprintf(stderr, "Error adding block "
1319 fprintf(stderr, "Either extent tree corruption or "
1320 "you haven't built with V0 support\n");
1325 bytenr += num_bytes;
1328 btrfs_release_path(path);
1333 static int create_metadump(const char *input, FILE *out, int num_threads,
1334 int compress_level, int sanitize, int walk_trees)
1336 struct btrfs_root *root;
1337 struct btrfs_path *path = NULL;
1338 struct metadump_struct metadump;
1342 root = open_ctree(input, 0, 0);
1344 fprintf(stderr, "Open ctree failed\n");
1348 BUG_ON(root->nodesize != root->leafsize);
1350 ret = metadump_init(&metadump, root, out, num_threads,
1351 compress_level, sanitize);
1353 fprintf(stderr, "Error initializing metadump %d\n", ret);
1358 ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
1361 fprintf(stderr, "Error adding metadata %d\n", ret);
1366 path = btrfs_alloc_path();
1368 fprintf(stderr, "Out of memory allocating path\n");
1374 ret = copy_tree_blocks(root, root->fs_info->chunk_root->node,
1381 ret = copy_tree_blocks(root, root->fs_info->tree_root->node,
1388 ret = copy_from_extent_tree(&metadump, path);
1395 ret = copy_log_trees(root, &metadump, path);
1401 ret = copy_space_cache(root, &metadump, path);
1403 ret = flush_pending(&metadump, 1);
1407 fprintf(stderr, "Error flushing pending %d\n", ret);
1410 metadump_destroy(&metadump, num_threads);
1412 btrfs_free_path(path);
1413 ret = close_ctree(root);
1414 return err ? err : ret;
1417 static void update_super_old(u8 *buffer)
1419 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1420 struct btrfs_chunk *chunk;
1421 struct btrfs_disk_key *key;
1422 u32 sectorsize = btrfs_super_sectorsize(super);
1423 u64 flags = btrfs_super_flags(super);
1425 flags |= BTRFS_SUPER_FLAG_METADUMP;
1426 btrfs_set_super_flags(super, flags);
1428 key = (struct btrfs_disk_key *)(super->sys_chunk_array);
1429 chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
1430 sizeof(struct btrfs_disk_key));
1432 btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1433 btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
1434 btrfs_set_disk_key_offset(key, 0);
1436 btrfs_set_stack_chunk_length(chunk, (u64)-1);
1437 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
1438 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
1439 btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
1440 btrfs_set_stack_chunk_io_align(chunk, sectorsize);
1441 btrfs_set_stack_chunk_io_width(chunk, sectorsize);
1442 btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
1443 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1444 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1445 chunk->stripe.devid = super->dev_item.devid;
1446 btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
1447 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
1448 btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
1449 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
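/*
 * Informal note: the old (-o) restore path keeps logical addressing, so
 * update_super_old() above rewrites the sys chunk array to a single SYSTEM
 * chunk of length (u64)-1 at offset 0 and sets BTRFS_SUPER_FLAG_METADUMP,
 * which in effect makes logical and physical addresses identical on the
 * restored device.  update_super() below is the newer path that remaps each
 * chunk through logical_to_physical() instead.
 */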
1452 static int update_super(struct mdrestore_struct *mdres, u8 *buffer)
1454 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1455 struct btrfs_chunk *chunk;
1456 struct btrfs_disk_key *disk_key;
1457 struct btrfs_key key;
1458 u64 flags = btrfs_super_flags(super);
1459 u32 new_array_size = 0;
1462 u8 *ptr, *write_ptr;
1463 int old_num_stripes;
1465 write_ptr = ptr = super->sys_chunk_array;
1466 array_size = btrfs_super_sys_array_size(super);
1468 while (cur < array_size) {
1469 disk_key = (struct btrfs_disk_key *)ptr;
1470 btrfs_disk_key_to_cpu(&key, disk_key);
1472 new_array_size += sizeof(*disk_key);
1473 memmove(write_ptr, ptr, sizeof(*disk_key));
1475 write_ptr += sizeof(*disk_key);
1476 ptr += sizeof(*disk_key);
1477 cur += sizeof(*disk_key);
1479 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1480 u64 physical, size = 0;
1482 chunk = (struct btrfs_chunk *)ptr;
1483 old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1484 chunk = (struct btrfs_chunk *)write_ptr;
1486 memmove(write_ptr, ptr, sizeof(*chunk));
1487 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1488 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1489 btrfs_set_stack_chunk_type(chunk,
1490 BTRFS_BLOCK_GROUP_SYSTEM);
1491 btrfs_set_stack_stripe_devid(&chunk->stripe,
1492 super->dev_item.devid);
1493 physical = logical_to_physical(mdres, key.offset,
1495 if (size != (u64)-1)
1496 btrfs_set_stack_stripe_offset(&chunk->stripe,
1498 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
1500 new_array_size += sizeof(*chunk);
1502 fprintf(stderr, "Bogus key in the sys chunk array "
1506 write_ptr += sizeof(*chunk);
1507 ptr += btrfs_chunk_item_size(old_num_stripes);
1508 cur += btrfs_chunk_item_size(old_num_stripes);
1511 if (mdres->clear_space_cache)
1512 btrfs_set_super_cache_generation(super, 0);
1514 flags |= BTRFS_SUPER_FLAG_METADUMP_V2;
1515 btrfs_set_super_flags(super, flags);
1516 btrfs_set_super_sys_array_size(super, new_array_size);
1517 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
1522 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size)
1524 struct extent_buffer *eb;
1526 eb = malloc(sizeof(struct extent_buffer) + size);
1529 memset(eb, 0, sizeof(struct extent_buffer) + size);
1536 static void truncate_item(struct extent_buffer *eb, int slot, u32 new_size)
1538 struct btrfs_item *item;
1546 old_size = btrfs_item_size_nr(eb, slot);
1547 if (old_size == new_size)
1550 nritems = btrfs_header_nritems(eb);
1551 data_end = btrfs_item_offset_nr(eb, nritems - 1);
1553 old_data_start = btrfs_item_offset_nr(eb, slot);
1554 size_diff = old_size - new_size;
1556 for (i = slot; i < nritems; i++) {
1558 item = btrfs_item_nr(i);
1559 ioff = btrfs_item_offset(eb, item);
1560 btrfs_set_item_offset(eb, item, ioff + size_diff);
1563 memmove_extent_buffer(eb, btrfs_leaf_data(eb) + data_end + size_diff,
1564 btrfs_leaf_data(eb) + data_end,
1565 old_data_start + new_size - data_end);
1566 item = btrfs_item_nr(slot);
1567 btrfs_set_item_size(eb, item, new_size);
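/*
 * Informal note on the leaf layout assumed by truncate_item() above: item
 * data grows backwards from the end of the leaf, so shrinking an item means
 * shifting the data of this and the following items towards the end and
 * bumping their offsets by size_diff.  fixup_chunk_tree_block() below relies
 * on this to cut every CHUNK_ITEM down to a single stripe on the restore
 * target.
 */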
1570 static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
1571 struct async_work *async, u8 *buffer,
1574 struct extent_buffer *eb;
1575 size_t size_left = size;
1576 u64 bytenr = async->start;
1579 if (size_left % mdres->leafsize)
1582 eb = alloc_dummy_eb(bytenr, mdres->leafsize);
1588 memcpy(eb->data, buffer, mdres->leafsize);
1590 if (btrfs_header_bytenr(eb) != bytenr)
1592 if (memcmp(mdres->fsid,
1593 eb->data + offsetof(struct btrfs_header, fsid),
1597 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID)
1600 if (btrfs_header_level(eb) != 0)
1603 for (i = 0; i < btrfs_header_nritems(eb); i++) {
1604 struct btrfs_chunk chunk;
1605 struct btrfs_key key;
1606 u64 type, physical, size = (u64)-1;
1608 btrfs_item_key_to_cpu(eb, &key, i);
1609 if (key.type != BTRFS_CHUNK_ITEM_KEY)
1611 truncate_item(eb, i, sizeof(chunk));
1612 read_extent_buffer(eb, &chunk,
1613 btrfs_item_ptr_offset(eb, i),
1617 physical = logical_to_physical(mdres, key.offset,
1620 /* Zero out the RAID profile */
1621 type = btrfs_stack_chunk_type(&chunk);
1622 type &= (BTRFS_BLOCK_GROUP_DATA |
1623 BTRFS_BLOCK_GROUP_SYSTEM |
1624 BTRFS_BLOCK_GROUP_METADATA |
1625 BTRFS_BLOCK_GROUP_DUP);
1626 btrfs_set_stack_chunk_type(&chunk, type);
1628 btrfs_set_stack_chunk_num_stripes(&chunk, 1);
1629 btrfs_set_stack_chunk_sub_stripes(&chunk, 0);
1630 btrfs_set_stack_stripe_devid(&chunk.stripe, mdres->devid);
1631 if (size != (u64)-1)
1632 btrfs_set_stack_stripe_offset(&chunk.stripe,
1634 memcpy(chunk.stripe.dev_uuid, mdres->uuid,
1636 write_extent_buffer(eb, &chunk,
1637 btrfs_item_ptr_offset(eb, i),
1640 memcpy(buffer, eb->data, eb->len);
1641 csum_block(buffer, eb->len);
1643 size_left -= mdres->leafsize;
1644 buffer += mdres->leafsize;
1645 bytenr += mdres->leafsize;
1652 static void write_backup_supers(int fd, u8 *buf)
1654 struct btrfs_super_block *super = (struct btrfs_super_block *)buf;
1661 if (fstat(fd, &st)) {
1662 fprintf(stderr, "Couldn't stat restore point, won't be able "
1663 "to write backup supers: %d\n", errno);
1667 size = btrfs_device_size(fd, &st);
1669 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1670 bytenr = btrfs_sb_offset(i);
1671 if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
1673 btrfs_set_super_bytenr(super, bytenr);
1674 csum_block(buf, BTRFS_SUPER_INFO_SIZE);
1675 ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
1676 if (ret < BTRFS_SUPER_INFO_SIZE) {
1678 fprintf(stderr, "Problem writing out backup "
1679 "super block %d, err %d\n", i, errno);
1681 fprintf(stderr, "Short write writing out "
1682 "backup super block\n");
1688 static void *restore_worker(void *data)
1690 struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
1691 struct async_work *async;
1697 int compress_size = MAX_PENDING_SIZE * 4;
1699 outfd = fileno(mdres->out);
1700 buffer = malloc(compress_size);
1702 fprintf(stderr, "Error allocating buffer\n");
1703 pthread_mutex_lock(&mdres->mutex);
1705 mdres->error = -ENOMEM;
1706 pthread_mutex_unlock(&mdres->mutex);
1715 pthread_mutex_lock(&mdres->mutex);
1716 while (!mdres->leafsize || list_empty(&mdres->list)) {
1718 pthread_mutex_unlock(&mdres->mutex);
1721 pthread_cond_wait(&mdres->cond, &mdres->mutex);
1723 async = list_entry(mdres->list.next, struct async_work, list);
1724 list_del_init(&async->list);
1725 pthread_mutex_unlock(&mdres->mutex);
1727 if (mdres->compress_method == COMPRESS_ZLIB) {
1728 size = compress_size;
1729 ret = uncompress(buffer, (unsigned long *)&size,
1730 async->buffer, async->bufsize);
1732 fprintf(stderr, "Error decompressing %d\n",
1738 outbuf = async->buffer;
1739 size = async->bufsize;
1742 if (!mdres->multi_devices) {
1743 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1744 if (mdres->old_restore) {
1745 update_super_old(outbuf);
1747 ret = update_super(mdres, outbuf);
1751 } else if (!mdres->old_restore) {
1752 ret = fixup_chunk_tree_block(mdres, async, outbuf, size);
1758 if (!mdres->fixup_offset) {
1760 u64 chunk_size = size;
1761 if (!mdres->multi_devices && !mdres->old_restore)
1762 bytenr = logical_to_physical(mdres,
1763 async->start + offset,
1766 bytenr = async->start + offset;
1768 ret = pwrite64(outfd, outbuf+offset, chunk_size,
1770 if (ret != chunk_size) {
1772 fprintf(stderr, "Error writing to "
1773 "device %d\n", errno);
1777 fprintf(stderr, "Short write\n");
1783 offset += chunk_size;
1785 } else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
1786 ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
1788 printk("Error writing data\n");
1794 /* backup super blocks are already there at fixup_offset stage */
1795 if (!mdres->multi_devices && async->start == BTRFS_SUPER_INFO_OFFSET)
1796 write_backup_supers(outfd, outbuf);
1798 pthread_mutex_lock(&mdres->mutex);
1799 if (err && !mdres->error)
1802 pthread_mutex_unlock(&mdres->mutex);
1804 free(async->buffer);
1812 static void mdrestore_destroy(struct mdrestore_struct *mdres, int num_threads)
1817 while ((n = rb_first(&mdres->chunk_tree))) {
1818 struct fs_chunk *entry;
1820 entry = rb_entry(n, struct fs_chunk, l);
1821 rb_erase(n, &mdres->chunk_tree);
1822 rb_erase(&entry->p, &mdres->physical_tree);
1825 pthread_mutex_lock(&mdres->mutex);
1827 pthread_cond_broadcast(&mdres->cond);
1828 pthread_mutex_unlock(&mdres->mutex);
1830 for (i = 0; i < num_threads; i++)
1831 pthread_join(mdres->threads[i], NULL);
1833 pthread_cond_destroy(&mdres->cond);
1834 pthread_mutex_destroy(&mdres->mutex);
1835 free(mdres->threads);
1838 static int mdrestore_init(struct mdrestore_struct *mdres,
1839 FILE *in, FILE *out, int old_restore,
1840 int num_threads, int fixup_offset,
1841 struct btrfs_fs_info *info, int multi_devices)
1845 memset(mdres, 0, sizeof(*mdres));
1846 pthread_cond_init(&mdres->cond, NULL);
1847 pthread_mutex_init(&mdres->mutex, NULL);
1848 INIT_LIST_HEAD(&mdres->list);
1849 INIT_LIST_HEAD(&mdres->overlapping_chunks);
1852 mdres->old_restore = old_restore;
1853 mdres->chunk_tree.rb_node = NULL;
1854 mdres->fixup_offset = fixup_offset;
1856 mdres->multi_devices = multi_devices;
1857 mdres->clear_space_cache = 0;
1858 mdres->last_physical_offset = 0;
1863 mdres->num_threads = num_threads;
1864 mdres->threads = calloc(num_threads, sizeof(pthread_t));
1865 if (!mdres->threads)
1867 for (i = 0; i < num_threads; i++) {
1868 ret = pthread_create(mdres->threads + i, NULL, restore_worker,
1874 mdrestore_destroy(mdres, i + 1);
1878 static int fill_mdres_info(struct mdrestore_struct *mdres,
1879 struct async_work *async)
1881 struct btrfs_super_block *super;
1886 /* We've already been initialized */
1887 if (mdres->leafsize)
1890 if (mdres->compress_method == COMPRESS_ZLIB) {
1891 size_t size = MAX_PENDING_SIZE * 2;
1893 buffer = malloc(MAX_PENDING_SIZE * 2);
1896 ret = uncompress(buffer, (unsigned long *)&size,
1897 async->buffer, async->bufsize);
1899 fprintf(stderr, "Error decompressing %d\n", ret);
1905 outbuf = async->buffer;
1908 super = (struct btrfs_super_block *)outbuf;
1909 mdres->leafsize = btrfs_super_leafsize(super);
1910 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
1911 memcpy(mdres->uuid, super->dev_item.uuid,
1913 mdres->devid = le64_to_cpu(super->dev_item.devid);
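/*
 * add_cluster() below reads the nritems payloads that follow a cluster
 * header from the stream, queues each one as an async_work for the restore
 * workers, and then skips the zero padding up to the next BLOCK_SIZE
 * boundary.
 */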
1918 static int add_cluster(struct meta_cluster *cluster,
1919 struct mdrestore_struct *mdres, u64 *next)
1921 struct meta_cluster_item *item;
1922 struct meta_cluster_header *header = &cluster->header;
1923 struct async_work *async;
1928 mdres->compress_method = header->compress;
1930 bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
1931 nritems = le32_to_cpu(header->nritems);
1932 for (i = 0; i < nritems; i++) {
1933 item = &cluster->items[i];
1934 async = calloc(1, sizeof(*async));
1936 fprintf(stderr, "Error allocating async\n");
1939 async->start = le64_to_cpu(item->bytenr);
1940 async->bufsize = le32_to_cpu(item->size);
1941 async->buffer = malloc(async->bufsize);
1942 if (!async->buffer) {
1943 fprintf(stderr, "Error allocating async buffer\n");
1947 ret = fread(async->buffer, async->bufsize, 1, mdres->in);
1949 fprintf(stderr, "Error reading buffer %d\n", errno);
1950 free(async->buffer);
1954 bytenr += async->bufsize;
1956 pthread_mutex_lock(&mdres->mutex);
1957 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1958 ret = fill_mdres_info(mdres, async);
1960 fprintf(stderr, "Error setting up restore\n");
1961 pthread_mutex_unlock(&mdres->mutex);
1962 free(async->buffer);
1967 list_add_tail(&async->list, &mdres->list);
1969 pthread_cond_signal(&mdres->cond);
1970 pthread_mutex_unlock(&mdres->mutex);
1972 if (bytenr & BLOCK_MASK) {
1973 char buffer[BLOCK_MASK];
1974 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
1977 ret = fread(buffer, size, 1, mdres->in);
1979 fprintf(stderr, "Error reading in buffer %d\n", errno);
1987 static int wait_for_worker(struct mdrestore_struct *mdres)
1991 pthread_mutex_lock(&mdres->mutex);
1993 while (!ret && mdres->num_items > 0) {
1994 struct timespec ts = {
1996 .tv_nsec = 10000000,
1998 pthread_mutex_unlock(&mdres->mutex);
1999 nanosleep(&ts, NULL);
2000 pthread_mutex_lock(&mdres->mutex);
2003 pthread_mutex_unlock(&mdres->mutex);
2007 static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
2008 u64 bytenr, u64 item_bytenr, u32 bufsize,
2011 struct extent_buffer *eb;
2015 eb = alloc_dummy_eb(bytenr, mdres->leafsize);
2021 while (item_bytenr != bytenr) {
2022 buffer += mdres->leafsize;
2023 item_bytenr += mdres->leafsize;
2026 memcpy(eb->data, buffer, mdres->leafsize);
2027 if (btrfs_header_bytenr(eb) != bytenr) {
2028 fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
2033 if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
2035 fprintf(stderr, "Fsid doesn't match\n");
2040 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
2041 fprintf(stderr, "Does not belong to the chunk tree\n");
2046 for (i = 0; i < btrfs_header_nritems(eb); i++) {
2047 struct btrfs_chunk chunk;
2048 struct fs_chunk *fs_chunk;
2049 struct btrfs_key key;
2051 if (btrfs_header_level(eb)) {
2052 u64 blockptr = btrfs_node_blockptr(eb, i);
2054 ret = search_for_chunk_blocks(mdres, blockptr,
2061 /* Yay a leaf! We loves leafs! */
2062 btrfs_item_key_to_cpu(eb, &key, i);
2063 if (key.type != BTRFS_CHUNK_ITEM_KEY)
2066 fs_chunk = malloc(sizeof(struct fs_chunk));
2068 fprintf(stderr, "Error allocating chunk\n");
2072 memset(fs_chunk, 0, sizeof(*fs_chunk));
2073 read_extent_buffer(eb, &chunk, btrfs_item_ptr_offset(eb, i),
2076 fs_chunk->logical = key.offset;
2077 fs_chunk->physical = btrfs_stack_stripe_offset(&chunk.stripe);
2078 fs_chunk->bytes = btrfs_stack_chunk_length(&chunk);
2079 INIT_LIST_HEAD(&fs_chunk->list);
2080 if (tree_search(&mdres->physical_tree, &fs_chunk->p,
2081 physical_cmp, 1) != NULL)
2082 list_add(&fs_chunk->list, &mdres->overlapping_chunks);
2084 tree_insert(&mdres->physical_tree, &fs_chunk->p,
2086 if (fs_chunk->physical + fs_chunk->bytes >
2087 mdres->last_physical_offset)
2088 mdres->last_physical_offset = fs_chunk->physical +
2090 tree_insert(&mdres->chunk_tree, &fs_chunk->l, chunk_cmp);
2097 /* Scan the image cluster by cluster, starting at cluster_bytenr, for the chunk tree block at 'search' */
2098 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
2099 u64 search, u64 cluster_bytenr)
2101 struct meta_cluster *cluster;
2102 struct meta_cluster_header *header;
2103 struct meta_cluster_item *item;
2104 u64 current_cluster = cluster_bytenr, bytenr;
2106 u32 bufsize, nritems, i;
2107 u32 max_size = MAX_PENDING_SIZE * 2;
2108 u8 *buffer, *tmp = NULL;
2111 cluster = malloc(BLOCK_SIZE);
2113 fprintf(stderr, "Error allocating cluster\n");
2117 buffer = malloc(max_size);
2119 fprintf(stderr, "Error allocating buffer\n");
2124 if (mdres->compress_method == COMPRESS_ZLIB) {
2125 tmp = malloc(max_size);
2127 fprintf(stderr, "Error allocating tmp buffer\n");
2134 bytenr = current_cluster;
2136 if (fseek(mdres->in, current_cluster, SEEK_SET)) {
2137 fprintf(stderr, "Error seeking: %d\n", errno);
2142 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
2144 if (cluster_bytenr != 0) {
2146 current_cluster = 0;
2150 printf("Unexpected end of image while searching for chunk blocks\n");
2153 } else if (ret < 0) {
2154 fprintf(stderr, "Error reading image\n");
2159 header = &cluster->header;
2160 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2161 le64_to_cpu(header->bytenr) != current_cluster) {
2162 fprintf(stderr, "bad header in metadump image\n");
2167 bytenr += BLOCK_SIZE;
2168 nritems = le32_to_cpu(header->nritems);
2169 for (i = 0; i < nritems; i++) {
2172 item = &cluster->items[i];
2173 bufsize = le32_to_cpu(item->size);
2174 item_bytenr = le64_to_cpu(item->bytenr);
2176 if (bufsize > max_size) {
2177 fprintf(stderr, "item %u size %u too big\n",
2183 if (mdres->compress_method == COMPRESS_ZLIB) {
2184 ret = fread(tmp, bufsize, 1, mdres->in);
2186 fprintf(stderr, "Error reading: %d\n",
2193 ret = uncompress(buffer,
2194 (unsigned long *)&size, tmp,
2197 fprintf(stderr, "Error decompressing "
2203 ret = fread(buffer, bufsize, 1, mdres->in);
2205 fprintf(stderr, "Error reading: %d\n",
2214 if (item_bytenr <= search &&
2215 item_bytenr + size > search) {
2216 ret = read_chunk_block(mdres, buffer, search,
2230 if (bytenr & BLOCK_MASK)
2231 bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
2232 current_cluster = bytenr;
2241 static int build_chunk_tree(struct mdrestore_struct *mdres,
2242 struct meta_cluster *cluster)
2244 struct btrfs_super_block *super;
2245 struct meta_cluster_header *header;
2246 struct meta_cluster_item *item = NULL;
2247 u64 chunk_root_bytenr = 0;
2253 /* We can't seek with stdin so don't bother doing this */
2254 if (mdres->in == stdin)
2257 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
2259 fprintf(stderr, "Error reading in cluster: %d\n", errno);
2264 header = &cluster->header;
2265 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2266 le64_to_cpu(header->bytenr) != 0) {
2267 fprintf(stderr, "bad header in metadump image\n");
2271 bytenr += BLOCK_SIZE;
2272 mdres->compress_method = header->compress;
2273 nritems = le32_to_cpu(header->nritems);
2274 for (i = 0; i < nritems; i++) {
2275 item = &cluster->items[i];
2277 if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
2279 bytenr += le32_to_cpu(item->size);
2280 if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
2281 fprintf(stderr, "Error seeking: %d\n", errno);
2286 if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
2287 fprintf(stderr, "Did not find the super block in the first cluster\n");
2291 buffer = malloc(le32_to_cpu(item->size));
2293 fprintf(stderr, "Error allocating buffer\n");
2297 ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
2299 fprintf(stderr, "Error reading buffer: %d\n", errno);
2304 if (mdres->compress_method == COMPRESS_ZLIB) {
2305 size_t size = MAX_PENDING_SIZE * 2;
2308 tmp = malloc(MAX_PENDING_SIZE * 2);
2313 ret = uncompress(tmp, (unsigned long *)&size,
2314 buffer, le32_to_cpu(item->size));
2316 fprintf(stderr, "Error decompressing %d\n", ret);
2325 pthread_mutex_lock(&mdres->mutex);
2326 super = (struct btrfs_super_block *)buffer;
2327 chunk_root_bytenr = btrfs_super_chunk_root(super);
2328 mdres->leafsize = btrfs_super_leafsize(super);
2329 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
2330 memcpy(mdres->uuid, super->dev_item.uuid,
2332 mdres->devid = le64_to_cpu(super->dev_item.devid);
2334 pthread_mutex_unlock(&mdres->mutex);
2336 return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
2339 static int range_contains_super(u64 physical, u64 bytes)
2344 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2345 super_bytenr = btrfs_sb_offset(i);
2346 if (super_bytenr >= physical &&
2347 super_bytenr < physical + bytes)
2354 static void remap_overlapping_chunks(struct mdrestore_struct *mdres)
2356 struct fs_chunk *fs_chunk;
2358 while (!list_empty(&mdres->overlapping_chunks)) {
2359 fs_chunk = list_first_entry(&mdres->overlapping_chunks,
2360 struct fs_chunk, list);
2361 list_del_init(&fs_chunk->list);
2362 if (range_contains_super(fs_chunk->physical,
2364 fprintf(stderr, "Remapping a chunk that had a super "
2365 "mirror inside of it, clearing space cache "
2366 "so we don't end up with corruption\n");
2367 mdres->clear_space_cache = 1;
2369 fs_chunk->physical = mdres->last_physical_offset;
2370 tree_insert(&mdres->physical_tree, &fs_chunk->p, physical_cmp);
2371 mdres->last_physical_offset += fs_chunk->bytes;
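/*
 * Informal note: chunks whose remapped physical ranges collide (queued on
 * mdres->overlapping_chunks) are pushed past last_physical_offset by
 * remap_overlapping_chunks() above.  If such a chunk used to contain a super
 * block mirror, the space cache is invalidated via clear_space_cache, as the
 * message above explains.
 */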
2375 static int __restore_metadump(const char *input, FILE *out, int old_restore,
2376 int num_threads, int fixup_offset,
2377 const char *target, int multi_devices)
2379 struct meta_cluster *cluster = NULL;
2380 struct meta_cluster_header *header;
2381 struct mdrestore_struct mdrestore;
2382 struct btrfs_fs_info *info = NULL;
2387 if (!strcmp(input, "-")) {
2390 in = fopen(input, "r");
2392 perror("unable to open metadump image");
2397 /* NOTE: open with write mode */
2400 info = open_ctree_fs_info(target, 0, 0,
2402 OPEN_CTREE_RESTORE |
2403 OPEN_CTREE_PARTIAL);
2405 fprintf(stderr, "%s: open ctree failed\n", __func__);
2411 cluster = malloc(BLOCK_SIZE);
2413 fprintf(stderr, "Error allocating cluster\n");
2418 ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
2419 fixup_offset, info, multi_devices);
2421 fprintf(stderr, "Error initializing mdrestore %d\n", ret);
2422 goto failed_cluster;
2425 if (!multi_devices && !old_restore) {
2426 ret = build_chunk_tree(&mdrestore, cluster);
2429 if (!list_empty(&mdrestore.overlapping_chunks))
2430 remap_overlapping_chunks(&mdrestore);
2433 if (in != stdin && fseek(in, 0, SEEK_SET)) {
2434 fprintf(stderr, "Error seeking %d\n", errno);
2438 while (!mdrestore.error) {
2439 ret = fread(cluster, BLOCK_SIZE, 1, in);
2443 header = &cluster->header;
2444 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2445 le64_to_cpu(header->bytenr) != bytenr) {
2446 fprintf(stderr, "bad header in metadump image\n");
2450 ret = add_cluster(cluster, &mdrestore, &bytenr);
2452 fprintf(stderr, "Error adding cluster\n");
2456 ret = wait_for_worker(&mdrestore);
2458 mdrestore_destroy(&mdrestore, num_threads);
2462 if (fixup_offset && info)
2463 close_ctree(info->chunk_root);
2470 static int restore_metadump(const char *input, FILE *out, int old_restore,
2471 int num_threads, int multi_devices)
2473 return __restore_metadump(input, out, old_restore, num_threads, 0, NULL,
2477 static int fixup_metadump(const char *input, FILE *out, int num_threads,
2480 return __restore_metadump(input, out, 0, num_threads, 1, target, 1);
2483 static int update_disk_super_on_device(struct btrfs_fs_info *info,
2484 const char *other_dev, u64 cur_devid)
2486 struct btrfs_key key;
2487 struct extent_buffer *leaf;
2488 struct btrfs_path path;
2489 struct btrfs_dev_item *dev_item;
2490 struct btrfs_super_block *disk_super;
2491 char dev_uuid[BTRFS_UUID_SIZE];
2492 char fs_uuid[BTRFS_UUID_SIZE];
2493 u64 devid, type, io_align, io_width;
2494 u64 sector_size, total_bytes, bytes_used;
2499 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2500 key.type = BTRFS_DEV_ITEM_KEY;
2501 key.offset = cur_devid;
2503 btrfs_init_path(&path);
2504 ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
2506 fprintf(stderr, "device item search failed\n");
2510 leaf = path.nodes[0];
2511 dev_item = btrfs_item_ptr(leaf, path.slots[0],
2512 struct btrfs_dev_item);
2514 devid = btrfs_device_id(leaf, dev_item);
2515 if (devid != cur_devid) {
2516 printk("devid %llu mismatch with %llu\n", devid, cur_devid);
2520 type = btrfs_device_type(leaf, dev_item);
2521 io_align = btrfs_device_io_align(leaf, dev_item);
2522 io_width = btrfs_device_io_width(leaf, dev_item);
2523 sector_size = btrfs_device_sector_size(leaf, dev_item);
2524 total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2525 bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2526 read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE);
2527 read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE);
2529 btrfs_release_path(&path);
2531 printk("update disk super on %s devid=%llu\n", other_dev, devid);
2533 /* update other devices' super block */
2534 fp = open(other_dev, O_CREAT | O_RDWR, 0600);
2536 fprintf(stderr, "could not open %s\n", other_dev);
2540 buf = malloc(BTRFS_SUPER_INFO_SIZE);
2547 memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);
2549 disk_super = (struct btrfs_super_block *)buf;
2550 dev_item = &disk_super->dev_item;
2552 btrfs_set_stack_device_type(dev_item, type);
2553 btrfs_set_stack_device_id(dev_item, devid);
2554 btrfs_set_stack_device_total_bytes(dev_item, total_bytes);
2555 btrfs_set_stack_device_bytes_used(dev_item, bytes_used);
2556 btrfs_set_stack_device_io_align(dev_item, io_align);
2557 btrfs_set_stack_device_io_width(dev_item, io_width);
2558 btrfs_set_stack_device_sector_size(dev_item, sector_size);
2559 memcpy(dev_item->uuid, dev_uuid, BTRFS_UUID_SIZE);
2560 memcpy(dev_item->fsid, fs_uuid, BTRFS_UUID_SIZE);
2561 csum_block((u8 *)buf, BTRFS_SUPER_INFO_SIZE);
2563 ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
2564 if (ret != BTRFS_SUPER_INFO_SIZE) {
2569 write_backup_supers(fp, (u8 *)buf);
2577 static void print_usage(void)
2579 fprintf(stderr, "usage: btrfs-image [options] source target\n");
2580 fprintf(stderr, "\t-r \trestore metadump image\n");
2581 fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
2582 fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
2583 fprintf(stderr, "\t-o \told restore mode, do not rewrite the chunk tree when restoring\n");
2584 fprintf(stderr, "\t-s \tsanitize file names, use once to just use garbage, use twice if you want crc collisions\n");
2585 fprintf(stderr, "\t-w \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
2586 fprintf(stderr, "\t-m \trestore for multiple devices\n");
2587 fprintf(stderr, "\n");
2588 fprintf(stderr, "\tIn the dump mode, source is the btrfs device and target is the output file (use '-' for stdout).\n");
2589 fprintf(stderr, "\tIn the restore mode, source is the dumped image and target is the btrfs device/file.\n");
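/*
 * Illustrative invocations (not exhaustive):
 *   btrfs-image -c9 -t4 /dev/sda image.dump          # dump metadata, zlib level 9, 4 threads
 *   btrfs-image -r image.dump /dev/sdb               # restore the image onto a device
 *   btrfs-image -r -m image.dump /dev/sdb /dev/sdc   # restore a multi-device dump
 */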
2593 int main(int argc, char *argv[])
2597 u64 num_threads = 1;
2598 u64 compress_level = 0;
2600 int old_restore = 0;
2602 int multi_devices = 0;
2606 int usage_error = 0;
2610 int c = getopt(argc, argv, "rc:t:oswm");
2618 num_threads = arg_strtou64(optarg);
2619 if (num_threads > 32)
2623 compress_level = arg_strtou64(optarg);
2624 if (compress_level > 9)
2645 argc = argc - optind;
2647 if (check_argc_min(argc, 2))
2654 fprintf(stderr, "Usage error: create and restore cannot be used at the same time\n");
2658 if (walk_trees || sanitize || compress_level) {
2659 fprintf(stderr, "Usage error: the -w, -s and -c options make no sense when restoring\n");
2662 if (multi_devices && dev_cnt < 2) {
2663 fprintf(stderr, "Usage error: not enough devices specified for -m option\n");
2666 if (!multi_devices && dev_cnt != 1) {
2667 fprintf(stderr, "Usage error: exactly one device is accepted without the -m option\n");
2675 source = argv[optind];
2676 target = argv[optind + 1];
2678 if (create && !strcmp(target, "-")) {
2681 out = fopen(target, "w+");
2683 perror("unable to create target file");
2688 if (num_threads == 1 && compress_level > 0) {
2689 num_threads = sysconf(_SC_NPROCESSORS_ONLN);
2690 if (num_threads <= 0)
2695 ret = check_mounted(source);
2697 fprintf(stderr, "Could not check mount status: %s\n",
2702 "WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");
2704 ret = create_metadump(source, out, num_threads,
2705 compress_level, sanitize, walk_trees);
2707 ret = restore_metadump(source, out, old_restore, num_threads,
2711 printk("%s failed (%s)\n", (create) ? "create" : "restore",
2716 /* extended support for multiple devices */
2717 if (!create && multi_devices) {
2718 struct btrfs_fs_info *info;
2722 info = open_ctree_fs_info(target, 0, 0,
2723 OPEN_CTREE_PARTIAL |
2724 OPEN_CTREE_RESTORE);
2727 fprintf(stderr, "unable to open %s error = %s\n",
2728 target, strerror(e));
2732 total_devs = btrfs_super_num_devices(info->super_copy);
2733 if (total_devs != dev_cnt) {
2734 printk("the filesystem needs %llu devices but only %d were given\n",
2735 total_devs, dev_cnt);
2736 close_ctree(info->chunk_root);
2740 /* update super block on other disks */
2741 for (i = 2; i <= dev_cnt; i++) {
2742 ret = update_disk_super_on_device(info,
2743 argv[optind + i], (u64)i);
2745 printk("update disk super failed devid=%d (error=%d)\n",
2747 close_ctree(info->chunk_root);
2752 close_ctree(info->chunk_root);
2754 /* fix metadata block to map correct chunk */
2755 ret = fixup_metadump(source, out, 1, target);
2757 fprintf(stderr, "fix metadump failed (error=%d)\n",
2764 if (out == stdout) {
2768 if (ret && create) {
2771 unlink_ret = unlink(target);
2774 "unlink output file failed : %s\n",