2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
22 #include <sys/types.h>
30 #include "kerncompat.h"
34 #include "transaction.h"
37 #include "extent_io.h"
39 #define HEADER_MAGIC 0xbd5c25e27295668bULL
40 #define MAX_PENDING_SIZE (256 * 1024)
41 #define BLOCK_SIZE 1024
42 #define BLOCK_MASK (BLOCK_SIZE - 1)
44 #define COMPRESS_NONE 0
45 #define COMPRESS_ZLIB 1
47 struct meta_cluster_item {
50 } __attribute__ ((__packed__));
52 struct meta_cluster_header {
57 } __attribute__ ((__packed__));
59 /* cluster header + index items + buffers */
61 struct meta_cluster_header header;
62 struct meta_cluster_item items[];
63 } __attribute__ ((__packed__));
65 #define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
66 sizeof(struct meta_cluster_item))
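/*
 * Rough layout of a metadump image, as produced by write_buffers() below
 * and consumed by add_cluster()/search_for_chunk_blocks():
 *
 *	+---------------------------------+  <- BLOCK_SIZE aligned
 *	| meta_cluster_header             |
 *	| meta_cluster_item[0..nritems)   |  one BLOCK_SIZE index block
 *	+---------------------------------+
 *	| item 0 payload                  |  copied extents, back to back,
 *	| item 1 payload ...              |  zlib-compressed when the header
 *	+---------------------------------+  says COMPRESS_ZLIB
 *	| zero padding up to the next     |
 *	| BLOCK_SIZE boundary             |
 *	+---------------------------------+
 *
 * header->bytenr is the offset of the cluster inside the image stream and
 * each item records the logical bytenr and (compressed) size of one copied
 * extent.  Clusters repeat until the end of the image.
 */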
74 struct list_head list;
78 struct list_head list;
79 struct list_head ordered;
87 struct metadump_struct {
88 struct btrfs_root *root;
91 struct meta_cluster *cluster;
95 pthread_mutex_t mutex;
97 struct rb_root name_tree;
99 struct list_head list;
100 struct list_head ordered;
122 struct mdrestore_struct {
128 pthread_mutex_t mutex;
131 struct rb_root chunk_tree;
132 struct rb_root physical_tree;
133 struct list_head list;
134 struct list_head overlapping_chunks;
139 u64 last_physical_offset;
140 u8 uuid[BTRFS_UUID_SIZE];
141 u8 fsid[BTRFS_FSID_SIZE];
149 int clear_space_cache;
150 struct btrfs_fs_info *info;
153 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
154 u64 search, u64 cluster_bytenr);
155 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);
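/*
 * Recompute the crc32c of a copied metadata/super block: checksum everything
 * past the csum area and store the BTRFS_CRC32_SIZE result at the front of
 * the block, the same way btrfs checksums tree blocks on disk.
 */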
157 static void csum_block(u8 *buf, size_t len)
159 char result[BTRFS_CRC32_SIZE];
161 crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
162 btrfs_csum_final(crc, result);
163 memcpy(buf, result, BTRFS_CRC32_SIZE);
166 static int has_name(struct btrfs_key *key)
169 case BTRFS_DIR_ITEM_KEY:
170 case BTRFS_DIR_INDEX_KEY:
171 case BTRFS_INODE_REF_KEY:
172 case BTRFS_INODE_EXTREF_KEY:
173 case BTRFS_XATTR_ITEM_KEY:
182 static char *generate_garbage(u32 name_len)
184 char *buf = malloc(name_len);
190 for (i = 0; i < name_len; i++) {
191 char c = rand() % 94 + 33;
201 static int name_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
203 struct name *entry = rb_entry(a, struct name, n);
204 struct name *ins = rb_entry(b, struct name, n);
207 len = min(ins->len, entry->len);
208 return memcmp(ins->val, entry->val, len);
211 static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
213 struct fs_chunk *entry = rb_entry(a, struct fs_chunk, l);
214 struct fs_chunk *ins = rb_entry(b, struct fs_chunk, l);
216 if (fuzz && ins->logical >= entry->logical &&
217 ins->logical < entry->logical + entry->bytes)
220 if (ins->logical < entry->logical)
222 else if (ins->logical > entry->logical)
227 static int physical_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
229 struct fs_chunk *entry = rb_entry(a, struct fs_chunk, p);
230 struct fs_chunk *ins = rb_entry(b, struct fs_chunk, p);
232 if (fuzz && ins->physical >= entry->physical &&
233 ins->physical < entry->physical + entry->bytes)
236 if (fuzz && entry->physical >= ins->physical &&
237 entry->physical < ins->physical + ins->bytes)
240 if (ins->physical < entry->physical)
242 else if (ins->physical > entry->physical)
247 static void tree_insert(struct rb_root *root, struct rb_node *ins,
248 int (*cmp)(struct rb_node *a, struct rb_node *b,
251 struct rb_node ** p = &root->rb_node;
252 struct rb_node * parent = NULL;
258 dir = cmp(*p, ins, 1);
267 rb_link_node(ins, parent, p);
268 rb_insert_color(ins, root);
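/*
 * Note on the rb-tree helpers: the 'fuzz' flag makes chunk_cmp() and
 * physical_cmp() report a match for any address that falls inside an
 * existing chunk, so tree_search() can find the chunk containing an
 * arbitrary logical/physical offset instead of needing an exact key.
 */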
271 static struct rb_node *tree_search(struct rb_root *root,
272 struct rb_node *search,
273 int (*cmp)(struct rb_node *a,
274 struct rb_node *b, int fuzz),
277 struct rb_node *n = root->rb_node;
281 dir = cmp(n, search, fuzz);
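/*
 * Map a logical btrfs address to its physical offset in the restored image
 * using the chunk tree built by read_chunk_block().  The super block offset
 * passes through unchanged, and *size is clamped so the caller never crosses
 * the end of the containing chunk.
 */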
293 static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical, u64 *size)
295 struct fs_chunk *fs_chunk;
296 struct rb_node *entry;
297 struct fs_chunk search;
300 if (logical == BTRFS_SUPER_INFO_OFFSET)
303 search.logical = logical;
304 entry = tree_search(&mdres->chunk_tree, &search.l, chunk_cmp, 1);
306 if (mdres->in != stdin)
307 printf("Couldn't find a chunk, using the logical offset as physical\n");
310 fs_chunk = rb_entry(entry, struct fs_chunk, l);
311 if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
313 offset = search.logical - fs_chunk->logical;
315 *size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
316 return fs_chunk->physical + offset;
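/*
 * Name sanitization for -ss: try to build a replacement name of the same
 * length whose crc32c (seeded with ~1, matching btrfs_name_hash()) collides
 * with the original, so sanitized DIR_ITEM/DIR_INDEX hashes still line up.
 * The candidate starts as all spaces and is mutated byte by byte (avoiding
 * '/'); if no collision is found we fall back to random printable garbage.
 * Results are cached in md->name_tree keyed by the original name.
 */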
320 static char *find_collision(struct metadump_struct *md, char *name,
324 struct rb_node *entry;
326 unsigned long checksum;
332 entry = tree_search(&md->name_tree, &tmp.n, name_cmp, 0);
334 val = rb_entry(entry, struct name, n);
339 val = malloc(sizeof(struct name));
341 fprintf(stderr, "Couldn't sanitize name, out of memory\n");
346 memset(val, 0, sizeof(*val));
350 val->sub = malloc(name_len);
352 fprintf(stderr, "Couldn't sanitize name, out of memory\n");
358 checksum = crc32c(~1, val->val, name_len);
359 memset(val->sub, ' ', name_len);
362 if (crc32c(~1, val->sub, name_len) == checksum &&
363 memcmp(val->sub, val->val, val->len)) {
368 if (val->sub[i] == 127) {
373 } while (val->sub[i] == 127);
378 if (val->sub[i] == '/')
380 memset(val->sub, ' ', i);
385 if (val->sub[i] == '/')
391 fprintf(stderr, "Couldn't find a collision for '%.*s', "
392 "generating normal garbage, it won't match indexes\n",
394 for (i = 0; i < name_len; i++) {
395 char c = rand() % 94 + 33;
403 tree_insert(&md->name_tree, &val->n, name_cmp);
407 static void sanitize_dir_item(struct metadump_struct *md, struct extent_buffer *eb,
410 struct btrfs_dir_item *dir_item;
413 unsigned long name_ptr;
418 int free_garbage = (md->sanitize_names == 1);
420 dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
421 total_len = btrfs_item_size_nr(eb, slot);
422 while (cur < total_len) {
423 this_len = sizeof(*dir_item) +
424 btrfs_dir_name_len(eb, dir_item) +
425 btrfs_dir_data_len(eb, dir_item);
426 name_ptr = (unsigned long)(dir_item + 1);
427 name_len = btrfs_dir_name_len(eb, dir_item);
429 if (md->sanitize_names > 1) {
430 buf = malloc(name_len);
432 fprintf(stderr, "Couldn't sanitize name, "
436 read_extent_buffer(eb, buf, name_ptr, name_len);
437 garbage = find_collision(md, buf, name_len);
439 garbage = generate_garbage(name_len);
442 fprintf(stderr, "Couldn't sanitize name, out of memory\n");
445 write_extent_buffer(eb, garbage, name_ptr, name_len);
447 dir_item = (struct btrfs_dir_item *)((char *)dir_item +
454 static void sanitize_inode_ref(struct metadump_struct *md,
455 struct extent_buffer *eb, int slot, int ext)
457 struct btrfs_inode_extref *extref;
458 struct btrfs_inode_ref *ref;
461 unsigned long name_ptr;
465 int free_garbage = (md->sanitize_names == 1);
467 item_size = btrfs_item_size_nr(eb, slot);
468 ptr = btrfs_item_ptr_offset(eb, slot);
469 while (cur_offset < item_size) {
471 extref = (struct btrfs_inode_extref *)(ptr +
473 name_ptr = (unsigned long)(&extref->name);
474 len = btrfs_inode_extref_name_len(eb, extref);
475 cur_offset += sizeof(*extref);
477 ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
478 len = btrfs_inode_ref_name_len(eb, ref);
479 name_ptr = (unsigned long)(ref + 1);
480 cur_offset += sizeof(*ref);
484 if (md->sanitize_names > 1) {
487 fprintf(stderr, "Couldn't sanitize name, "
491 read_extent_buffer(eb, buf, name_ptr, len);
492 garbage = find_collision(md, buf, len);
494 garbage = generate_garbage(len);
498 fprintf(stderr, "Couldn't sanitize name, out of memory\n");
501 write_extent_buffer(eb, garbage, name_ptr, len);
507 static void sanitize_xattr(struct metadump_struct *md,
508 struct extent_buffer *eb, int slot)
510 struct btrfs_dir_item *dir_item;
511 unsigned long data_ptr;
514 dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
515 data_len = btrfs_dir_data_len(eb, dir_item);
517 data_ptr = (unsigned long)((char *)(dir_item + 1) +
518 btrfs_dir_name_len(eb, dir_item));
519 memset_extent_buffer(eb, 0, data_ptr, data_len);
522 static void sanitize_name(struct metadump_struct *md, u8 *dst,
523 struct extent_buffer *src, struct btrfs_key *key,
526 struct extent_buffer *eb;
528 eb = alloc_dummy_eb(src->start, src->len);
530 fprintf(stderr, "Couldn't sanitize name, no memory\n");
534 memcpy(eb->data, dst, eb->len);
537 case BTRFS_DIR_ITEM_KEY:
538 case BTRFS_DIR_INDEX_KEY:
539 sanitize_dir_item(md, eb, slot);
541 case BTRFS_INODE_REF_KEY:
542 sanitize_inode_ref(md, eb, slot, 0);
544 case BTRFS_INODE_EXTREF_KEY:
545 sanitize_inode_ref(md, eb, slot, 1);
547 case BTRFS_XATTR_ITEM_KEY:
548 sanitize_xattr(md, eb, slot);
554 memcpy(dst, eb->data, eb->len);
559 * zero inline extents and csum items
561 static void zero_items(struct metadump_struct *md, u8 *dst,
562 struct extent_buffer *src)
564 struct btrfs_file_extent_item *fi;
565 struct btrfs_item *item;
566 struct btrfs_key key;
567 u32 nritems = btrfs_header_nritems(src);
572 for (i = 0; i < nritems; i++) {
573 item = btrfs_item_nr(i);
574 btrfs_item_key_to_cpu(src, &key, i);
575 if (key.type == BTRFS_CSUM_ITEM_KEY) {
576 size = btrfs_item_size_nr(src, i);
577 memset(dst + btrfs_leaf_data(src) +
578 btrfs_item_offset_nr(src, i), 0, size);
582 if (md->sanitize_names && has_name(&key)) {
583 sanitize_name(md, dst, src, &key, i);
587 if (key.type != BTRFS_EXTENT_DATA_KEY)
590 fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
591 extent_type = btrfs_file_extent_type(src, fi);
592 if (extent_type != BTRFS_FILE_EXTENT_INLINE)
595 ptr = btrfs_file_extent_inline_start(fi);
596 size = btrfs_file_extent_inline_item_len(src, item);
597 memset(dst + ptr, 0, size);
602 * copy buffer and zero useless data in the buffer
604 static void copy_buffer(struct metadump_struct *md, u8 *dst,
605 struct extent_buffer *src)
611 memcpy(dst, src->data, src->len);
612 if (src->start == BTRFS_SUPER_INFO_OFFSET)
615 level = btrfs_header_level(src);
616 nritems = btrfs_header_nritems(src);
619 size = sizeof(struct btrfs_header);
620 memset(dst + size, 0, src->len - size);
621 } else if (level == 0) {
622 size = btrfs_leaf_data(src) +
623 btrfs_item_offset_nr(src, nritems - 1) -
624 btrfs_item_nr_offset(nritems);
625 memset(dst + btrfs_item_nr_offset(nritems), 0, size);
626 zero_items(md, dst, src);
628 size = offsetof(struct btrfs_node, ptrs) +
629 sizeof(struct btrfs_key_ptr) * nritems;
630 memset(dst + size, 0, src->len - size);
632 csum_block(dst, src->len);
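/*
 * Worker thread for the dump side: pull async_work items off md->list and,
 * when a compression level is set, replace each buffer with its zlib
 * compress2() output (allocated with compressBound()).  md->num_ready is
 * bumped so write_buffers() knows when the whole cluster has been processed.
 */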
635 static void *dump_worker(void *data)
637 struct metadump_struct *md = (struct metadump_struct *)data;
638 struct async_work *async;
642 pthread_mutex_lock(&md->mutex);
643 while (list_empty(&md->list)) {
645 pthread_mutex_unlock(&md->mutex);
648 pthread_cond_wait(&md->cond, &md->mutex);
650 async = list_entry(md->list.next, struct async_work, list);
651 list_del_init(&async->list);
652 pthread_mutex_unlock(&md->mutex);
654 if (md->compress_level > 0) {
655 u8 *orig = async->buffer;
657 async->bufsize = compressBound(async->size);
658 async->buffer = malloc(async->bufsize);
659 if (!async->buffer) {
660 fprintf(stderr, "Error allocating buffer\n");
661 pthread_mutex_lock(&md->mutex);
664 pthread_mutex_unlock(&md->mutex);
668 ret = compress2(async->buffer,
669 (unsigned long *)&async->bufsize,
670 orig, async->size, md->compress_level);
678 pthread_mutex_lock(&md->mutex);
680 pthread_mutex_unlock(&md->mutex);
686 static void meta_cluster_init(struct metadump_struct *md, u64 start)
688 struct meta_cluster_header *header;
692 header = &md->cluster->header;
693 header->magic = cpu_to_le64(HEADER_MAGIC);
694 header->bytenr = cpu_to_le64(start);
695 header->nritems = cpu_to_le32(0);
696 header->compress = md->compress_level > 0 ?
697 COMPRESS_ZLIB : COMPRESS_NONE;
700 static void metadump_destroy(struct metadump_struct *md, int num_threads)
705 pthread_mutex_lock(&md->mutex);
707 pthread_cond_broadcast(&md->cond);
708 pthread_mutex_unlock(&md->mutex);
710 for (i = 0; i < num_threads; i++)
711 pthread_join(md->threads[i], NULL);
713 pthread_cond_destroy(&md->cond);
714 pthread_mutex_destroy(&md->mutex);
716 while ((n = rb_first(&md->name_tree))) {
719 name = rb_entry(n, struct name, n);
720 rb_erase(n, &md->name_tree);
729 static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
730 FILE *out, int num_threads, int compress_level,
735 memset(md, 0, sizeof(*md));
736 pthread_cond_init(&md->cond, NULL);
737 pthread_mutex_init(&md->mutex, NULL);
738 INIT_LIST_HEAD(&md->list);
739 INIT_LIST_HEAD(&md->ordered);
742 md->pending_start = (u64)-1;
743 md->compress_level = compress_level;
744 md->cluster = calloc(1, BLOCK_SIZE);
745 md->sanitize_names = sanitize_names;
746 if (sanitize_names > 1)
747 crc32c_optimization_init();
750 pthread_cond_destroy(&md->cond);
751 pthread_mutex_destroy(&md->mutex);
755 meta_cluster_init(md, 0);
759 md->name_tree.rb_node = NULL;
760 md->num_threads = num_threads;
761 md->threads = calloc(num_threads, sizeof(pthread_t));
764 pthread_cond_destroy(&md->cond);
765 pthread_mutex_destroy(&md->mutex);
769 for (i = 0; i < num_threads; i++) {
770 ret = pthread_create(md->threads + i, NULL, dump_worker, md);
776 metadump_destroy(md, i + 1);
781 static int write_zero(FILE *out, size_t size)
783 static char zero[BLOCK_SIZE];
784 return fwrite(zero, size, 1, out);
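/*
 * Flush one cluster to the image: wait until the workers have compressed
 * every queued item, fill the index block with each buffer's bytenr and
 * (compressed) size, write the BLOCK_SIZE index block followed by the
 * buffers in submission order, then pad to the next BLOCK_SIZE boundary.
 * *next receives the offset at which the following cluster will start.
 */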
787 static int write_buffers(struct metadump_struct *md, u64 *next)
789 struct meta_cluster_header *header = &md->cluster->header;
790 struct meta_cluster_item *item;
791 struct async_work *async;
797 if (list_empty(&md->ordered))
800 /* wait until all buffers are compressed */
801 while (!err && md->num_items > md->num_ready) {
802 struct timespec ts = {
806 pthread_mutex_unlock(&md->mutex);
807 nanosleep(&ts, NULL);
808 pthread_mutex_lock(&md->mutex);
813 fprintf(stderr, "One of the threads errored out %s\n",
818 /* setup and write index block */
819 list_for_each_entry(async, &md->ordered, ordered) {
820 item = md->cluster->items + nritems;
821 item->bytenr = cpu_to_le64(async->start);
822 item->size = cpu_to_le32(async->bufsize);
825 header->nritems = cpu_to_le32(nritems);
827 ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
829 fprintf(stderr, "Error writing out cluster: %d\n", errno);
834 bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
835 while (!list_empty(&md->ordered)) {
836 async = list_entry(md->ordered.next, struct async_work,
838 list_del_init(&async->ordered);
840 bytenr += async->bufsize;
842 ret = fwrite(async->buffer, async->bufsize, 1,
847 fprintf(stderr, "Error writing out cluster: %d\n",
855 /* zero unused space in the last block */
856 if (!err && bytenr & BLOCK_MASK) {
857 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
860 ret = write_zero(md->out, size);
862 fprintf(stderr, "Error zeroing out buffer: %d\n",
872 static int read_data_extent(struct metadump_struct *md,
873 struct async_work *async)
875 struct btrfs_multi_bio *multi = NULL;
876 struct btrfs_device *device;
877 u64 bytes_left = async->size;
878 u64 logical = async->start;
887 read_len = bytes_left;
888 ret = btrfs_map_block(&md->root->fs_info->mapping_tree, READ,
889 logical, &read_len, &multi, 0, NULL);
891 fprintf(stderr, "Couldn't map data block %d\n", ret);
895 device = multi->stripes[0].dev;
897 if (device->fd == 0) {
899 "Device we need to read from is not open\n");
904 bytenr = multi->stripes[0].physical;
907 read_len = min(read_len, bytes_left);
908 done = pread64(fd, async->buffer+offset, read_len, bytenr);
909 if (done < read_len) {
911 fprintf(stderr, "Error reading extent %d\n",
914 fprintf(stderr, "Short read\n");
926 static int get_dev_fd(struct btrfs_root *root)
928 struct btrfs_device *dev;
930 dev = list_first_entry(&root->fs_info->fs_devices->devices,
931 struct btrfs_device, dev_list);
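/*
 * Turn the currently pending extent range into an async_work: data extents
 * are read via btrfs_map_block()/pread64(), the super block straight from
 * the first device, and metadata block by block through read_tree_block()
 * with copy_buffer() scrubbing each copy.  The work goes on md->ordered (and
 * on md->list for the compression workers); once ITEMS_PER_CLUSTER items are
 * queued, or on the final flush, the cluster is written out.
 */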
935 static int flush_pending(struct metadump_struct *md, int done)
937 struct async_work *async = NULL;
938 struct extent_buffer *eb;
939 u64 blocksize = md->root->nodesize;
945 if (md->pending_size) {
946 async = calloc(1, sizeof(*async));
950 async->start = md->pending_start;
951 async->size = md->pending_size;
952 async->bufsize = async->size;
953 async->buffer = malloc(async->bufsize);
954 if (!async->buffer) {
959 start = async->start;
963 ret = read_data_extent(md, async);
972 * Balance can make the mapping not cover the super block, so
973 * just copy directly from one of the devices.
975 if (start == BTRFS_SUPER_INFO_OFFSET) {
976 int fd = get_dev_fd(md->root);
978 ret = pread64(fd, async->buffer, size, start);
982 fprintf(stderr, "Error reading superblock\n");
989 while (!md->data && size > 0) {
990 u64 this_read = min(blocksize, size);
991 eb = read_tree_block(md->root, start, this_read, 0);
992 if (!extent_buffer_uptodate(eb)) {
996 "Error reading metadata block\n");
999 copy_buffer(md, async->buffer + offset, eb);
1000 free_extent_buffer(eb);
1002 offset += this_read;
1006 md->pending_start = (u64)-1;
1007 md->pending_size = 0;
1012 pthread_mutex_lock(&md->mutex);
1014 list_add_tail(&async->ordered, &md->ordered);
1016 if (md->compress_level > 0) {
1017 list_add_tail(&async->list, &md->list);
1018 pthread_cond_signal(&md->cond);
1023 if (md->num_items >= ITEMS_PER_CLUSTER || done) {
1024 ret = write_buffers(md, &start);
1026 fprintf(stderr, "Error writing buffers %d\n",
1029 meta_cluster_init(md, start);
1031 pthread_mutex_unlock(&md->mutex);
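/*
 * Queue an extent for dumping.  Contiguous extents of the same kind (data
 * vs. metadata) are coalesced into the pending range; the range is flushed
 * first when the kind changes, the range would exceed MAX_PENDING_SIZE, or
 * the new extent is not adjacent to it.
 */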
1035 static int add_extent(u64 start, u64 size, struct metadump_struct *md,
1039 if (md->data != data ||
1040 md->pending_size + size > MAX_PENDING_SIZE ||
1041 md->pending_start + md->pending_size != start) {
1042 ret = flush_pending(md, 0);
1045 md->pending_start = start;
1047 readahead_tree_block(md->root, start, size, 0);
1048 md->pending_size += size;
1053 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1054 static int is_tree_block(struct btrfs_root *extent_root,
1055 struct btrfs_path *path, u64 bytenr)
1057 struct extent_buffer *leaf;
1058 struct btrfs_key key;
1062 leaf = path->nodes[0];
1064 struct btrfs_extent_ref_v0 *ref_item;
1066 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1067 ret = btrfs_next_leaf(extent_root, path);
1072 leaf = path->nodes[0];
1074 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1075 if (key.objectid != bytenr)
1077 if (key.type != BTRFS_EXTENT_REF_V0_KEY)
1079 ref_item = btrfs_item_ptr(leaf, path->slots[0],
1080 struct btrfs_extent_ref_v0);
1081 ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
1082 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
1090 static int copy_tree_blocks(struct btrfs_root *root, struct extent_buffer *eb,
1091 struct metadump_struct *metadump, int root_tree)
1093 struct extent_buffer *tmp;
1094 struct btrfs_root_item *ri;
1095 struct btrfs_key key;
1102 ret = add_extent(btrfs_header_bytenr(eb), root->leafsize, metadump, 0);
1104 fprintf(stderr, "Error adding metadata block\n");
1108 if (btrfs_header_level(eb) == 0 && !root_tree)
1111 level = btrfs_header_level(eb);
1112 nritems = btrfs_header_nritems(eb);
1113 for (i = 0; i < nritems; i++) {
1115 btrfs_item_key_to_cpu(eb, &key, i);
1116 if (key.type != BTRFS_ROOT_ITEM_KEY)
1118 ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
1119 bytenr = btrfs_disk_root_bytenr(eb, ri);
1120 tmp = read_tree_block(root, bytenr, root->leafsize, 0);
1121 if (!extent_buffer_uptodate(tmp)) {
1123 "Error reading log root block\n");
1126 ret = copy_tree_blocks(root, tmp, metadump, 0);
1127 free_extent_buffer(tmp);
1131 bytenr = btrfs_node_blockptr(eb, i);
1132 tmp = read_tree_block(root, bytenr, root->leafsize, 0);
1133 if (!extent_buffer_uptodate(tmp)) {
1134 fprintf(stderr, "Error reading log block\n");
1137 ret = copy_tree_blocks(root, tmp, metadump, root_tree);
1138 free_extent_buffer(tmp);
1147 static int copy_log_trees(struct btrfs_root *root,
1148 struct metadump_struct *metadump,
1149 struct btrfs_path *path)
1151 u64 blocknr = btrfs_super_log_root(root->fs_info->super_copy);
1156 if (!root->fs_info->log_root_tree ||
1157 !root->fs_info->log_root_tree->node) {
1158 fprintf(stderr, "Error copying tree log, it wasn't setup\n");
1162 return copy_tree_blocks(root, root->fs_info->log_root_tree->node,
1166 static int copy_space_cache(struct btrfs_root *root,
1167 struct metadump_struct *metadump,
1168 struct btrfs_path *path)
1170 struct extent_buffer *leaf;
1171 struct btrfs_file_extent_item *fi;
1172 struct btrfs_key key;
1173 u64 bytenr, num_bytes;
1176 root = root->fs_info->tree_root;
1179 key.type = BTRFS_EXTENT_DATA_KEY;
1182 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1184 fprintf(stderr, "Error searching for free space inode %d\n",
1189 leaf = path->nodes[0];
1192 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1193 ret = btrfs_next_leaf(root, path);
1195 fprintf(stderr, "Error going to next leaf "
1201 leaf = path->nodes[0];
1204 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1205 if (key.type != BTRFS_EXTENT_DATA_KEY) {
1210 fi = btrfs_item_ptr(leaf, path->slots[0],
1211 struct btrfs_file_extent_item);
1212 if (btrfs_file_extent_type(leaf, fi) !=
1213 BTRFS_FILE_EXTENT_REG) {
1218 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1219 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1220 ret = add_extent(bytenr, num_bytes, metadump, 1);
1222 fprintf(stderr, "Error adding space cache blocks %d\n",
1224 btrfs_release_path(path);
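/*
 * Walk the extent tree starting just past the primary super block and queue
 * every tree block with add_extent(): METADATA_ITEMs count as one leafsize,
 * EXTENT_ITEMs use key.offset, and tree blocks are recognised either by the
 * BTRFS_EXTENT_FLAG_TREE_BLOCK flag or, for very old filesystems, by the
 * extent ref V0 fallback below.
 */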
1233 static int copy_from_extent_tree(struct metadump_struct *metadump,
1234 struct btrfs_path *path)
1236 struct btrfs_root *extent_root;
1237 struct extent_buffer *leaf;
1238 struct btrfs_extent_item *ei;
1239 struct btrfs_key key;
1244 extent_root = metadump->root->fs_info->extent_root;
1245 bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
1246 key.objectid = bytenr;
1247 key.type = BTRFS_EXTENT_ITEM_KEY;
1250 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1252 fprintf(stderr, "Error searching extent root %d\n", ret);
1257 leaf = path->nodes[0];
1260 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1261 ret = btrfs_next_leaf(extent_root, path);
1263 fprintf(stderr, "Error going to next leaf %d"
1271 leaf = path->nodes[0];
1274 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1275 if (key.objectid < bytenr ||
1276 (key.type != BTRFS_EXTENT_ITEM_KEY &&
1277 key.type != BTRFS_METADATA_ITEM_KEY)) {
1282 bytenr = key.objectid;
1283 if (key.type == BTRFS_METADATA_ITEM_KEY)
1284 num_bytes = extent_root->leafsize;
1286 num_bytes = key.offset;
1288 if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
1289 ei = btrfs_item_ptr(leaf, path->slots[0],
1290 struct btrfs_extent_item);
1291 if (btrfs_extent_flags(leaf, ei) &
1292 BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1293 ret = add_extent(bytenr, num_bytes, metadump,
1296 fprintf(stderr, "Error adding block "
1302 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1303 ret = is_tree_block(extent_root, path, bytenr);
1305 fprintf(stderr, "Error checking tree block "
1311 ret = add_extent(bytenr, num_bytes, metadump,
1314 fprintf(stderr, "Error adding block "
1321 fprintf(stderr, "Either extent tree corruption or "
1322 "you haven't built with V0 support\n");
1327 bytenr += num_bytes;
1330 btrfs_release_path(path);
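/*
 * Dump entry point: open the filesystem read-only, always copy the primary
 * super block, then collect metadata either by walking the chunk and root
 * trees recursively (-w) or from the extent tree, add the log trees and the
 * free space cache data extents, and finish with a final flush_pending().
 */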
1335 static int create_metadump(const char *input, FILE *out, int num_threads,
1336 int compress_level, int sanitize, int walk_trees)
1338 struct btrfs_root *root;
1339 struct btrfs_path *path = NULL;
1340 struct metadump_struct metadump;
1344 root = open_ctree(input, 0, 0);
1346 fprintf(stderr, "Open ctree failed\n");
1350 BUG_ON(root->nodesize != root->leafsize);
1352 ret = metadump_init(&metadump, root, out, num_threads,
1353 compress_level, sanitize);
1355 fprintf(stderr, "Error initing metadump %d\n", ret);
1360 ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
1363 fprintf(stderr, "Error adding metadata %d\n", ret);
1368 path = btrfs_alloc_path();
1370 fprintf(stderr, "Out of memory allocating path\n");
1376 ret = copy_tree_blocks(root, root->fs_info->chunk_root->node,
1383 ret = copy_tree_blocks(root, root->fs_info->tree_root->node,
1390 ret = copy_from_extent_tree(&metadump, path);
1397 ret = copy_log_trees(root, &metadump, path);
1403 ret = copy_space_cache(root, &metadump, path);
1405 ret = flush_pending(&metadump, 1);
1409 fprintf(stderr, "Error flushing pending %d\n", ret);
1412 metadump_destroy(&metadump, num_threads);
1414 btrfs_free_path(path);
1415 ret = close_ctree(root);
1416 return err ? err : ret;
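/*
 * Old-style (-o) restore: mark the super with BTRFS_SUPER_FLAG_METADUMP and
 * replace the sys_chunk_array with a single SYSTEM chunk of length (u64)-1
 * backed by one stripe at physical offset 0, so the restored image is read
 * with an identity logical-to-physical mapping.
 */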
1419 static void update_super_old(u8 *buffer)
1421 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1422 struct btrfs_chunk *chunk;
1423 struct btrfs_disk_key *key;
1424 u32 sectorsize = btrfs_super_sectorsize(super);
1425 u64 flags = btrfs_super_flags(super);
1427 flags |= BTRFS_SUPER_FLAG_METADUMP;
1428 btrfs_set_super_flags(super, flags);
1430 key = (struct btrfs_disk_key *)(super->sys_chunk_array);
1431 chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
1432 sizeof(struct btrfs_disk_key));
1434 btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1435 btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
1436 btrfs_set_disk_key_offset(key, 0);
1438 btrfs_set_stack_chunk_length(chunk, (u64)-1);
1439 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
1440 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
1441 btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
1442 btrfs_set_stack_chunk_io_align(chunk, sectorsize);
1443 btrfs_set_stack_chunk_io_width(chunk, sectorsize);
1444 btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
1445 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1446 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1447 chunk->stripe.devid = super->dev_item.devid;
1448 btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
1449 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
1450 btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
1451 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
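/*
 * Default restore: rewrite the sys_chunk_array in place, collapsing every
 * chunk to a single stripe on the target device and pointing that stripe at
 * the offset computed by logical_to_physical().  Extra stripes are dropped
 * (shrinking sys_array_size), METADUMP_V2 is set, and the space cache
 * generation is zeroed when the cache has to be invalidated.
 */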
1454 static int update_super(struct mdrestore_struct *mdres, u8 *buffer)
1456 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1457 struct btrfs_chunk *chunk;
1458 struct btrfs_disk_key *disk_key;
1459 struct btrfs_key key;
1460 u64 flags = btrfs_super_flags(super);
1461 u32 new_array_size = 0;
1464 u8 *ptr, *write_ptr;
1465 int old_num_stripes;
1467 write_ptr = ptr = super->sys_chunk_array;
1468 array_size = btrfs_super_sys_array_size(super);
1470 while (cur < array_size) {
1471 disk_key = (struct btrfs_disk_key *)ptr;
1472 btrfs_disk_key_to_cpu(&key, disk_key);
1474 new_array_size += sizeof(*disk_key);
1475 memmove(write_ptr, ptr, sizeof(*disk_key));
1477 write_ptr += sizeof(*disk_key);
1478 ptr += sizeof(*disk_key);
1479 cur += sizeof(*disk_key);
1481 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1482 u64 physical, size = 0;
1484 chunk = (struct btrfs_chunk *)ptr;
1485 old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1486 chunk = (struct btrfs_chunk *)write_ptr;
1488 memmove(write_ptr, ptr, sizeof(*chunk));
1489 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1490 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1491 btrfs_set_stack_chunk_type(chunk,
1492 BTRFS_BLOCK_GROUP_SYSTEM);
1493 btrfs_set_stack_stripe_devid(&chunk->stripe,
1494 super->dev_item.devid);
1495 physical = logical_to_physical(mdres, key.offset,
1497 if (size != (u64)-1)
1498 btrfs_set_stack_stripe_offset(&chunk->stripe,
1500 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
1502 new_array_size += sizeof(*chunk);
1504 fprintf(stderr, "Bogus key in the sys chunk array "
1508 write_ptr += sizeof(*chunk);
1509 ptr += btrfs_chunk_item_size(old_num_stripes);
1510 cur += btrfs_chunk_item_size(old_num_stripes);
1513 if (mdres->clear_space_cache)
1514 btrfs_set_super_cache_generation(super, 0);
1516 flags |= BTRFS_SUPER_FLAG_METADUMP_V2;
1517 btrfs_set_super_flags(super, flags);
1518 btrfs_set_super_sys_array_size(super, new_array_size);
1519 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
1524 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size)
1526 struct extent_buffer *eb;
1528 eb = malloc(sizeof(struct extent_buffer) + size);
1531 memset(eb, 0, sizeof(struct extent_buffer) + size);
1538 static void truncate_item(struct extent_buffer *eb, int slot, u32 new_size)
1540 struct btrfs_item *item;
1548 old_size = btrfs_item_size_nr(eb, slot);
1549 if (old_size == new_size)
1552 nritems = btrfs_header_nritems(eb);
1553 data_end = btrfs_item_offset_nr(eb, nritems - 1);
1555 old_data_start = btrfs_item_offset_nr(eb, slot);
1556 size_diff = old_size - new_size;
1558 for (i = slot; i < nritems; i++) {
1560 item = btrfs_item_nr(i);
1561 ioff = btrfs_item_offset(eb, item);
1562 btrfs_set_item_offset(eb, item, ioff + size_diff);
1565 memmove_extent_buffer(eb, btrfs_leaf_data(eb) + data_end + size_diff,
1566 btrfs_leaf_data(eb) + data_end,
1567 old_data_start + new_size - data_end);
1568 item = btrfs_item_nr(slot);
1569 btrfs_set_item_size(eb, item, new_size);
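/*
 * Rewrite restored chunk tree leaves: each CHUNK_ITEM is truncated to a
 * single-stripe struct btrfs_chunk, the RAID bits are masked out of the
 * chunk type (only DATA/SYSTEM/METADATA/DUP survive), the stripe is pointed
 * at this device at the remapped physical offset, and the block checksum is
 * recomputed.
 */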
1572 static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
1573 struct async_work *async, u8 *buffer,
1576 struct extent_buffer *eb;
1577 size_t size_left = size;
1578 u64 bytenr = async->start;
1581 if (size_left % mdres->leafsize)
1584 eb = alloc_dummy_eb(bytenr, mdres->leafsize);
1590 memcpy(eb->data, buffer, mdres->leafsize);
1592 if (btrfs_header_bytenr(eb) != bytenr)
1594 if (memcmp(mdres->fsid,
1595 eb->data + offsetof(struct btrfs_header, fsid),
1599 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID)
1602 if (btrfs_header_level(eb) != 0)
1605 for (i = 0; i < btrfs_header_nritems(eb); i++) {
1606 struct btrfs_chunk chunk;
1607 struct btrfs_key key;
1608 u64 type, physical, size = (u64)-1;
1610 btrfs_item_key_to_cpu(eb, &key, i);
1611 if (key.type != BTRFS_CHUNK_ITEM_KEY)
1613 truncate_item(eb, i, sizeof(chunk));
1614 read_extent_buffer(eb, &chunk,
1615 btrfs_item_ptr_offset(eb, i),
1619 physical = logical_to_physical(mdres, key.offset,
1622 /* Zero out the RAID profile */
1623 type = btrfs_stack_chunk_type(&chunk);
1624 type &= (BTRFS_BLOCK_GROUP_DATA |
1625 BTRFS_BLOCK_GROUP_SYSTEM |
1626 BTRFS_BLOCK_GROUP_METADATA |
1627 BTRFS_BLOCK_GROUP_DUP);
1628 btrfs_set_stack_chunk_type(&chunk, type);
1630 btrfs_set_stack_chunk_num_stripes(&chunk, 1);
1631 btrfs_set_stack_chunk_sub_stripes(&chunk, 0);
1632 btrfs_set_stack_stripe_devid(&chunk.stripe, mdres->devid);
1633 if (size != (u64)-1)
1634 btrfs_set_stack_stripe_offset(&chunk.stripe,
1636 memcpy(chunk.stripe.dev_uuid, mdres->uuid,
1638 write_extent_buffer(eb, &chunk,
1639 btrfs_item_ptr_offset(eb, i),
1642 memcpy(buffer, eb->data, eb->len);
1643 csum_block(buffer, eb->len);
1645 size_left -= mdres->leafsize;
1646 buffer += mdres->leafsize;
1647 bytenr += mdres->leafsize;
1654 static void write_backup_supers(int fd, u8 *buf)
1656 struct btrfs_super_block *super = (struct btrfs_super_block *)buf;
1663 if (fstat(fd, &st)) {
1664 fprintf(stderr, "Couldn't stat restore point, won't be able "
1665 "to write backup supers: %d\n", errno);
1669 size = btrfs_device_size(fd, &st);
1671 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1672 bytenr = btrfs_sb_offset(i);
1673 if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
1675 btrfs_set_super_bytenr(super, bytenr);
1676 csum_block(buf, BTRFS_SUPER_INFO_SIZE);
1677 ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
1678 if (ret < BTRFS_SUPER_INFO_SIZE) {
1680 fprintf(stderr, "Problem writing out backup "
1681 "super block %d, err %d\n", i, errno);
1683 fprintf(stderr, "Short write writing out "
1684 "backup super block\n");
1690 static void *restore_worker(void *data)
1692 struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
1693 struct async_work *async;
1699 int compress_size = MAX_PENDING_SIZE * 4;
1701 outfd = fileno(mdres->out);
1702 buffer = malloc(compress_size);
1704 fprintf(stderr, "Error allocating buffer\n");
1705 pthread_mutex_lock(&mdres->mutex);
1707 mdres->error = -ENOMEM;
1708 pthread_mutex_unlock(&mdres->mutex);
1717 pthread_mutex_lock(&mdres->mutex);
1718 while (!mdres->leafsize || list_empty(&mdres->list)) {
1720 pthread_mutex_unlock(&mdres->mutex);
1723 pthread_cond_wait(&mdres->cond, &mdres->mutex);
1725 async = list_entry(mdres->list.next, struct async_work, list);
1726 list_del_init(&async->list);
1727 pthread_mutex_unlock(&mdres->mutex);
1729 if (mdres->compress_method == COMPRESS_ZLIB) {
1730 size = compress_size;
1731 ret = uncompress(buffer, (unsigned long *)&size,
1732 async->buffer, async->bufsize);
1734 fprintf(stderr, "Error decompressing %d\n",
1740 outbuf = async->buffer;
1741 size = async->bufsize;
1744 if (!mdres->multi_devices) {
1745 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1746 if (mdres->old_restore) {
1747 update_super_old(outbuf);
1749 ret = update_super(mdres, outbuf);
1753 } else if (!mdres->old_restore) {
1754 ret = fixup_chunk_tree_block(mdres, async, outbuf, size);
1760 if (!mdres->fixup_offset) {
1762 u64 chunk_size = size;
1763 if (!mdres->multi_devices && !mdres->old_restore)
1764 bytenr = logical_to_physical(mdres,
1765 async->start + offset,
1768 bytenr = async->start + offset;
1770 ret = pwrite64(outfd, outbuf+offset, chunk_size,
1772 if (ret != chunk_size) {
1774 fprintf(stderr, "Error writing to "
1775 "device %d\n", errno);
1779 fprintf(stderr, "Short write\n");
1785 offset += chunk_size;
1787 } else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
1788 ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
1790 printk("Error write data\n");
1796 /* backup super blocks are already there at fixup_offset stage */
1797 if (!mdres->multi_devices && async->start == BTRFS_SUPER_INFO_OFFSET)
1798 write_backup_supers(outfd, outbuf);
1800 pthread_mutex_lock(&mdres->mutex);
1801 if (err && !mdres->error)
1804 pthread_mutex_unlock(&mdres->mutex);
1806 free(async->buffer);
1814 static void mdrestore_destroy(struct mdrestore_struct *mdres, int num_threads)
1819 while ((n = rb_first(&mdres->chunk_tree))) {
1820 struct fs_chunk *entry;
1822 entry = rb_entry(n, struct fs_chunk, l);
1823 rb_erase(n, &mdres->chunk_tree);
1824 rb_erase(&entry->p, &mdres->physical_tree);
1827 pthread_mutex_lock(&mdres->mutex);
1829 pthread_cond_broadcast(&mdres->cond);
1830 pthread_mutex_unlock(&mdres->mutex);
1832 for (i = 0; i < num_threads; i++)
1833 pthread_join(mdres->threads[i], NULL);
1835 pthread_cond_destroy(&mdres->cond);
1836 pthread_mutex_destroy(&mdres->mutex);
1837 free(mdres->threads);
1840 static int mdrestore_init(struct mdrestore_struct *mdres,
1841 FILE *in, FILE *out, int old_restore,
1842 int num_threads, int fixup_offset,
1843 struct btrfs_fs_info *info, int multi_devices)
1847 memset(mdres, 0, sizeof(*mdres));
1848 pthread_cond_init(&mdres->cond, NULL);
1849 pthread_mutex_init(&mdres->mutex, NULL);
1850 INIT_LIST_HEAD(&mdres->list);
1851 INIT_LIST_HEAD(&mdres->overlapping_chunks);
1854 mdres->old_restore = old_restore;
1855 mdres->chunk_tree.rb_node = NULL;
1856 mdres->fixup_offset = fixup_offset;
1858 mdres->multi_devices = multi_devices;
1859 mdres->clear_space_cache = 0;
1860 mdres->last_physical_offset = 0;
1861 mdres->alloced_chunks = 0;
1866 mdres->num_threads = num_threads;
1867 mdres->threads = calloc(num_threads, sizeof(pthread_t));
1868 if (!mdres->threads)
1870 for (i = 0; i < num_threads; i++) {
1871 ret = pthread_create(mdres->threads + i, NULL, restore_worker,
1877 mdrestore_destroy(mdres, i + 1);
1881 static int fill_mdres_info(struct mdrestore_struct *mdres,
1882 struct async_work *async)
1884 struct btrfs_super_block *super;
1889 /* We've already been initialized */
1890 if (mdres->leafsize)
1893 if (mdres->compress_method == COMPRESS_ZLIB) {
1894 size_t size = MAX_PENDING_SIZE * 2;
1896 buffer = malloc(MAX_PENDING_SIZE * 2);
1899 ret = uncompress(buffer, (unsigned long *)&size,
1900 async->buffer, async->bufsize);
1902 fprintf(stderr, "Error decompressing %d\n", ret);
1908 outbuf = async->buffer;
1911 super = (struct btrfs_super_block *)outbuf;
1912 mdres->leafsize = btrfs_super_leafsize(super);
1913 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
1914 memcpy(mdres->uuid, super->dev_item.uuid,
1916 mdres->devid = le64_to_cpu(super->dev_item.devid);
1921 static int add_cluster(struct meta_cluster *cluster,
1922 struct mdrestore_struct *mdres, u64 *next)
1924 struct meta_cluster_item *item;
1925 struct meta_cluster_header *header = &cluster->header;
1926 struct async_work *async;
1931 mdres->compress_method = header->compress;
1933 bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
1934 nritems = le32_to_cpu(header->nritems);
1935 for (i = 0; i < nritems; i++) {
1936 item = &cluster->items[i];
1937 async = calloc(1, sizeof(*async));
1939 fprintf(stderr, "Error allocating async\n");
1942 async->start = le64_to_cpu(item->bytenr);
1943 async->bufsize = le32_to_cpu(item->size);
1944 async->buffer = malloc(async->bufsize);
1945 if (!async->buffer) {
1946 fprintf(stderr, "Error allocating async buffer\n");
1950 ret = fread(async->buffer, async->bufsize, 1, mdres->in);
1952 fprintf(stderr, "Error reading buffer %d\n", errno);
1953 free(async->buffer);
1957 bytenr += async->bufsize;
1959 pthread_mutex_lock(&mdres->mutex);
1960 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1961 ret = fill_mdres_info(mdres, async);
1963 fprintf(stderr, "Error setting up restore\n");
1964 pthread_mutex_unlock(&mdres->mutex);
1965 free(async->buffer);
1970 list_add_tail(&async->list, &mdres->list);
1972 pthread_cond_signal(&mdres->cond);
1973 pthread_mutex_unlock(&mdres->mutex);
1975 if (bytenr & BLOCK_MASK) {
1976 char buffer[BLOCK_MASK];
1977 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
1980 ret = fread(buffer, size, 1, mdres->in);
1982 fprintf(stderr, "Error reading in buffer %d\n", errno);
1990 static int wait_for_worker(struct mdrestore_struct *mdres)
1994 pthread_mutex_lock(&mdres->mutex);
1996 while (!ret && mdres->num_items > 0) {
1997 struct timespec ts = {
1999 .tv_nsec = 10000000,
2001 pthread_mutex_unlock(&mdres->mutex);
2002 nanosleep(&ts, NULL);
2003 pthread_mutex_lock(&mdres->mutex);
2006 pthread_mutex_unlock(&mdres->mutex);
2010 static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
2011 u64 bytenr, u64 item_bytenr, u32 bufsize,
2014 struct extent_buffer *eb;
2018 eb = alloc_dummy_eb(bytenr, mdres->leafsize);
2024 while (item_bytenr != bytenr) {
2025 buffer += mdres->leafsize;
2026 item_bytenr += mdres->leafsize;
2029 memcpy(eb->data, buffer, mdres->leafsize);
2030 if (btrfs_header_bytenr(eb) != bytenr) {
2031 fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
2036 if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
2038 fprintf(stderr, "Fsid doesn't match\n");
2043 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
2044 fprintf(stderr, "Does not belong to the chunk tree\n");
2049 for (i = 0; i < btrfs_header_nritems(eb); i++) {
2050 struct btrfs_chunk chunk;
2051 struct fs_chunk *fs_chunk;
2052 struct btrfs_key key;
2054 if (btrfs_header_level(eb)) {
2055 u64 blockptr = btrfs_node_blockptr(eb, i);
2057 ret = search_for_chunk_blocks(mdres, blockptr,
2064 /* Yay a leaf! We loves leafs! */
2065 btrfs_item_key_to_cpu(eb, &key, i);
2066 if (key.type != BTRFS_CHUNK_ITEM_KEY)
2069 fs_chunk = malloc(sizeof(struct fs_chunk));
2071 fprintf(stderr, "Error allocating chunk\n");
2075 memset(fs_chunk, 0, sizeof(*fs_chunk));
2076 read_extent_buffer(eb, &chunk, btrfs_item_ptr_offset(eb, i),
2079 fs_chunk->logical = key.offset;
2080 fs_chunk->physical = btrfs_stack_stripe_offset(&chunk.stripe);
2081 fs_chunk->bytes = btrfs_stack_chunk_length(&chunk);
2082 INIT_LIST_HEAD(&fs_chunk->list);
2083 if (tree_search(&mdres->physical_tree, &fs_chunk->p,
2084 physical_cmp, 1) != NULL)
2085 list_add(&fs_chunk->list, &mdres->overlapping_chunks);
2087 tree_insert(&mdres->physical_tree, &fs_chunk->p,
2089 if (fs_chunk->physical + fs_chunk->bytes >
2090 mdres->last_physical_offset)
2091 mdres->last_physical_offset = fs_chunk->physical +
2093 mdres->alloced_chunks += fs_chunk->bytes;
2094 tree_insert(&mdres->chunk_tree, &fs_chunk->l, chunk_cmp);
2101 /* If you have to ask you aren't worthy */
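/*
 * Scan the image cluster by cluster (wrapping around to the start once)
 * until the item containing the wanted chunk tree block is found, then hand
 * the decompressed payload to read_chunk_block(), which records every
 * CHUNK_ITEM in mdres->chunk_tree/physical_tree and recurses into child
 * node pointers back through this function.
 */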
2102 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
2103 u64 search, u64 cluster_bytenr)
2105 struct meta_cluster *cluster;
2106 struct meta_cluster_header *header;
2107 struct meta_cluster_item *item;
2108 u64 current_cluster = cluster_bytenr, bytenr;
2110 u32 bufsize, nritems, i;
2111 u32 max_size = MAX_PENDING_SIZE * 2;
2112 u8 *buffer, *tmp = NULL;
2115 cluster = malloc(BLOCK_SIZE);
2117 fprintf(stderr, "Error allocating cluster\n");
2121 buffer = malloc(max_size);
2123 fprintf(stderr, "Error allocating buffer\n");
2128 if (mdres->compress_method == COMPRESS_ZLIB) {
2129 tmp = malloc(max_size);
2131 fprintf(stderr, "Error allocating tmp buffer\n");
2138 bytenr = current_cluster;
2140 if (fseek(mdres->in, current_cluster, SEEK_SET)) {
2141 fprintf(stderr, "Error seeking: %d\n", errno);
2146 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
2148 if (cluster_bytenr != 0) {
2150 current_cluster = 0;
2154 fprintf(stderr, "Error: reached the end of the metadump image while searching for chunk blocks\n");
2157 } else if (ret < 0) {
2158 fprintf(stderr, "Error reading image\n");
2163 header = &cluster->header;
2164 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2165 le64_to_cpu(header->bytenr) != current_cluster) {
2166 fprintf(stderr, "bad header in metadump image\n");
2171 bytenr += BLOCK_SIZE;
2172 nritems = le32_to_cpu(header->nritems);
2173 for (i = 0; i < nritems; i++) {
2176 item = &cluster->items[i];
2177 bufsize = le32_to_cpu(item->size);
2178 item_bytenr = le64_to_cpu(item->bytenr);
2180 if (bufsize > max_size) {
2181 fprintf(stderr, "item %u size %u too big\n",
2187 if (mdres->compress_method == COMPRESS_ZLIB) {
2188 ret = fread(tmp, bufsize, 1, mdres->in);
2190 fprintf(stderr, "Error reading: %d\n",
2197 ret = uncompress(buffer,
2198 (unsigned long *)&size, tmp,
2201 fprintf(stderr, "Error decompressing "
2207 ret = fread(buffer, bufsize, 1, mdres->in);
2209 fprintf(stderr, "Error reading: %d\n",
2218 if (item_bytenr <= search &&
2219 item_bytenr + size > search) {
2220 ret = read_chunk_block(mdres, buffer, search,
2234 if (bytenr & BLOCK_MASK)
2235 bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
2236 current_cluster = bytenr;
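/*
 * Before the real restore pass (and only when the input is seekable), read
 * the first cluster, find and decompress the super block item to learn the
 * leafsize, fsid, devid and the chunk root bytenr, then call
 * search_for_chunk_blocks() so every logical->physical mapping is known up
 * front.
 */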
2245 static int build_chunk_tree(struct mdrestore_struct *mdres,
2246 struct meta_cluster *cluster)
2248 struct btrfs_super_block *super;
2249 struct meta_cluster_header *header;
2250 struct meta_cluster_item *item = NULL;
2251 u64 chunk_root_bytenr = 0;
2257 /* We can't seek with stdin so don't bother doing this */
2258 if (mdres->in == stdin)
2261 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
2263 fprintf(stderr, "Error reading in cluster: %d\n", errno);
2268 header = &cluster->header;
2269 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2270 le64_to_cpu(header->bytenr) != 0) {
2271 fprintf(stderr, "bad header in metadump image\n");
2275 bytenr += BLOCK_SIZE;
2276 mdres->compress_method = header->compress;
2277 nritems = le32_to_cpu(header->nritems);
2278 for (i = 0; i < nritems; i++) {
2279 item = &cluster->items[i];
2281 if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
2283 bytenr += le32_to_cpu(item->size);
2284 if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
2285 fprintf(stderr, "Error seeking: %d\n", errno);
2290 if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
2291 fprintf(stderr, "Error: did not find the super block item in the first cluster\n");
2295 buffer = malloc(le32_to_cpu(item->size));
2297 fprintf(stderr, "Error allocating buffer\n");
2301 ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
2303 fprintf(stderr, "Error reading buffer: %d\n", errno);
2308 if (mdres->compress_method == COMPRESS_ZLIB) {
2309 size_t size = MAX_PENDING_SIZE * 2;
2312 tmp = malloc(MAX_PENDING_SIZE * 2);
2317 ret = uncompress(tmp, (unsigned long *)&size,
2318 buffer, le32_to_cpu(item->size));
2320 fprintf(stderr, "Error decompressing %d\n", ret);
2329 pthread_mutex_lock(&mdres->mutex);
2330 super = (struct btrfs_super_block *)buffer;
2331 chunk_root_bytenr = btrfs_super_chunk_root(super);
2332 mdres->leafsize = btrfs_super_leafsize(super);
2333 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
2334 memcpy(mdres->uuid, super->dev_item.uuid,
2336 mdres->devid = le64_to_cpu(super->dev_item.devid);
2338 pthread_mutex_unlock(&mdres->mutex);
2340 return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
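/*
 * Collapsing every chunk onto stripe 0 of a single device can make two
 * chunks claim the same physical range.  Such chunks are collected on
 * mdres->overlapping_chunks while the chunk tree is read and are later
 * moved past last_physical_offset by remap_overlapping_chunks(); if a
 * remapped chunk used to contain a super block mirror, the space cache is
 * invalidated since its free space data can no longer be trusted.
 */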
2343 static int range_contains_super(u64 physical, u64 bytes)
2348 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2349 super_bytenr = btrfs_sb_offset(i);
2350 if (super_bytenr >= physical &&
2351 super_bytenr < physical + bytes)
2358 static void remap_overlapping_chunks(struct mdrestore_struct *mdres)
2360 struct fs_chunk *fs_chunk;
2362 while (!list_empty(&mdres->overlapping_chunks)) {
2363 fs_chunk = list_first_entry(&mdres->overlapping_chunks,
2364 struct fs_chunk, list);
2365 list_del_init(&fs_chunk->list);
2366 if (range_contains_super(fs_chunk->physical,
2368 fprintf(stderr, "Remapping a chunk that had a super "
2369 "mirror inside of it, clearing space cache "
2370 "so we don't end up with corruption\n");
2371 mdres->clear_space_cache = 1;
2373 fs_chunk->physical = mdres->last_physical_offset;
2374 tree_insert(&mdres->physical_tree, &fs_chunk->p, physical_cmp);
2375 mdres->last_physical_offset += fs_chunk->bytes;
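/*
 * After the blocks are on disk, open the restored filesystem and fix the
 * device items: every DEV_ITEM except the primary devid is deleted, and
 * total_bytes/bytes_used (both in the super copy and in the chunk tree item)
 * are set to the size of the restore target and the sum of restored chunks.
 */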
2379 static int fixup_devices(struct btrfs_fs_info *fs_info,
2380 struct mdrestore_struct *mdres, off_t dev_size)
2382 struct btrfs_trans_handle *trans;
2383 struct btrfs_dev_item *dev_item;
2384 struct btrfs_path *path;
2385 struct extent_buffer *leaf;
2386 struct btrfs_root *root = fs_info->chunk_root;
2387 struct btrfs_key key;
2388 u64 devid, cur_devid;
2391 path = btrfs_alloc_path();
2393 fprintf(stderr, "Error allocating path\n");
2397 trans = btrfs_start_transaction(fs_info->tree_root, 1);
2398 if (IS_ERR(trans)) {
2399 fprintf(stderr, "Error starting transaction %ld\n",
2401 btrfs_free_path(path);
2402 return PTR_ERR(trans);
2405 dev_item = &fs_info->super_copy->dev_item;
2407 devid = btrfs_stack_device_id(dev_item);
2409 btrfs_set_stack_device_total_bytes(dev_item, dev_size);
2410 btrfs_set_stack_device_bytes_used(dev_item, mdres->alloced_chunks);
2412 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2413 key.type = BTRFS_DEV_ITEM_KEY;
2417 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2419 fprintf(stderr, "search failed %d\n", ret);
2424 leaf = path->nodes[0];
2425 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2426 ret = btrfs_next_leaf(root, path);
2428 fprintf(stderr, "Error going to next leaf "
2436 leaf = path->nodes[0];
2439 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2440 if (key.type > BTRFS_DEV_ITEM_KEY)
2442 if (key.type != BTRFS_DEV_ITEM_KEY) {
2447 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2448 struct btrfs_dev_item);
2449 cur_devid = btrfs_device_id(leaf, dev_item);
2450 if (devid != cur_devid) {
2451 ret = btrfs_del_item(trans, root, path);
2453 fprintf(stderr, "Error deleting item %d\n",
2457 btrfs_release_path(path);
2461 btrfs_set_device_total_bytes(leaf, dev_item, dev_size);
2462 btrfs_set_device_bytes_used(leaf, dev_item,
2463 mdres->alloced_chunks);
2464 btrfs_mark_buffer_dirty(leaf);
2468 btrfs_free_path(path);
2469 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
2471 fprintf(stderr, "Commit failed %d\n", ret);
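/*
 * Restore entry point: on the default path build_chunk_tree() maps all
 * chunks first, then the image is read again cluster by cluster and
 * add_cluster() queues each item for the restore_worker threads; once the
 * workers drain the queue, fixup_devices() adjusts the device items to
 * match the single restore target.  In the multi-device fixup pass
 * (fixup_offset) the blocks are instead written through the already-open
 * filesystem.
 */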
2477 static int restore_metadump(const char *input, FILE *out, int old_restore,
2478 int num_threads, int fixup_offset,
2479 const char *target, int multi_devices)
2481 struct meta_cluster *cluster = NULL;
2482 struct meta_cluster_header *header;
2483 struct mdrestore_struct mdrestore;
2484 struct btrfs_fs_info *info = NULL;
2489 if (!strcmp(input, "-")) {
2492 in = fopen(input, "r");
2494 perror("unable to open metadump image");
2499 /* NOTE: open with write mode */
2502 info = open_ctree_fs_info(target, 0, 0,
2504 OPEN_CTREE_RESTORE |
2505 OPEN_CTREE_PARTIAL);
2507 fprintf(stderr, "%s: open ctree failed\n", __func__);
2513 cluster = malloc(BLOCK_SIZE);
2515 fprintf(stderr, "Error allocating cluster\n");
2520 ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
2521 fixup_offset, info, multi_devices);
2523 fprintf(stderr, "Error initing mdrestore %d\n", ret);
2524 goto failed_cluster;
2527 if (!multi_devices && !old_restore) {
2528 ret = build_chunk_tree(&mdrestore, cluster);
2531 if (!list_empty(&mdrestore.overlapping_chunks))
2532 remap_overlapping_chunks(&mdrestore);
2535 if (in != stdin && fseek(in, 0, SEEK_SET)) {
2536 fprintf(stderr, "Error seeking %d\n", errno);
2540 while (!mdrestore.error) {
2541 ret = fread(cluster, BLOCK_SIZE, 1, in);
2545 header = &cluster->header;
2546 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2547 le64_to_cpu(header->bytenr) != bytenr) {
2548 fprintf(stderr, "bad header in metadump image\n");
2552 ret = add_cluster(cluster, &mdrestore, &bytenr);
2554 fprintf(stderr, "Error adding cluster\n");
2558 ret = wait_for_worker(&mdrestore);
2560 if (!ret && !multi_devices && !old_restore) {
2561 struct btrfs_root *root;
2564 root = open_ctree_fd(fileno(out), target, 0,
2565 OPEN_CTREE_PARTIAL |
2567 OPEN_CTREE_NO_DEVICES);
2569 fprintf(stderr, "unable to open %s\n", target);
2573 info = root->fs_info;
2575 if (stat(target, &st)) {
2576 fprintf(stderr, "statting %s failed\n", target);
2577 close_ctree(info->chunk_root);
2581 ret = fixup_devices(info, &mdrestore, st.st_size);
2582 close_ctree(info->chunk_root);
2587 mdrestore_destroy(&mdrestore, num_threads);
2591 if (fixup_offset && info)
2592 close_ctree(info->chunk_root);
2599 static int update_disk_super_on_device(struct btrfs_fs_info *info,
2600 const char *other_dev, u64 cur_devid)
2602 struct btrfs_key key;
2603 struct extent_buffer *leaf;
2604 struct btrfs_path path;
2605 struct btrfs_dev_item *dev_item;
2606 struct btrfs_super_block *disk_super;
2607 char dev_uuid[BTRFS_UUID_SIZE];
2608 char fs_uuid[BTRFS_UUID_SIZE];
2609 u64 devid, type, io_align, io_width;
2610 u64 sector_size, total_bytes, bytes_used;
2615 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2616 key.type = BTRFS_DEV_ITEM_KEY;
2617 key.offset = cur_devid;
2619 btrfs_init_path(&path);
2620 ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
2622 fprintf(stderr, "search key fails\n");
2626 leaf = path.nodes[0];
2627 dev_item = btrfs_item_ptr(leaf, path.slots[0],
2628 struct btrfs_dev_item);
2630 devid = btrfs_device_id(leaf, dev_item);
2631 if (devid != cur_devid) {
2632 printk("devid %llu mismatch with %llu\n", devid, cur_devid);
2636 type = btrfs_device_type(leaf, dev_item);
2637 io_align = btrfs_device_io_align(leaf, dev_item);
2638 io_width = btrfs_device_io_width(leaf, dev_item);
2639 sector_size = btrfs_device_sector_size(leaf, dev_item);
2640 total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2641 bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2642 read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE);
2643 read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE);
2645 btrfs_release_path(&path);
2647 printk("update disk super on %s devid=%llu\n", other_dev, devid);
2649 /* update other devices' super block */
2650 fp = open(other_dev, O_CREAT | O_RDWR, 0600);
2652 fprintf(stderr, "could not open %s\n", other_dev);
2656 buf = malloc(BTRFS_SUPER_INFO_SIZE);
2663 memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);
2665 disk_super = (struct btrfs_super_block *)buf;
2666 dev_item = &disk_super->dev_item;
2668 btrfs_set_stack_device_type(dev_item, type);
2669 btrfs_set_stack_device_id(dev_item, devid);
2670 btrfs_set_stack_device_total_bytes(dev_item, total_bytes);
2671 btrfs_set_stack_device_bytes_used(dev_item, bytes_used);
2672 btrfs_set_stack_device_io_align(dev_item, io_align);
2673 btrfs_set_stack_device_io_width(dev_item, io_width);
2674 btrfs_set_stack_device_sector_size(dev_item, sector_size);
2675 memcpy(dev_item->uuid, dev_uuid, BTRFS_UUID_SIZE);
2676 memcpy(dev_item->fsid, fs_uuid, BTRFS_UUID_SIZE);
2677 csum_block((u8 *)buf, BTRFS_SUPER_INFO_SIZE);
2679 ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
2680 if (ret != BTRFS_SUPER_INFO_SIZE) {
2685 write_backup_supers(fp, (u8 *)buf);
2693 static void print_usage(int ret)
2695 fprintf(stderr, "usage: btrfs-image [options] source target\n");
2696 fprintf(stderr, "\t-r \trestore metadump image\n");
2697 fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
2698 fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
2699 fprintf(stderr, "\t-o \tdon't mess with the chunk tree when restoring\n");
2700 fprintf(stderr, "\t-s \tsanitize file names, use once to just use garbage, use twice if you want crc collisions\n");
2701 fprintf(stderr, "\t-w \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
2702 fprintf(stderr, "\t-m \trestore for multiple devices\n");
2703 fprintf(stderr, "\n");
2704 fprintf(stderr, "\tIn dump mode, source is the btrfs device and target is the output file (use '-' for stdout).\n");
2705 fprintf(stderr, "\tIn restore mode, source is the dumped image and target is the btrfs device/file.\n");
2709 int main(int argc, char *argv[])
2713 u64 num_threads = 1;
2714 u64 compress_level = 0;
2716 int old_restore = 0;
2718 int multi_devices = 0;
2722 int usage_error = 0;
2726 static const struct option long_options[] = {
2727 { "help", no_argument, NULL, GETOPT_VAL_HELP},
2728 { NULL, 0, NULL, 0 }
2730 int c = getopt_long(argc, argv, "rc:t:oswm", long_options, NULL);
2738 num_threads = arg_strtou64(optarg);
2739 if (num_threads > 32)
2743 compress_level = arg_strtou64(optarg);
2744 if (compress_level > 9)
2760 case GETOPT_VAL_HELP:
2762 print_usage(c != GETOPT_VAL_HELP);
2766 argc = argc - optind;
2768 if (check_argc_min(argc, 2))
2775 fprintf(stderr, "Usage error: create and restore cannot be used at the same time\n");
2779 if (walk_trees || sanitize || compress_level) {
2780 fprintf(stderr, "Usage error: the -w, -s and -c options make no sense when restoring\n");
2783 if (multi_devices && dev_cnt < 2) {
2784 fprintf(stderr, "Usage error: not enough devices specified for -m option\n");
2787 if (!multi_devices && dev_cnt != 1) {
2788 fprintf(stderr, "Usage error: exactly one device must be specified without the -m option\n");
2796 source = argv[optind];
2797 target = argv[optind + 1];
2799 if (create && !strcmp(target, "-")) {
2802 out = fopen(target, "w+");
2804 perror("unable to create target file");
2809 if (num_threads == 1 && compress_level > 0) {
2810 num_threads = sysconf(_SC_NPROCESSORS_ONLN);
2811 if ((long)num_threads <= 0)
2816 ret = check_mounted(source);
2818 fprintf(stderr, "Could not check mount status: %s\n",
2823 "WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");
2825 ret = create_metadump(source, out, num_threads,
2826 compress_level, sanitize, walk_trees);
2828 ret = restore_metadump(source, out, old_restore, num_threads,
2829 0, target, multi_devices);
2832 printk("%s failed (%s)\n", (create) ? "create" : "restore",
2837 /* extended support for multiple devices */
2838 if (!create && multi_devices) {
2839 struct btrfs_fs_info *info;
2843 info = open_ctree_fs_info(target, 0, 0,
2844 OPEN_CTREE_PARTIAL |
2845 OPEN_CTREE_RESTORE);
2848 fprintf(stderr, "unable to open %s error = %s\n",
2849 target, strerror(e));
2853 total_devs = btrfs_super_num_devices(info->super_copy);
2854 if (total_devs != dev_cnt) {
2855 printk("it needs %llu devices but has only %d\n",
2856 total_devs, dev_cnt);
2857 close_ctree(info->chunk_root);
2861 /* update super block on other disks */
2862 for (i = 2; i <= dev_cnt; i++) {
2863 ret = update_disk_super_on_device(info,
2864 argv[optind + i], (u64)i);
2866 printk("update disk super failed devid=%d (error=%d)\n",
2868 close_ctree(info->chunk_root);
2873 close_ctree(info->chunk_root);
2875 /* fix metadata block to map correct chunk */
2876 ret = restore_metadump(source, out, 0, num_threads, 1,
2879 fprintf(stderr, "fix metadump failed (error=%d)\n",
2885 if (out == stdout) {
2889 if (ret && create) {
2892 unlink_ret = unlink(target);
2895 "unlink output file failed : %s\n",