2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
22 #include <sys/types.h>
30 #include "kerncompat.h"
34 #include "transaction.h"
37 #include "extent_io.h"
39 #include "image/metadump.h"
40 #include "image/sanitize.h"
42 #define MAX_WORKER_THREADS (32)
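/*
 * Rough sketch of the metadump image layout (image/metadump.h has the
 * authoritative definitions): the image is a sequence of clusters, each
 * made of one BLOCK_SIZE index block followed by the item payloads and
 * zero padding up to the next BLOCK_SIZE boundary:
 *
 *	+------------------------------+
 *	| meta_cluster_header          |  magic, bytenr, nritems, compress
 *	| meta_cluster_item[nritems]   |  per item: bytenr, size
 *	+------------------------------+
 *	| item 0 payload (maybe zlib)  |
 *	| ...                          |
 *	+------------------------------+
 *	| zero padding to BLOCK_SIZE   |
 *	+------------------------------+
 */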
45 struct list_head list;
46 struct list_head ordered;
54 struct metadump_struct {
55 struct btrfs_root *root;
59 struct meta_cluster cluster;
60 char meta_cluster_bytes[BLOCK_SIZE];
63 pthread_t threads[MAX_WORKER_THREADS];
65 pthread_mutex_t mutex;
67 struct rb_root name_tree;
69 struct list_head list;
70 struct list_head ordered;
80 enum sanitize_mode sanitize_names;
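/*
 * State shared by the restore side: the input image and output device, the
 * worker threads, the chunk map built from the dumped chunk tree
 * (chunk_tree indexed by logical address, physical_tree by physical), the
 * list of chunks that overlap and need remapping, and identity data
 * (fsid, device uuid, devid) taken from the dumped superblock.
 */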
85 struct mdrestore_struct {
89 pthread_t threads[MAX_WORKER_THREADS];
91 pthread_mutex_t mutex;
94 struct rb_root chunk_tree;
95 struct rb_root physical_tree;
96 struct list_head list;
97 struct list_head overlapping_chunks;
102 u64 last_physical_offset;
103 u8 uuid[BTRFS_UUID_SIZE];
104 u8 fsid[BTRFS_FSID_SIZE];
112 int clear_space_cache;
113 struct btrfs_fs_info *info;
116 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
117 u64 search, u64 cluster_bytenr);
118 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);
120 static void csum_block(u8 *buf, size_t len)
122 u8 result[BTRFS_CRC32_SIZE];
124 crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
125 btrfs_csum_final(crc, result);
126 memcpy(buf, result, BTRFS_CRC32_SIZE);
129 static int has_name(struct btrfs_key *key)
132 case BTRFS_DIR_ITEM_KEY:
133 case BTRFS_DIR_INDEX_KEY:
134 case BTRFS_INODE_REF_KEY:
135 case BTRFS_INODE_EXTREF_KEY:
136 case BTRFS_XATTR_ITEM_KEY:
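/*
 * rb-tree comparators for struct fs_chunk: chunk_cmp orders entries by
 * logical address, physical_cmp by physical offset.  With @fuzz set, a
 * search key that falls inside an existing entry's range compares equal
 * (physical_cmp also detects the reverse overlap), so lookups return the
 * chunk containing a given offset.
 */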
145 static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
147 struct fs_chunk *entry = rb_entry(a, struct fs_chunk, l);
148 struct fs_chunk *ins = rb_entry(b, struct fs_chunk, l);
150 if (fuzz && ins->logical >= entry->logical &&
151 ins->logical < entry->logical + entry->bytes)
154 if (ins->logical < entry->logical)
156 else if (ins->logical > entry->logical)
161 static int physical_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
163 struct fs_chunk *entry = rb_entry(a, struct fs_chunk, p);
164 struct fs_chunk *ins = rb_entry(b, struct fs_chunk, p);
166 if (fuzz && ins->physical >= entry->physical &&
167 ins->physical < entry->physical + entry->bytes)
170 if (fuzz && entry->physical >= ins->physical &&
171 entry->physical < ins->physical + ins->bytes)
174 if (ins->physical < entry->physical)
176 else if (ins->physical > entry->physical)
181 static void tree_insert(struct rb_root *root, struct rb_node *ins,
182 int (*cmp)(struct rb_node *a, struct rb_node *b,
185 struct rb_node ** p = &root->rb_node;
186 struct rb_node * parent = NULL;
192 dir = cmp(*p, ins, 1);
201 rb_link_node(ins, parent, p);
202 rb_insert_color(ins, root);
205 static struct rb_node *tree_search(struct rb_root *root,
206 struct rb_node *search,
207 int (*cmp)(struct rb_node *a,
208 struct rb_node *b, int fuzz),
211 struct rb_node *n = root->rb_node;
215 dir = cmp(n, search, fuzz);
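/*
 * Translate a logical address from the dumped filesystem into the physical
 * offset it should be written to on the restore target, using the chunk
 * map built from the image.  *size is clamped to the end of the containing
 * chunk and *physical_dup reports the second stripe of DUP chunks; the
 * superblock offset and unmapped addresses are passed through unchanged.
 */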
227 static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical,
228 u64 *size, u64 *physical_dup)
230 struct fs_chunk *fs_chunk;
231 struct rb_node *entry;
232 struct fs_chunk search;
235 if (logical == BTRFS_SUPER_INFO_OFFSET)
238 search.logical = logical;
239 entry = tree_search(&mdres->chunk_tree, &search.l, chunk_cmp, 1);
241 if (mdres->in != stdin)
242 warning("cannot find a chunk, using logical");
245 fs_chunk = rb_entry(entry, struct fs_chunk, l);
246 if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
248 offset = search.logical - fs_chunk->logical;
/* physical_dup is non-zero only for DUP chunks */
252 if (fs_chunk->physical_dup)
253 *physical_dup = fs_chunk->physical_dup + offset;
258 *size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
259 return fs_chunk->physical + offset;
263 * zero inline extents and csum items
265 static void zero_items(struct metadump_struct *md, u8 *dst,
266 struct extent_buffer *src)
268 struct btrfs_file_extent_item *fi;
269 struct btrfs_item *item;
270 struct btrfs_key key;
271 u32 nritems = btrfs_header_nritems(src);
276 for (i = 0; i < nritems; i++) {
277 item = btrfs_item_nr(i);
278 btrfs_item_key_to_cpu(src, &key, i);
279 if (key.type == BTRFS_CSUM_ITEM_KEY) {
280 size = btrfs_item_size_nr(src, i);
281 memset(dst + btrfs_leaf_data(src) +
282 btrfs_item_offset_nr(src, i), 0, size);
286 if (md->sanitize_names && has_name(&key)) {
287 sanitize_name(md->sanitize_names, &md->name_tree, dst,
292 if (key.type != BTRFS_EXTENT_DATA_KEY)
295 fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
296 extent_type = btrfs_file_extent_type(src, fi);
297 if (extent_type != BTRFS_FILE_EXTENT_INLINE)
300 ptr = btrfs_file_extent_inline_start(fi);
301 size = btrfs_file_extent_inline_item_len(src, item);
302 memset(dst + ptr, 0, size);
* copy the buffer and zero out data that is not needed in the dump
309 static void copy_buffer(struct metadump_struct *md, u8 *dst,
310 struct extent_buffer *src)
316 memcpy(dst, src->data, src->len);
317 if (src->start == BTRFS_SUPER_INFO_OFFSET)
320 level = btrfs_header_level(src);
321 nritems = btrfs_header_nritems(src);
324 size = sizeof(struct btrfs_header);
325 memset(dst + size, 0, src->len - size);
326 } else if (level == 0) {
327 size = btrfs_leaf_data(src) +
328 btrfs_item_offset_nr(src, nritems - 1) -
329 btrfs_item_nr_offset(nritems);
330 memset(dst + btrfs_item_nr_offset(nritems), 0, size);
331 zero_items(md, dst, src);
333 size = offsetof(struct btrfs_node, ptrs) +
334 sizeof(struct btrfs_key_ptr) * nritems;
335 memset(dst + size, 0, src->len - size);
337 csum_block(dst, src->len);
340 static void *dump_worker(void *data)
342 struct metadump_struct *md = (struct metadump_struct *)data;
343 struct async_work *async;
347 pthread_mutex_lock(&md->mutex);
348 while (list_empty(&md->list)) {
350 pthread_mutex_unlock(&md->mutex);
353 pthread_cond_wait(&md->cond, &md->mutex);
355 async = list_entry(md->list.next, struct async_work, list);
356 list_del_init(&async->list);
357 pthread_mutex_unlock(&md->mutex);
359 if (md->compress_level > 0) {
360 u8 *orig = async->buffer;
362 async->bufsize = compressBound(async->size);
363 async->buffer = malloc(async->bufsize);
364 if (!async->buffer) {
365 error("not enough memory for async buffer");
366 pthread_mutex_lock(&md->mutex);
369 pthread_mutex_unlock(&md->mutex);
373 ret = compress2(async->buffer,
374 (unsigned long *)&async->bufsize,
375 orig, async->size, md->compress_level);
383 pthread_mutex_lock(&md->mutex);
385 pthread_mutex_unlock(&md->mutex);
391 static void meta_cluster_init(struct metadump_struct *md, u64 start)
393 struct meta_cluster_header *header;
397 header = &md->cluster.header;
398 header->magic = cpu_to_le64(HEADER_MAGIC);
399 header->bytenr = cpu_to_le64(start);
400 header->nritems = cpu_to_le32(0);
401 header->compress = md->compress_level > 0 ?
402 COMPRESS_ZLIB : COMPRESS_NONE;
405 static void metadump_destroy(struct metadump_struct *md, int num_threads)
410 pthread_mutex_lock(&md->mutex);
412 pthread_cond_broadcast(&md->cond);
413 pthread_mutex_unlock(&md->mutex);
415 for (i = 0; i < num_threads; i++)
416 pthread_join(md->threads[i], NULL);
418 pthread_cond_destroy(&md->cond);
419 pthread_mutex_destroy(&md->mutex);
421 while ((n = rb_first(&md->name_tree))) {
424 name = rb_entry(n, struct name, n);
425 rb_erase(n, &md->name_tree);
432 static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
433 FILE *out, int num_threads, int compress_level,
434 enum sanitize_mode sanitize_names)
438 memset(md, 0, sizeof(*md));
439 INIT_LIST_HEAD(&md->list);
440 INIT_LIST_HEAD(&md->ordered);
443 md->pending_start = (u64)-1;
444 md->compress_level = compress_level;
445 md->sanitize_names = sanitize_names;
446 if (sanitize_names == SANITIZE_COLLISIONS)
447 crc32c_optimization_init();
449 md->name_tree.rb_node = NULL;
450 md->num_threads = num_threads;
451 pthread_cond_init(&md->cond, NULL);
452 pthread_mutex_init(&md->mutex, NULL);
453 meta_cluster_init(md, 0);
458 for (i = 0; i < num_threads; i++) {
459 ret = pthread_create(md->threads + i, NULL, dump_worker, md);
465 metadump_destroy(md, i + 1);
470 static int write_zero(FILE *out, size_t size)
472 static char zero[BLOCK_SIZE];
473 return fwrite(zero, size, 1, out);
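/*
 * Flush one cluster: wait until the workers have compressed every queued
 * item, fill in the index block (bytenr and stored size per item), write
 * the index followed by each item's payload, pad the tail to BLOCK_SIZE
 * and return the byte offset of the next cluster via @next.
 */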
476 static int write_buffers(struct metadump_struct *md, u64 *next)
478 struct meta_cluster_header *header = &md->cluster.header;
479 struct meta_cluster_item *item;
480 struct async_work *async;
486 if (list_empty(&md->ordered))
489 /* wait until all buffers are compressed */
490 while (!err && md->num_items > md->num_ready) {
491 struct timespec ts = {
495 pthread_mutex_unlock(&md->mutex);
496 nanosleep(&ts, NULL);
497 pthread_mutex_lock(&md->mutex);
502 error("one of the threads failed: %s", strerror(-err));
506 /* setup and write index block */
507 list_for_each_entry(async, &md->ordered, ordered) {
508 item = &md->cluster.items[nritems];
509 item->bytenr = cpu_to_le64(async->start);
510 item->size = cpu_to_le32(async->bufsize);
513 header->nritems = cpu_to_le32(nritems);
515 ret = fwrite(&md->cluster, BLOCK_SIZE, 1, md->out);
517 error("unable to write out cluster: %s", strerror(errno));
522 bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
523 while (!list_empty(&md->ordered)) {
524 async = list_entry(md->ordered.next, struct async_work,
526 list_del_init(&async->ordered);
528 bytenr += async->bufsize;
530 ret = fwrite(async->buffer, async->bufsize, 1,
533 error("unable to write out cluster: %s",
543 /* zero unused space in the last block */
544 if (!err && bytenr & BLOCK_MASK) {
545 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
548 ret = write_zero(md->out, size);
550 error("unable to zero out buffer: %s",
560 static int read_data_extent(struct metadump_struct *md,
561 struct async_work *async)
563 struct btrfs_root *root = md->root;
564 struct btrfs_fs_info *fs_info = root->fs_info;
565 u64 bytes_left = async->size;
566 u64 logical = async->start;
573 num_copies = btrfs_num_copies(root->fs_info, logical, bytes_left);
575 /* Try our best to read data, just like read_tree_block() */
576 for (cur_mirror = 0; cur_mirror < num_copies; cur_mirror++) {
578 read_len = bytes_left;
579 ret = read_extent_data(fs_info,
580 (char *)(async->buffer + offset),
581 logical, &read_len, cur_mirror);
586 bytes_left -= read_len;
594 static int get_dev_fd(struct btrfs_root *root)
596 struct btrfs_device *dev;
598 dev = list_first_entry(&root->fs_info->fs_devices->devices,
599 struct btrfs_device, dev_list);
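/*
 * Turn the currently pending extent range into an async_work item: data
 * extents are read raw, metadata blocks go through copy_buffer() (which
 * also sanitizes and re-checksums them), and the superblock is read
 * straight from a device because balance may leave it outside any chunk
 * mapping.  The item is queued for the compression workers and a cluster
 * is written out once ITEMS_PER_CLUSTER items are ready or on the final
 * flush.
 */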
603 static int flush_pending(struct metadump_struct *md, int done)
605 struct async_work *async = NULL;
606 struct extent_buffer *eb;
612 if (md->pending_size) {
613 async = calloc(1, sizeof(*async));
617 async->start = md->pending_start;
618 async->size = md->pending_size;
619 async->bufsize = async->size;
620 async->buffer = malloc(async->bufsize);
621 if (!async->buffer) {
626 start = async->start;
630 ret = read_data_extent(md, async);
639 * Balance can make the mapping not cover the super block, so
640 * just copy directly from one of the devices.
642 if (start == BTRFS_SUPER_INFO_OFFSET) {
643 int fd = get_dev_fd(md->root);
645 ret = pread64(fd, async->buffer, size, start);
649 error("unable to read superblock at %llu: %s",
650 (unsigned long long)start,
658 while (!md->data && size > 0) {
659 u64 this_read = min((u64)md->root->fs_info->nodesize,
662 eb = read_tree_block(md->root->fs_info, start, 0);
663 if (!extent_buffer_uptodate(eb)) {
666 error("unable to read metadata block %llu",
667 (unsigned long long)start);
670 copy_buffer(md, async->buffer + offset, eb);
671 free_extent_buffer(eb);
677 md->pending_start = (u64)-1;
678 md->pending_size = 0;
683 pthread_mutex_lock(&md->mutex);
685 list_add_tail(&async->ordered, &md->ordered);
687 if (md->compress_level > 0) {
688 list_add_tail(&async->list, &md->list);
689 pthread_cond_signal(&md->cond);
694 if (md->num_items >= ITEMS_PER_CLUSTER || done) {
695 ret = write_buffers(md, &start);
697 error("unable to write buffers: %s", strerror(-ret));
699 meta_cluster_init(md, start);
701 pthread_mutex_unlock(&md->mutex);
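/*
 * Queue an extent for dumping.  The pending range is flushed first if the
 * new extent is of a different kind (data vs metadata), is not contiguous
 * with it, or would push it past MAX_PENDING_SIZE.  A readahead is issued
 * for the block at @start before the size is added to the pending range.
 */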
705 static int add_extent(u64 start, u64 size, struct metadump_struct *md,
709 if (md->data != data ||
710 md->pending_size + size > MAX_PENDING_SIZE ||
711 md->pending_start + md->pending_size != start) {
712 ret = flush_pending(md, 0);
715 md->pending_start = start;
717 readahead_tree_block(md->root->fs_info, start, 0);
718 md->pending_size += size;
723 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
724 static int is_tree_block(struct btrfs_root *extent_root,
725 struct btrfs_path *path, u64 bytenr)
727 struct extent_buffer *leaf;
728 struct btrfs_key key;
732 leaf = path->nodes[0];
734 struct btrfs_extent_ref_v0 *ref_item;
736 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
737 ret = btrfs_next_leaf(extent_root, path);
742 leaf = path->nodes[0];
744 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
745 if (key.objectid != bytenr)
747 if (key.type != BTRFS_EXTENT_REF_V0_KEY)
749 ref_item = btrfs_item_ptr(leaf, path->slots[0],
750 struct btrfs_extent_ref_v0);
751 ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
752 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
760 static int copy_tree_blocks(struct btrfs_root *root, struct extent_buffer *eb,
761 struct metadump_struct *metadump, int root_tree)
763 struct extent_buffer *tmp;
764 struct btrfs_root_item *ri;
765 struct btrfs_key key;
766 struct btrfs_fs_info *fs_info = root->fs_info;
773 ret = add_extent(btrfs_header_bytenr(eb), fs_info->nodesize,
776 error("unable to add metadata block %llu: %d",
777 btrfs_header_bytenr(eb), ret);
781 if (btrfs_header_level(eb) == 0 && !root_tree)
784 level = btrfs_header_level(eb);
785 nritems = btrfs_header_nritems(eb);
786 for (i = 0; i < nritems; i++) {
788 btrfs_item_key_to_cpu(eb, &key, i);
789 if (key.type != BTRFS_ROOT_ITEM_KEY)
791 ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
792 bytenr = btrfs_disk_root_bytenr(eb, ri);
793 tmp = read_tree_block(fs_info, bytenr, 0);
794 if (!extent_buffer_uptodate(tmp)) {
795 error("unable to read log root block");
798 ret = copy_tree_blocks(root, tmp, metadump, 0);
799 free_extent_buffer(tmp);
803 bytenr = btrfs_node_blockptr(eb, i);
804 tmp = read_tree_block(fs_info, bytenr, 0);
805 if (!extent_buffer_uptodate(tmp)) {
806 error("unable to read log root block");
809 ret = copy_tree_blocks(root, tmp, metadump, root_tree);
810 free_extent_buffer(tmp);
819 static int copy_log_trees(struct btrfs_root *root,
820 struct metadump_struct *metadump)
822 u64 blocknr = btrfs_super_log_root(root->fs_info->super_copy);
827 if (!root->fs_info->log_root_tree ||
828 !root->fs_info->log_root_tree->node) {
829 error("unable to copy tree log, it has not been setup");
833 return copy_tree_blocks(root, root->fs_info->log_root_tree->node,
837 static int copy_space_cache(struct btrfs_root *root,
838 struct metadump_struct *metadump,
839 struct btrfs_path *path)
841 struct extent_buffer *leaf;
842 struct btrfs_file_extent_item *fi;
843 struct btrfs_key key;
844 u64 bytenr, num_bytes;
847 root = root->fs_info->tree_root;
850 key.type = BTRFS_EXTENT_DATA_KEY;
853 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
855 error("free space inode not found: %d", ret);
859 leaf = path->nodes[0];
862 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
863 ret = btrfs_next_leaf(root, path);
865 error("cannot go to next leaf %d", ret);
870 leaf = path->nodes[0];
873 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
874 if (key.type != BTRFS_EXTENT_DATA_KEY) {
879 fi = btrfs_item_ptr(leaf, path->slots[0],
880 struct btrfs_file_extent_item);
881 if (btrfs_file_extent_type(leaf, fi) !=
882 BTRFS_FILE_EXTENT_REG) {
887 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
888 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
889 ret = add_extent(bytenr, num_bytes, metadump, 1);
891 error("unable to add space cache blocks %d", ret);
892 btrfs_release_path(path);
901 static int copy_from_extent_tree(struct metadump_struct *metadump,
902 struct btrfs_path *path)
904 struct btrfs_root *extent_root;
905 struct extent_buffer *leaf;
906 struct btrfs_extent_item *ei;
907 struct btrfs_key key;
912 extent_root = metadump->root->fs_info->extent_root;
913 bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
914 key.objectid = bytenr;
915 key.type = BTRFS_EXTENT_ITEM_KEY;
918 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
920 error("extent root not found: %d", ret);
925 leaf = path->nodes[0];
928 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
929 ret = btrfs_next_leaf(extent_root, path);
931 error("cannot go to next leaf %d", ret);
938 leaf = path->nodes[0];
941 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
942 if (key.objectid < bytenr ||
943 (key.type != BTRFS_EXTENT_ITEM_KEY &&
944 key.type != BTRFS_METADATA_ITEM_KEY)) {
949 bytenr = key.objectid;
950 if (key.type == BTRFS_METADATA_ITEM_KEY) {
951 num_bytes = extent_root->fs_info->nodesize;
953 num_bytes = key.offset;
956 if (num_bytes == 0) {
957 error("extent length 0 at bytenr %llu key type %d",
958 (unsigned long long)bytenr, key.type);
963 if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
964 ei = btrfs_item_ptr(leaf, path->slots[0],
965 struct btrfs_extent_item);
966 if (btrfs_extent_flags(leaf, ei) &
967 BTRFS_EXTENT_FLAG_TREE_BLOCK) {
968 ret = add_extent(bytenr, num_bytes, metadump,
971 error("unable to add block %llu: %d",
972 (unsigned long long)bytenr, ret);
977 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
978 ret = is_tree_block(extent_root, path, bytenr);
980 error("failed to check tree block %llu: %d",
981 (unsigned long long)bytenr, ret);
986 ret = add_extent(bytenr, num_bytes, metadump,
989 error("unable to add block %llu: %d",
990 (unsigned long long)bytenr, ret);
997 "either extent tree is corrupted or you haven't built with V0 support");
1002 bytenr += num_bytes;
1005 btrfs_release_path(path);
1010 static int create_metadump(const char *input, FILE *out, int num_threads,
1011 int compress_level, enum sanitize_mode sanitize,
1014 struct btrfs_root *root;
1015 struct btrfs_path path;
1016 struct metadump_struct metadump;
1020 root = open_ctree(input, 0, 0);
1022 error("open ctree failed");
1026 ret = metadump_init(&metadump, root, out, num_threads,
1027 compress_level, sanitize);
1029 error("failed to initialize metadump: %d", ret);
1034 ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
1037 error("unable to add metadata: %d", ret);
1042 btrfs_init_path(&path);
1045 ret = copy_tree_blocks(root, root->fs_info->chunk_root->node,
1052 ret = copy_tree_blocks(root, root->fs_info->tree_root->node,
1059 ret = copy_from_extent_tree(&metadump, &path);
1066 ret = copy_log_trees(root, &metadump);
1072 ret = copy_space_cache(root, &metadump, &path);
1074 ret = flush_pending(&metadump, 1);
1078 error("failed to flush pending data: %d", ret);
1081 metadump_destroy(&metadump, num_threads);
1083 btrfs_release_path(&path);
1084 ret = close_ctree(root);
1085 return err ? err : ret;
1088 static void update_super_old(u8 *buffer)
1090 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1091 struct btrfs_chunk *chunk;
1092 struct btrfs_disk_key *key;
1093 u32 sectorsize = btrfs_super_sectorsize(super);
1094 u64 flags = btrfs_super_flags(super);
1096 flags |= BTRFS_SUPER_FLAG_METADUMP;
1097 btrfs_set_super_flags(super, flags);
1099 key = (struct btrfs_disk_key *)(super->sys_chunk_array);
1100 chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
1101 sizeof(struct btrfs_disk_key));
1103 btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1104 btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
1105 btrfs_set_disk_key_offset(key, 0);
1107 btrfs_set_stack_chunk_length(chunk, (u64)-1);
1108 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
1109 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
1110 btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
1111 btrfs_set_stack_chunk_io_align(chunk, sectorsize);
1112 btrfs_set_stack_chunk_io_width(chunk, sectorsize);
1113 btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
1114 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1115 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1116 chunk->stripe.devid = super->dev_item.devid;
1117 btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
1118 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
1119 btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
1120 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
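/*
 * Rewrite the superblock for a restored image.  Roughly: walk
 * sys_chunk_array and collapse every chunk item to a single stripe
 * (keeping both copies for DUP), remap the stripe offsets with
 * logical_to_physical(), point them at this device, then set the
 * METADUMP_V2 flag and num_devices to 1.
 */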
1123 static int update_super(struct mdrestore_struct *mdres, u8 *buffer)
1125 struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
1126 struct btrfs_chunk *chunk;
1127 struct btrfs_disk_key *disk_key;
1128 struct btrfs_key key;
1129 u64 flags = btrfs_super_flags(super);
1130 u32 new_array_size = 0;
1133 u8 *ptr, *write_ptr;
1134 int old_num_stripes;
1136 write_ptr = ptr = super->sys_chunk_array;
1137 array_size = btrfs_super_sys_array_size(super);
1139 while (cur < array_size) {
1140 disk_key = (struct btrfs_disk_key *)ptr;
1141 btrfs_disk_key_to_cpu(&key, disk_key);
1143 new_array_size += sizeof(*disk_key);
1144 memmove(write_ptr, ptr, sizeof(*disk_key));
1146 write_ptr += sizeof(*disk_key);
1147 ptr += sizeof(*disk_key);
1148 cur += sizeof(*disk_key);
1150 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1151 u64 type, physical, physical_dup, size = 0;
1153 chunk = (struct btrfs_chunk *)ptr;
1154 old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1155 chunk = (struct btrfs_chunk *)write_ptr;
1157 memmove(write_ptr, ptr, sizeof(*chunk));
1158 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1159 type = btrfs_stack_chunk_type(chunk);
1160 if (type & BTRFS_BLOCK_GROUP_DUP) {
1161 new_array_size += sizeof(struct btrfs_stripe);
1162 write_ptr += sizeof(struct btrfs_stripe);
1164 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1165 btrfs_set_stack_chunk_type(chunk,
1166 BTRFS_BLOCK_GROUP_SYSTEM);
1168 chunk->stripe.devid = super->dev_item.devid;
1169 physical = logical_to_physical(mdres, key.offset,
1170 &size, &physical_dup);
1171 if (size != (u64)-1)
1172 btrfs_set_stack_stripe_offset(&chunk->stripe,
1174 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
1176 new_array_size += sizeof(*chunk);
1178 error("bogus key in the sys array %d", key.type);
1181 write_ptr += sizeof(*chunk);
1182 ptr += btrfs_chunk_item_size(old_num_stripes);
1183 cur += btrfs_chunk_item_size(old_num_stripes);
1186 if (mdres->clear_space_cache)
1187 btrfs_set_super_cache_generation(super, 0);
1189 flags |= BTRFS_SUPER_FLAG_METADUMP_V2;
1190 btrfs_set_super_flags(super, flags);
1191 btrfs_set_super_sys_array_size(super, new_array_size);
1192 btrfs_set_super_num_devices(super, 1);
1193 csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
1198 static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size)
1200 struct extent_buffer *eb;
1202 eb = calloc(1, sizeof(struct extent_buffer) + size);
1211 static void truncate_item(struct extent_buffer *eb, int slot, u32 new_size)
1213 struct btrfs_item *item;
1221 old_size = btrfs_item_size_nr(eb, slot);
1222 if (old_size == new_size)
1225 nritems = btrfs_header_nritems(eb);
1226 data_end = btrfs_item_offset_nr(eb, nritems - 1);
1228 old_data_start = btrfs_item_offset_nr(eb, slot);
1229 size_diff = old_size - new_size;
1231 for (i = slot; i < nritems; i++) {
1233 item = btrfs_item_nr(i);
1234 ioff = btrfs_item_offset(eb, item);
1235 btrfs_set_item_offset(eb, item, ioff + size_diff);
1238 memmove_extent_buffer(eb, btrfs_leaf_data(eb) + data_end + size_diff,
1239 btrfs_leaf_data(eb) + data_end,
1240 old_data_start + new_size - data_end);
1241 item = btrfs_item_nr(slot);
1242 btrfs_set_item_size(eb, item, new_size);
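/*
 * Rewrite the chunk tree leaves embedded in a restored block: after
 * sanity-checking the header (bytenr, fsid, owner, leaf level), every
 * chunk item is truncated to a single stripe (the DUP copy is kept when
 * present), the RAID bits are masked out of the chunk type, and the
 * stripes are pointed at this device with remapped physical offsets
 * before the block is re-checksummed.
 */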
1245 static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
1246 struct async_work *async, u8 *buffer,
1249 struct extent_buffer *eb;
1250 size_t size_left = size;
1251 u64 bytenr = async->start;
1254 if (size_left % mdres->nodesize)
1257 eb = alloc_dummy_eb(bytenr, mdres->nodesize);
1263 memcpy(eb->data, buffer, mdres->nodesize);
1265 if (btrfs_header_bytenr(eb) != bytenr)
1267 if (memcmp(mdres->fsid,
1268 eb->data + offsetof(struct btrfs_header, fsid),
1272 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID)
1275 if (btrfs_header_level(eb) != 0)
1278 for (i = 0; i < btrfs_header_nritems(eb); i++) {
1279 struct btrfs_chunk *chunk;
1280 struct btrfs_key key;
1281 u64 type, physical, physical_dup, size = (u64)-1;
1283 btrfs_item_key_to_cpu(eb, &key, i);
1284 if (key.type != BTRFS_CHUNK_ITEM_KEY)
1288 physical = logical_to_physical(mdres, key.offset,
1289 &size, &physical_dup);
1292 truncate_item(eb, i, sizeof(*chunk));
1293 chunk = btrfs_item_ptr(eb, i, struct btrfs_chunk);
1296 /* Zero out the RAID profile */
1297 type = btrfs_chunk_type(eb, chunk);
1298 type &= (BTRFS_BLOCK_GROUP_DATA |
1299 BTRFS_BLOCK_GROUP_SYSTEM |
1300 BTRFS_BLOCK_GROUP_METADATA |
1301 BTRFS_BLOCK_GROUP_DUP);
1302 btrfs_set_chunk_type(eb, chunk, type);
1305 btrfs_set_chunk_num_stripes(eb, chunk, 1);
1306 btrfs_set_chunk_sub_stripes(eb, chunk, 0);
1307 btrfs_set_stripe_devid_nr(eb, chunk, 0, mdres->devid);
1308 if (size != (u64)-1)
1309 btrfs_set_stripe_offset_nr(eb, chunk, 0,
1311 /* update stripe 2 offset */
1313 btrfs_set_stripe_offset_nr(eb, chunk, 1,
1316 write_extent_buffer(eb, mdres->uuid,
1317 (unsigned long)btrfs_stripe_dev_uuid_nr(
1321 memcpy(buffer, eb->data, eb->len);
1322 csum_block(buffer, eb->len);
1324 size_left -= mdres->nodesize;
1325 buffer += mdres->nodesize;
1326 bytenr += mdres->nodesize;
1333 static void write_backup_supers(int fd, u8 *buf)
1335 struct btrfs_super_block *super = (struct btrfs_super_block *)buf;
1342 if (fstat(fd, &st)) {
1344 "cannot stat restore point, won't be able to write backup supers: %s",
1349 size = btrfs_device_size(fd, &st);
1351 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1352 bytenr = btrfs_sb_offset(i);
1353 if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
1355 btrfs_set_super_bytenr(super, bytenr);
1356 csum_block(buf, BTRFS_SUPER_INFO_SIZE);
1357 ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
1358 if (ret < BTRFS_SUPER_INFO_SIZE) {
1361 "problem writing out backup super block %d: %s",
1362 i, strerror(errno));
1364 error("short write writing out backup super block");
1370 static void *restore_worker(void *data)
1372 struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
1373 struct async_work *async;
1379 int compress_size = MAX_PENDING_SIZE * 4;
1381 outfd = fileno(mdres->out);
1382 buffer = malloc(compress_size);
1384 error("not enough memory for restore worker buffer");
1385 pthread_mutex_lock(&mdres->mutex);
1387 mdres->error = -ENOMEM;
1388 pthread_mutex_unlock(&mdres->mutex);
1393 u64 bytenr, physical_dup;
1397 pthread_mutex_lock(&mdres->mutex);
1398 while (!mdres->nodesize || list_empty(&mdres->list)) {
1400 pthread_mutex_unlock(&mdres->mutex);
1403 pthread_cond_wait(&mdres->cond, &mdres->mutex);
1405 async = list_entry(mdres->list.next, struct async_work, list);
1406 list_del_init(&async->list);
1408 if (mdres->compress_method == COMPRESS_ZLIB) {
1409 size = compress_size;
1410 pthread_mutex_unlock(&mdres->mutex);
1411 ret = uncompress(buffer, (unsigned long *)&size,
1412 async->buffer, async->bufsize);
1413 pthread_mutex_lock(&mdres->mutex);
1415 error("decompression failed with %d", ret);
1420 outbuf = async->buffer;
1421 size = async->bufsize;
1424 if (!mdres->multi_devices) {
1425 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1426 if (mdres->old_restore) {
1427 update_super_old(outbuf);
1429 ret = update_super(mdres, outbuf);
1433 } else if (!mdres->old_restore) {
1434 ret = fixup_chunk_tree_block(mdres, async, outbuf, size);
1440 if (!mdres->fixup_offset) {
1442 u64 chunk_size = size;
1444 if (!mdres->multi_devices && !mdres->old_restore)
1445 bytenr = logical_to_physical(mdres,
1446 async->start + offset,
1450 bytenr = async->start + offset;
1452 ret = pwrite64(outfd, outbuf+offset, chunk_size,
1454 if (ret != chunk_size)
1458 ret = pwrite64(outfd, outbuf+offset,
1461 if (ret != chunk_size)
1465 offset += chunk_size;
1470 error("unable to write to device: %s",
1474 error("short write");
1478 } else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
1479 ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
1481 error("failed to write data");
1487 /* backup super blocks are already there at fixup_offset stage */
1488 if (!mdres->multi_devices && async->start == BTRFS_SUPER_INFO_OFFSET)
1489 write_backup_supers(outfd, outbuf);
1491 if (err && !mdres->error)
1494 pthread_mutex_unlock(&mdres->mutex);
1496 free(async->buffer);
1504 static void mdrestore_destroy(struct mdrestore_struct *mdres, int num_threads)
1509 while ((n = rb_first(&mdres->chunk_tree))) {
1510 struct fs_chunk *entry;
1512 entry = rb_entry(n, struct fs_chunk, l);
1513 rb_erase(n, &mdres->chunk_tree);
1514 rb_erase(&entry->p, &mdres->physical_tree);
1517 pthread_mutex_lock(&mdres->mutex);
1519 pthread_cond_broadcast(&mdres->cond);
1520 pthread_mutex_unlock(&mdres->mutex);
1522 for (i = 0; i < num_threads; i++)
1523 pthread_join(mdres->threads[i], NULL);
1525 pthread_cond_destroy(&mdres->cond);
1526 pthread_mutex_destroy(&mdres->mutex);
1529 static int mdrestore_init(struct mdrestore_struct *mdres,
1530 FILE *in, FILE *out, int old_restore,
1531 int num_threads, int fixup_offset,
1532 struct btrfs_fs_info *info, int multi_devices)
1536 memset(mdres, 0, sizeof(*mdres));
1537 pthread_cond_init(&mdres->cond, NULL);
1538 pthread_mutex_init(&mdres->mutex, NULL);
1539 INIT_LIST_HEAD(&mdres->list);
1540 INIT_LIST_HEAD(&mdres->overlapping_chunks);
1543 mdres->old_restore = old_restore;
1544 mdres->chunk_tree.rb_node = NULL;
1545 mdres->fixup_offset = fixup_offset;
1547 mdres->multi_devices = multi_devices;
1548 mdres->clear_space_cache = 0;
1549 mdres->last_physical_offset = 0;
1550 mdres->alloced_chunks = 0;
1555 mdres->num_threads = num_threads;
1556 for (i = 0; i < num_threads; i++) {
1557 ret = pthread_create(&mdres->threads[i], NULL, restore_worker,
1560 /* pthread_create returns errno directly */
1566 mdrestore_destroy(mdres, i + 1);
1570 static int fill_mdres_info(struct mdrestore_struct *mdres,
1571 struct async_work *async)
1573 struct btrfs_super_block *super;
1578 /* We've already been initialized */
1579 if (mdres->nodesize)
1582 if (mdres->compress_method == COMPRESS_ZLIB) {
1583 size_t size = MAX_PENDING_SIZE * 2;
1585 buffer = malloc(MAX_PENDING_SIZE * 2);
1588 ret = uncompress(buffer, (unsigned long *)&size,
1589 async->buffer, async->bufsize);
1591 error("decompression failed with %d", ret);
1597 outbuf = async->buffer;
1600 super = (struct btrfs_super_block *)outbuf;
1601 mdres->nodesize = btrfs_super_nodesize(super);
1602 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
1603 memcpy(mdres->uuid, super->dev_item.uuid,
1605 mdres->devid = le64_to_cpu(super->dev_item.devid);
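/*
 * Read one cluster's worth of items from the image stream: allocate an
 * async_work per item, read its payload, pick up the restore parameters
 * from the superblock item via fill_mdres_info(), queue the work for the
 * restore threads, skip the padding at the end of the cluster and return
 * the offset of the next cluster via @next.
 */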
1610 static int add_cluster(struct meta_cluster *cluster,
1611 struct mdrestore_struct *mdres, u64 *next)
1613 struct meta_cluster_item *item;
1614 struct meta_cluster_header *header = &cluster->header;
1615 struct async_work *async;
1620 pthread_mutex_lock(&mdres->mutex);
1621 mdres->compress_method = header->compress;
1622 pthread_mutex_unlock(&mdres->mutex);
1624 bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
1625 nritems = le32_to_cpu(header->nritems);
1626 for (i = 0; i < nritems; i++) {
1627 item = &cluster->items[i];
1628 async = calloc(1, sizeof(*async));
1630 error("not enough memory for async data");
1633 async->start = le64_to_cpu(item->bytenr);
1634 async->bufsize = le32_to_cpu(item->size);
1635 async->buffer = malloc(async->bufsize);
1636 if (!async->buffer) {
1637 error("not enough memory for async buffer");
1641 ret = fread(async->buffer, async->bufsize, 1, mdres->in);
1643 error("unable to read buffer: %s", strerror(errno));
1644 free(async->buffer);
1648 bytenr += async->bufsize;
1650 pthread_mutex_lock(&mdres->mutex);
1651 if (async->start == BTRFS_SUPER_INFO_OFFSET) {
1652 ret = fill_mdres_info(mdres, async);
1654 error("unable to set up restore state");
1655 pthread_mutex_unlock(&mdres->mutex);
1656 free(async->buffer);
1661 list_add_tail(&async->list, &mdres->list);
1663 pthread_cond_signal(&mdres->cond);
1664 pthread_mutex_unlock(&mdres->mutex);
1666 if (bytenr & BLOCK_MASK) {
1667 char buffer[BLOCK_MASK];
1668 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
1671 ret = fread(buffer, size, 1, mdres->in);
1673 error("failed to read buffer: %s", strerror(errno));
1681 static int wait_for_worker(struct mdrestore_struct *mdres)
1685 pthread_mutex_lock(&mdres->mutex);
1687 while (!ret && mdres->num_items > 0) {
1688 struct timespec ts = {
1690 .tv_nsec = 10000000,
1692 pthread_mutex_unlock(&mdres->mutex);
1693 nanosleep(&ts, NULL);
1694 pthread_mutex_lock(&mdres->mutex);
1697 pthread_mutex_unlock(&mdres->mutex);
1701 static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
1702 u64 bytenr, u64 item_bytenr, u32 bufsize,
1705 struct extent_buffer *eb;
1709 eb = alloc_dummy_eb(bytenr, mdres->nodesize);
1715 while (item_bytenr != bytenr) {
1716 buffer += mdres->nodesize;
1717 item_bytenr += mdres->nodesize;
1720 memcpy(eb->data, buffer, mdres->nodesize);
1721 if (btrfs_header_bytenr(eb) != bytenr) {
1722 error("eb bytenr does not match found bytenr: %llu != %llu",
1723 (unsigned long long)btrfs_header_bytenr(eb),
1724 (unsigned long long)bytenr);
1729 if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
1731 error("filesystem UUID of eb %llu does not match",
1732 (unsigned long long)bytenr);
1737 if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
1738 error("wrong eb %llu owner %llu",
1739 (unsigned long long)bytenr,
1740 (unsigned long long)btrfs_header_owner(eb));
1745 for (i = 0; i < btrfs_header_nritems(eb); i++) {
1746 struct btrfs_chunk *chunk;
1747 struct fs_chunk *fs_chunk;
1748 struct btrfs_key key;
1751 if (btrfs_header_level(eb)) {
1752 u64 blockptr = btrfs_node_blockptr(eb, i);
1754 ret = search_for_chunk_blocks(mdres, blockptr,
1761 /* Yay a leaf! We loves leafs! */
1762 btrfs_item_key_to_cpu(eb, &key, i);
1763 if (key.type != BTRFS_CHUNK_ITEM_KEY)
1766 fs_chunk = malloc(sizeof(struct fs_chunk));
1768 error("not enough memory to allocate chunk");
1772 memset(fs_chunk, 0, sizeof(*fs_chunk));
1773 chunk = btrfs_item_ptr(eb, i, struct btrfs_chunk);
1775 fs_chunk->logical = key.offset;
1776 fs_chunk->physical = btrfs_stripe_offset_nr(eb, chunk, 0);
1777 fs_chunk->bytes = btrfs_chunk_length(eb, chunk);
1778 INIT_LIST_HEAD(&fs_chunk->list);
1779 if (tree_search(&mdres->physical_tree, &fs_chunk->p,
1780 physical_cmp, 1) != NULL)
1781 list_add(&fs_chunk->list, &mdres->overlapping_chunks);
1783 tree_insert(&mdres->physical_tree, &fs_chunk->p,
1786 type = btrfs_chunk_type(eb, chunk);
1787 if (type & BTRFS_BLOCK_GROUP_DUP) {
1788 fs_chunk->physical_dup =
1789 btrfs_stripe_offset_nr(eb, chunk, 1);
1792 if (fs_chunk->physical_dup + fs_chunk->bytes >
1793 mdres->last_physical_offset)
1794 mdres->last_physical_offset = fs_chunk->physical_dup +
1796 else if (fs_chunk->physical + fs_chunk->bytes >
1797 mdres->last_physical_offset)
1798 mdres->last_physical_offset = fs_chunk->physical +
1800 mdres->alloced_chunks += fs_chunk->bytes;
/* in the DUP case, fs_chunk->bytes has to be counted twice */
1802 if (fs_chunk->physical_dup)
1803 mdres->alloced_chunks += fs_chunk->bytes;
1804 tree_insert(&mdres->chunk_tree, &fs_chunk->l, chunk_cmp);
/*
 * Scan the image for the metadata block at @search and load the chunk
 * items it contains (recursing through node pointers) into the restore
 * state.  If you have to ask you aren't worthy.
 */
1812 static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
1813 u64 search, u64 cluster_bytenr)
1815 struct meta_cluster *cluster;
1816 struct meta_cluster_header *header;
1817 struct meta_cluster_item *item;
1818 u64 current_cluster = cluster_bytenr, bytenr;
1820 u32 bufsize, nritems, i;
1821 u32 max_size = MAX_PENDING_SIZE * 2;
1822 u8 *buffer, *tmp = NULL;
1825 cluster = malloc(BLOCK_SIZE);
1827 error("not enough memory for cluster");
1831 buffer = malloc(max_size);
1833 error("not enough memory for buffer");
1838 if (mdres->compress_method == COMPRESS_ZLIB) {
1839 tmp = malloc(max_size);
1841 error("not enough memory for buffer");
1848 bytenr = current_cluster;
1850 if (fseek(mdres->in, current_cluster, SEEK_SET)) {
1851 error("seek failed: %s", strerror(errno));
1856 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
1858 if (cluster_bytenr != 0) {
1860 current_cluster = 0;
1865 "unknown state after reading cluster at %llu, probably corrupted data",
1869 } else if (ret < 0) {
1870 error("unable to read image at %llu: %s",
1871 (unsigned long long)cluster_bytenr,
1877 header = &cluster->header;
1878 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
1879 le64_to_cpu(header->bytenr) != current_cluster) {
1880 error("bad header in metadump image");
1885 bytenr += BLOCK_SIZE;
1886 nritems = le32_to_cpu(header->nritems);
1887 for (i = 0; i < nritems; i++) {
1890 item = &cluster->items[i];
1891 bufsize = le32_to_cpu(item->size);
1892 item_bytenr = le64_to_cpu(item->bytenr);
1894 if (bufsize > max_size) {
1895 error("item %u too big: %u > %u", i, bufsize,
1901 if (mdres->compress_method == COMPRESS_ZLIB) {
1902 ret = fread(tmp, bufsize, 1, mdres->in);
1904 error("read error: %s", strerror(errno));
1910 ret = uncompress(buffer,
1911 (unsigned long *)&size, tmp,
1914 error("decompression failed with %d",
1920 ret = fread(buffer, bufsize, 1, mdres->in);
1922 error("read error: %s",
1931 if (item_bytenr <= search &&
1932 item_bytenr + size > search) {
1933 ret = read_chunk_block(mdres, buffer, search,
1947 if (bytenr & BLOCK_MASK)
1948 bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
1949 current_cluster = bytenr;
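/*
 * Locate the superblock item in the first cluster of the image (seeking
 * past the other items, so this cannot be used with stdin), decompress it
 * if needed, record nodesize, fsid, device uuid and devid, and then walk
 * the dumped chunk tree from its root with search_for_chunk_blocks() to
 * build the logical-to-physical map used by the restore.
 */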
1958 static int build_chunk_tree(struct mdrestore_struct *mdres,
1959 struct meta_cluster *cluster)
1961 struct btrfs_super_block *super;
1962 struct meta_cluster_header *header;
1963 struct meta_cluster_item *item = NULL;
1964 u64 chunk_root_bytenr = 0;
1970 /* We can't seek with stdin so don't bother doing this */
1971 if (mdres->in == stdin)
1974 ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
1976 error("unable to read cluster: %s", strerror(errno));
1981 header = &cluster->header;
1982 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
1983 le64_to_cpu(header->bytenr) != 0) {
1984 error("bad header in metadump image");
1988 bytenr += BLOCK_SIZE;
1989 mdres->compress_method = header->compress;
1990 nritems = le32_to_cpu(header->nritems);
1991 for (i = 0; i < nritems; i++) {
1992 item = &cluster->items[i];
1994 if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
1996 bytenr += le32_to_cpu(item->size);
1997 if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
1998 error("seek failed: %s", strerror(errno));
if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
/* item may be NULL if the cluster was empty, don't dereference it */
error("did not find superblock at %llu",
(unsigned long long)BTRFS_SUPER_INFO_OFFSET);
2009 buffer = malloc(le32_to_cpu(item->size));
2011 error("not enough memory to allocate buffer");
2015 ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
2017 error("unable to read buffer: %s", strerror(errno));
2022 if (mdres->compress_method == COMPRESS_ZLIB) {
2023 size_t size = MAX_PENDING_SIZE * 2;
2026 tmp = malloc(MAX_PENDING_SIZE * 2);
2031 ret = uncompress(tmp, (unsigned long *)&size,
2032 buffer, le32_to_cpu(item->size));
2034 error("decompression failed with %d", ret);
2043 pthread_mutex_lock(&mdres->mutex);
2044 super = (struct btrfs_super_block *)buffer;
2045 chunk_root_bytenr = btrfs_super_chunk_root(super);
2046 mdres->nodesize = btrfs_super_nodesize(super);
2047 memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
2048 memcpy(mdres->uuid, super->dev_item.uuid,
2050 mdres->devid = le64_to_cpu(super->dev_item.devid);
2052 pthread_mutex_unlock(&mdres->mutex);
2054 return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
2057 static int range_contains_super(u64 physical, u64 bytes)
2062 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2063 super_bytenr = btrfs_sb_offset(i);
2064 if (super_bytenr >= physical &&
2065 super_bytenr < physical + bytes)
2072 static void remap_overlapping_chunks(struct mdrestore_struct *mdres)
2074 struct fs_chunk *fs_chunk;
2076 while (!list_empty(&mdres->overlapping_chunks)) {
2077 fs_chunk = list_first_entry(&mdres->overlapping_chunks,
2078 struct fs_chunk, list);
2079 list_del_init(&fs_chunk->list);
2080 if (range_contains_super(fs_chunk->physical,
2083 "remapping a chunk that had a super mirror inside of it, clearing space cache so we don't end up with corruption");
2084 mdres->clear_space_cache = 1;
2086 fs_chunk->physical = mdres->last_physical_offset;
2087 tree_insert(&mdres->physical_tree, &fs_chunk->p, physical_cmp);
2088 mdres->last_physical_offset += fs_chunk->bytes;
2092 static int fixup_devices(struct btrfs_fs_info *fs_info,
2093 struct mdrestore_struct *mdres, off_t dev_size)
2095 struct btrfs_trans_handle *trans;
2096 struct btrfs_dev_item *dev_item;
2097 struct btrfs_path path;
2098 struct extent_buffer *leaf;
2099 struct btrfs_root *root = fs_info->chunk_root;
2100 struct btrfs_key key;
2101 u64 devid, cur_devid;
2104 trans = btrfs_start_transaction(fs_info->tree_root, 1);
2105 if (IS_ERR(trans)) {
2106 error("cannot starting transaction %ld", PTR_ERR(trans));
2107 return PTR_ERR(trans);
2110 dev_item = &fs_info->super_copy->dev_item;
2112 devid = btrfs_stack_device_id(dev_item);
2114 btrfs_set_stack_device_total_bytes(dev_item, dev_size);
2115 btrfs_set_stack_device_bytes_used(dev_item, mdres->alloced_chunks);
2117 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2118 key.type = BTRFS_DEV_ITEM_KEY;
2121 btrfs_init_path(&path);
2124 ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
2126 error("search failed: %d", ret);
2131 leaf = path.nodes[0];
2132 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
2133 ret = btrfs_next_leaf(root, &path);
2135 error("cannot go to next leaf %d", ret);
2142 leaf = path.nodes[0];
2145 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
2146 if (key.type > BTRFS_DEV_ITEM_KEY)
2148 if (key.type != BTRFS_DEV_ITEM_KEY) {
2153 dev_item = btrfs_item_ptr(leaf, path.slots[0],
2154 struct btrfs_dev_item);
2155 cur_devid = btrfs_device_id(leaf, dev_item);
2156 if (devid != cur_devid) {
2157 ret = btrfs_del_item(trans, root, &path);
2159 error("cannot delete item: %d", ret);
2162 btrfs_release_path(&path);
2166 btrfs_set_device_total_bytes(leaf, dev_item, dev_size);
2167 btrfs_set_device_bytes_used(leaf, dev_item,
2168 mdres->alloced_chunks);
2169 btrfs_mark_buffer_dirty(leaf);
2173 btrfs_release_path(&path);
2174 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
2176 error("unable to commit transaction: %d", ret);
2182 static int restore_metadump(const char *input, FILE *out, int old_restore,
2183 int num_threads, int fixup_offset,
2184 const char *target, int multi_devices)
2186 struct meta_cluster *cluster = NULL;
2187 struct meta_cluster_header *header;
2188 struct mdrestore_struct mdrestore;
2189 struct btrfs_fs_info *info = NULL;
2194 if (!strcmp(input, "-")) {
2197 in = fopen(input, "r");
2199 error("unable to open metadump image: %s",
2205 /* NOTE: open with write mode */
2207 info = open_ctree_fs_info(target, 0, 0, 0,
2209 OPEN_CTREE_RESTORE |
2210 OPEN_CTREE_PARTIAL);
2212 error("open ctree failed");
2218 cluster = malloc(BLOCK_SIZE);
2220 error("not enough memory for cluster");
2225 ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
2226 fixup_offset, info, multi_devices);
2228 error("failed to initialize metadata restore state: %d", ret);
2229 goto failed_cluster;
2232 if (!multi_devices && !old_restore) {
2233 ret = build_chunk_tree(&mdrestore, cluster);
2236 if (!list_empty(&mdrestore.overlapping_chunks))
2237 remap_overlapping_chunks(&mdrestore);
2240 if (in != stdin && fseek(in, 0, SEEK_SET)) {
2241 error("seek failed: %s", strerror(errno));
2245 while (!mdrestore.error) {
2246 ret = fread(cluster, BLOCK_SIZE, 1, in);
2250 header = &cluster->header;
2251 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
2252 le64_to_cpu(header->bytenr) != bytenr) {
2253 error("bad header in metadump image");
2257 ret = add_cluster(cluster, &mdrestore, &bytenr);
2259 error("failed to add cluster: %d", ret);
2263 ret = wait_for_worker(&mdrestore);
2265 if (!ret && !multi_devices && !old_restore) {
2266 struct btrfs_root *root;
2269 root = open_ctree_fd(fileno(out), target, 0,
2270 OPEN_CTREE_PARTIAL |
2272 OPEN_CTREE_NO_DEVICES);
2274 error("open ctree failed in %s", target);
2278 info = root->fs_info;
2280 if (stat(target, &st)) {
2281 error("stat %s failed: %s", target, strerror(errno));
2282 close_ctree(info->chunk_root);
2287 ret = fixup_devices(info, &mdrestore, st.st_size);
2288 close_ctree(info->chunk_root);
2293 mdrestore_destroy(&mdrestore, num_threads);
2297 if (fixup_offset && info)
2298 close_ctree(info->chunk_root);
2305 static int update_disk_super_on_device(struct btrfs_fs_info *info,
2306 const char *other_dev, u64 cur_devid)
2308 struct btrfs_key key;
2309 struct extent_buffer *leaf;
2310 struct btrfs_path path;
2311 struct btrfs_dev_item *dev_item;
2312 struct btrfs_super_block *disk_super;
2313 char dev_uuid[BTRFS_UUID_SIZE];
2314 char fs_uuid[BTRFS_UUID_SIZE];
2315 u64 devid, type, io_align, io_width;
2316 u64 sector_size, total_bytes, bytes_used;
2317 char buf[BTRFS_SUPER_INFO_SIZE];
2321 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2322 key.type = BTRFS_DEV_ITEM_KEY;
2323 key.offset = cur_devid;
2325 btrfs_init_path(&path);
2326 ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
2328 error("search key failed: %d", ret);
2333 leaf = path.nodes[0];
2334 dev_item = btrfs_item_ptr(leaf, path.slots[0],
2335 struct btrfs_dev_item);
2337 devid = btrfs_device_id(leaf, dev_item);
2338 if (devid != cur_devid) {
2339 error("devid mismatch: %llu != %llu",
2340 (unsigned long long)devid,
2341 (unsigned long long)cur_devid);
2346 type = btrfs_device_type(leaf, dev_item);
2347 io_align = btrfs_device_io_align(leaf, dev_item);
2348 io_width = btrfs_device_io_width(leaf, dev_item);
2349 sector_size = btrfs_device_sector_size(leaf, dev_item);
2350 total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2351 bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2352 read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE);
2353 read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE);
2355 btrfs_release_path(&path);
2357 printf("update disk super on %s devid=%llu\n", other_dev, devid);
2359 /* update other devices' super block */
2360 fp = open(other_dev, O_CREAT | O_RDWR, 0600);
2362 error("could not open %s: %s", other_dev, strerror(errno));
2367 memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);
2369 disk_super = (struct btrfs_super_block *)buf;
2370 dev_item = &disk_super->dev_item;
2372 btrfs_set_stack_device_type(dev_item, type);
2373 btrfs_set_stack_device_id(dev_item, devid);
2374 btrfs_set_stack_device_total_bytes(dev_item, total_bytes);
2375 btrfs_set_stack_device_bytes_used(dev_item, bytes_used);
2376 btrfs_set_stack_device_io_align(dev_item, io_align);
2377 btrfs_set_stack_device_io_width(dev_item, io_width);
2378 btrfs_set_stack_device_sector_size(dev_item, sector_size);
2379 memcpy(dev_item->uuid, dev_uuid, BTRFS_UUID_SIZE);
2380 memcpy(dev_item->fsid, fs_uuid, BTRFS_UUID_SIZE);
2381 csum_block((u8 *)buf, BTRFS_SUPER_INFO_SIZE);
2383 ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
2384 if (ret != BTRFS_SUPER_INFO_SIZE) {
2386 error("cannot write superblock: %s", strerror(ret));
2388 error("cannot write superblock");
2393 write_backup_supers(fp, (u8 *)buf);
2401 static void print_usage(int ret)
2403 printf("usage: btrfs-image [options] source target\n");
2404 printf("\t-r \trestore metadump image\n");
2405 printf("\t-c value\tcompression level (0 ~ 9)\n");
2406 printf("\t-t value\tnumber of threads (1 ~ 32)\n");
2407 printf("\t-o \tdon't mess with the chunk tree when restoring\n");
2408 printf("\t-s \tsanitize file names, use once to just use garbage, use twice if you want crc collisions\n");
2409 printf("\t-w \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
2410 printf("\t-m \trestore for multiple devices\n");
2412 printf("\tIn the dump mode, source is the btrfs device and target is the output file (use '-' for stdout).\n");
2413 printf("\tIn the restore mode, source is the dumped image and target is the btrfs device/file.\n");
2417 int main(int argc, char *argv[])
2421 u64 num_threads = 0;
2422 u64 compress_level = 0;
2424 int old_restore = 0;
2426 int multi_devices = 0;
2428 enum sanitize_mode sanitize = SANITIZE_NONE;
2430 int usage_error = 0;
2434 static const struct option long_options[] = {
2435 { "help", no_argument, NULL, GETOPT_VAL_HELP},
2436 { NULL, 0, NULL, 0 }
2438 int c = getopt_long(argc, argv, "rc:t:oswm", long_options, NULL);
2446 num_threads = arg_strtou64(optarg);
2447 if (num_threads > MAX_WORKER_THREADS) {
2448 error("number of threads out of range: %llu > %d",
2449 (unsigned long long)num_threads,
2450 MAX_WORKER_THREADS);
2455 compress_level = arg_strtou64(optarg);
2456 if (compress_level > 9) {
2457 error("compression level out of range: %llu",
2458 (unsigned long long)compress_level);
2466 if (sanitize == SANITIZE_NONE)
2467 sanitize = SANITIZE_NAMES;
2468 else if (sanitize == SANITIZE_NAMES)
2469 sanitize = SANITIZE_COLLISIONS;
2478 case GETOPT_VAL_HELP:
2480 print_usage(c != GETOPT_VAL_HELP);
2485 if (check_argc_min(argc - optind, 2))
2488 dev_cnt = argc - optind - 1;
2493 "create and restore cannot be used at the same time");
2497 if (walk_trees || sanitize != SANITIZE_NONE || compress_level) {
2499 "useing -w, -s, -c options for restore makes no sense");
2502 if (multi_devices && dev_cnt < 2) {
2503 error("not enough devices specified for -m option");
2506 if (!multi_devices && dev_cnt != 1) {
2507 error("accepts only 1 device without -m option");
2515 source = argv[optind];
2516 target = argv[optind + 1];
2518 if (create && !strcmp(target, "-")) {
2521 out = fopen(target, "w+");
2523 error("unable to create target file %s", target);
2528 if (compress_level > 0 || create == 0) {
2529 if (num_threads == 0) {
2530 long tmp = sysconf(_SC_NPROCESSORS_ONLN);
2541 ret = check_mounted(source);
2543 warning("unable to check mount status of: %s",
2546 warning("%s already mounted, results may be inaccurate",
2550 ret = create_metadump(source, out, num_threads,
2551 compress_level, sanitize, walk_trees);
2553 ret = restore_metadump(source, out, old_restore, num_threads,
2554 0, target, multi_devices);
2557 error("%s failed: %s", (create) ? "create" : "restore",
2562 /* extended support for multiple devices */
2563 if (!create && multi_devices) {
2564 struct btrfs_fs_info *info;
2568 info = open_ctree_fs_info(target, 0, 0, 0,
2569 OPEN_CTREE_PARTIAL |
2570 OPEN_CTREE_RESTORE);
2572 error("open ctree failed at %s", target);
2576 total_devs = btrfs_super_num_devices(info->super_copy);
2577 if (total_devs != dev_cnt) {
2578 error("it needs %llu devices but has only %d",
2579 total_devs, dev_cnt);
2580 close_ctree(info->chunk_root);
2584 /* update super block on other disks */
2585 for (i = 2; i <= dev_cnt; i++) {
2586 ret = update_disk_super_on_device(info,
2587 argv[optind + i], (u64)i);
2589 error("update disk superblock failed devid %d: %d",
2591 close_ctree(info->chunk_root);
2596 close_ctree(info->chunk_root);
2598 /* fix metadata block to map correct chunk */
2599 ret = restore_metadump(source, out, 0, num_threads, 1,
2602 error("unable to fixup metadump: %d", ret);
2607 if (out == stdout) {
2611 if (ret && create) {
2614 unlink_ret = unlink(target);
2616 error("unlink output file %s failed: %s",
2617 target, strerror(errno));
2621 btrfs_close_all_devices();