Merge branch 'cov-fixes-v1-integration-20130201' of http://git.zabbo.net/cgit/btrfs...
[platform/upstream/btrfs-progs.git] / btrfs-image.c
1 /*
2  * Copyright (C) 2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #define _XOPEN_SOURCE 500
20 #define _GNU_SOURCE 1
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <fcntl.h>
27 #include <unistd.h>
28 #include <dirent.h>
29 #include <zlib.h>
30 #include "kerncompat.h"
31 #include "crc32c.h"
32 #include "ctree.h"
33 #include "disk-io.h"
34 #include "transaction.h"
35 #include "utils.h"
36 #include "version.h"
37
38
/* magic number identifying a metadump cluster header on disk */
#define HEADER_MAGIC            0xbd5c25e27295668bULL
/* max bytes of contiguous metadata batched into one async_work */
#define MAX_PENDING_SIZE        (256 * 1024)
/* image stream granularity; clusters and padding are BLOCK_SIZE aligned */
#define BLOCK_SIZE              1024
#define BLOCK_MASK              (BLOCK_SIZE - 1)

/* values for meta_cluster_header.compress */
#define COMPRESS_NONE           0
#define COMPRESS_ZLIB           1
46
/* on-disk index entry: one dumped buffer, 'size' bytes at image offset 'bytenr' */
struct meta_cluster_item {
	__le64 bytenr;
	__le32 size;
} __attribute__ ((__packed__));

/*
 * on-disk cluster header: 'magic' is HEADER_MAGIC, 'bytenr' the stream
 * offset of this cluster, 'nritems' the number of index entries that
 * follow, 'compress' one of COMPRESS_*
 */
struct meta_cluster_header {
	__le64 magic;
	__le64 bytenr;
	__le32 nritems;
	u8 compress;
} __attribute__ ((__packed__));

/* cluster header + index items + buffers */
struct meta_cluster {
	struct meta_cluster_header header;
	struct meta_cluster_item items[];
} __attribute__ ((__packed__));

/* how many index entries fit in the fixed BLOCK_SIZE index block */
#define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
			   sizeof(struct meta_cluster_item))
67
/*
 * One unit of work handed between the main thread and the workers.
 * 'list' queues it for a worker; 'ordered' preserves submission order
 * for writeout.  'size' is the uncompressed byte length starting at
 * disk offset 'start'; 'bufsize' is the current length of 'buffer'
 * (replaced by the compressed length after a dump worker runs).
 */
struct async_work {
	struct list_head list;
	struct list_head ordered;
	u64 start;
	u64 size;
	u8 *buffer;
	size_t bufsize;
};
76
/* state for creating an image: producer (tree walk) + compression workers */
struct metadump_struct {
	struct btrfs_root *root;	/* filesystem being dumped */
	FILE *out;			/* image output stream */

	struct meta_cluster *cluster;	/* index block under construction */

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;		/* guards the fields below */
	pthread_cond_t cond;		/* signals workers of new list entries */

	struct list_head list;		/* buffers awaiting compression */
	struct list_head ordered;	/* all buffers, in writeout order */
	size_t num_items;		/* buffers in the current cluster */
	size_t num_ready;		/* buffers done compressing */

	/* contiguous metadata run not yet turned into an async_work */
	u64 pending_start;
	u64 pending_size;

	int compress_level;		/* zlib level; 0 disables compression */
	int done;			/* tells workers to exit */
};
99
/* state for restoring an image: reader + pwrite worker threads */
struct mdrestore_struct {
	FILE *in;			/* image input stream */
	FILE *out;			/* target device/file */

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;		/* guards list/num_items/done */
	pthread_cond_t cond;

	struct list_head list;		/* buffers waiting to be written */
	size_t num_items;		/* outstanding buffers */

	int compress_method;		/* COMPRESS_* from the cluster header */
	int done;			/* tells workers to exit */
};
115
116 static void csum_block(u8 *buf, size_t len)
117 {
118         char result[BTRFS_CRC32_SIZE];
119         u32 crc = ~(u32)0;
120         crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
121         btrfs_csum_final(crc, result);
122         memcpy(buf, result, BTRFS_CRC32_SIZE);
123 }
124
/*
 * zero inline extents and csum items
 *
 * Scrub file data out of a leaf copy: walk every item in 'src' and, in
 * the destination buffer 'dst', wipe the payload of checksum items and
 * of inline file extents so the dumped image leaks no user data.
 */
static void zero_items(u8 *dst, struct extent_buffer *src)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_item *item;
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(src);
	size_t size;
	unsigned long ptr;
	int i, extent_type;

	for (i = 0; i < nritems; i++) {
		item = btrfs_item_nr(src, i);
		btrfs_item_key_to_cpu(src, &key, i);
		/* csum payloads are derived from file data; zero them whole */
		if (key.type == BTRFS_CSUM_ITEM_KEY) {
			size = btrfs_item_size_nr(src, i);
			memset(dst + btrfs_leaf_data(src) +
			       btrfs_item_offset_nr(src, i), 0, size);
			continue;
		}
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(src, fi);
		/* only inline extents embed file data inside the leaf */
		if (extent_type != BTRFS_FILE_EXTENT_INLINE)
			continue;

		ptr = btrfs_file_extent_inline_start(fi);
		size = btrfs_file_extent_inline_item_len(src, item);
		memset(dst + ptr, 0, size);
	}
}
160
/*
 * copy buffer and zero useless data in the buffer
 *
 * Copy a tree block (or the superblock) into 'dst', blank the unused
 * regions and any file data (via zero_items()), then recompute the
 * block checksum since the zeroing invalidated the stored one.
 */
static void copy_buffer(u8 *dst, struct extent_buffer *src)
{
	int level;
	size_t size;
	u32 nritems;

	memcpy(dst, src->data, src->len);
	/* the superblock is copied verbatim; only tree blocks get scrubbed */
	if (src->start == BTRFS_SUPER_INFO_OFFSET)
		return;

	level = btrfs_header_level(src);
	nritems = btrfs_header_nritems(src);

	if (nritems == 0) {
		/* empty block: keep only the header */
		size = sizeof(struct btrfs_header);
		memset(dst + size, 0, src->len - size);
	} else if (level == 0) {
		/*
		 * leaf: item data grows down from the end of the block while
		 * item headers grow up; zero the slack gap between them,
		 * then wipe csum/inline-extent payloads.
		 */
		size = btrfs_leaf_data(src) +
			btrfs_item_offset_nr(src, nritems - 1) -
			btrfs_item_nr_offset(nritems);
		memset(dst + btrfs_item_nr_offset(nritems), 0, size);
		zero_items(dst, src);
	} else {
		/* interior node: zero everything after the last key pointer */
		size = offsetof(struct btrfs_node, ptrs) +
			sizeof(struct btrfs_key_ptr) * nritems;
		memset(dst + size, 0, src->len - size);
	}
	csum_block(dst, src->len);
}
193
/*
 * Compression worker thread for image creation.  Pulls async_work
 * entries off md->list, compresses them in place (when a zlib level is
 * configured) and bumps md->num_ready so write_buffers() can proceed.
 * Exits when md->done is set and the queue is drained.
 */
static void *dump_worker(void *data)
{
	struct metadump_struct *md = (struct metadump_struct *)data;
	struct async_work *async;
	int ret;

	while (1) {
		pthread_mutex_lock(&md->mutex);
		while (list_empty(&md->list)) {
			if (md->done) {
				pthread_mutex_unlock(&md->mutex);
				goto out;
			}
			pthread_cond_wait(&md->cond, &md->mutex);
		}
		async = list_entry(md->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&md->mutex);

		if (md->compress_level > 0) {
			u8 *orig = async->buffer;

			/* worst-case output size for this input */
			async->bufsize = compressBound(async->size);
			async->buffer = malloc(async->bufsize);
			/* was unchecked: compress2 would deref NULL on OOM */
			BUG_ON(!async->buffer);

			/* compress2 shrinks bufsize to the actual length */
			ret = compress2(async->buffer,
					 (unsigned long *)&async->bufsize,
					 orig, async->size, md->compress_level);
			BUG_ON(ret != Z_OK);

			free(orig);
		}

		pthread_mutex_lock(&md->mutex);
		md->num_ready++;
		pthread_mutex_unlock(&md->mutex);
	}
out:
	pthread_exit(NULL);
}
234
235 static void meta_cluster_init(struct metadump_struct *md, u64 start)
236 {
237         struct meta_cluster_header *header;
238
239         md->num_items = 0;
240         md->num_ready = 0;
241         header = &md->cluster->header;
242         header->magic = cpu_to_le64(HEADER_MAGIC);
243         header->bytenr = cpu_to_le64(start);
244         header->nritems = cpu_to_le32(0);
245         header->compress = md->compress_level > 0 ?
246                            COMPRESS_ZLIB : COMPRESS_NONE;
247 }
248
249 static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
250                          FILE *out, int num_threads, int compress_level)
251 {
252         int i, ret;
253
254         memset(md, 0, sizeof(*md));
255         pthread_cond_init(&md->cond, NULL);
256         pthread_mutex_init(&md->mutex, NULL);
257         INIT_LIST_HEAD(&md->list);
258         INIT_LIST_HEAD(&md->ordered);
259         md->root = root;
260         md->out = out;
261         md->pending_start = (u64)-1;
262         md->compress_level = compress_level;
263         md->cluster = calloc(1, BLOCK_SIZE);
264         if (!md->cluster)
265                 return -ENOMEM;
266
267         meta_cluster_init(md, 0);
268         if (!num_threads)
269                 return 0;
270
271         md->num_threads = num_threads;
272         md->threads = calloc(num_threads, sizeof(pthread_t));
273         if (!md->threads)
274                 return -ENOMEM;
275         for (i = 0; i < num_threads; i++) {
276                 ret = pthread_create(md->threads + i, NULL, dump_worker, md);
277                 if (ret)
278                         break;
279         }
280         return ret;
281 }
282
283 static void metadump_destroy(struct metadump_struct *md)
284 {
285         int i;
286         pthread_mutex_lock(&md->mutex);
287         md->done = 1;
288         pthread_cond_broadcast(&md->cond);
289         pthread_mutex_unlock(&md->mutex);
290
291         for (i = 0; i < md->num_threads; i++)
292                 pthread_join(md->threads[i], NULL);
293
294         pthread_cond_destroy(&md->cond);
295         pthread_mutex_destroy(&md->mutex);
296         free(md->threads);
297         free(md->cluster);
298 }
299
300 static int write_zero(FILE *out, size_t size)
301 {
302         static char zero[BLOCK_SIZE];
303         return fwrite(zero, size, 1, out);
304 }
305
/*
 * Flush the current cluster to the image stream: wait for the workers
 * to finish compressing every queued buffer, write the BLOCK_SIZE index
 * block, then the buffers in submission order, then pad the stream to
 * the next BLOCK_SIZE boundary.
 *
 * Called with md->mutex held (by flush_pending()).  *next receives the
 * stream offset where the next cluster will start, or 0 when there was
 * nothing to write.
 */
static int write_buffers(struct metadump_struct *md, u64 *next)
{
	struct meta_cluster_header *header = &md->cluster->header;
	struct meta_cluster_item *item;
	struct async_work *async;
	u64 bytenr = 0;
	u32 nritems = 0;
	int ret;

	if (list_empty(&md->ordered))
		goto out;

	/* wait until all buffers are compressed */
	while (md->num_items > md->num_ready) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		/* drop the lock so dump_worker can make progress */
		pthread_mutex_unlock(&md->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&md->mutex);
	}

	/* setup and write index block */
	list_for_each_entry(async, &md->ordered, ordered) {
		item = md->cluster->items + nritems;
		item->bytenr = cpu_to_le64(async->start);
		item->size = cpu_to_le32(async->bufsize);
		nritems++;
	}
	header->nritems = cpu_to_le32(nritems);

	ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
	BUG_ON(ret != 1);

	/* write buffers */
	bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	while (!list_empty(&md->ordered)) {
		async = list_entry(md->ordered.next, struct async_work,
				   ordered);
		list_del_init(&async->ordered);

		bytenr += async->bufsize;
		ret = fwrite(async->buffer, async->bufsize, 1, md->out);
		BUG_ON(ret != 1);

		free(async->buffer);
		free(async);
	}

	/* zero unused space in the last block */
	if (bytenr & BLOCK_MASK) {
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = write_zero(md->out, size);
		BUG_ON(ret != 1);
	}
out:
	*next = bytenr;
	return 0;
}
368
369 static int flush_pending(struct metadump_struct *md, int done)
370 {
371         struct async_work *async = NULL;
372         struct extent_buffer *eb;
373         u64 blocksize = md->root->nodesize;
374         u64 start;
375         u64 size;
376         size_t offset;
377         int ret;
378
379         if (md->pending_size) {
380                 async = calloc(1, sizeof(*async));
381                 if (!async)
382                         return -ENOMEM;
383
384                 async->start = md->pending_start;
385                 async->size = md->pending_size;
386                 async->bufsize = async->size;
387                 async->buffer = malloc(async->bufsize);
388
389                 offset = 0;
390                 start = async->start;
391                 size = async->size;
392                 while (size > 0) {
393                         eb = read_tree_block(md->root, start, blocksize, 0);
394                         BUG_ON(!eb);
395                         copy_buffer(async->buffer + offset, eb);
396                         free_extent_buffer(eb);
397                         start += blocksize;
398                         offset += blocksize;
399                         size -= blocksize;
400                 }
401
402                 md->pending_start = (u64)-1;
403                 md->pending_size = 0;
404         } else if (!done) {
405                 return 0;
406         }
407
408         pthread_mutex_lock(&md->mutex);
409         if (async) {
410                 list_add_tail(&async->ordered, &md->ordered);
411                 md->num_items++;
412                 if (md->compress_level > 0) {
413                         list_add_tail(&async->list, &md->list);
414                         pthread_cond_signal(&md->cond);
415                 } else {
416                         md->num_ready++;
417                 }
418         }
419         if (md->num_items >= ITEMS_PER_CLUSTER || done) {
420                 ret = write_buffers(md, &start);
421                 BUG_ON(ret);
422                 meta_cluster_init(md, start);
423         }
424         pthread_mutex_unlock(&md->mutex);
425         return 0;
426 }
427
428 static int add_metadata(u64 start, u64 size, struct metadump_struct *md)
429 {
430         int ret;
431         if (md->pending_size + size > MAX_PENDING_SIZE ||
432             md->pending_start + md->pending_size != start) {
433                 ret = flush_pending(md, 0);
434                 if (ret)
435                         return ret;
436                 md->pending_start = start;
437         }
438         readahead_tree_block(md->root, start, size, 0);
439         md->pending_size += size;
440         return 0;
441 }
442
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
/*
 * Decide whether the v0 extent at 'bytenr' is a tree block by scanning
 * the extent ref items that follow it in the extent tree.  'path'
 * points at the extent item; returns 1 when a ref with a tree-level
 * objectid (below BTRFS_FIRST_FREE_OBJECTID) is found, 0 otherwise.
 */
static int is_tree_block(struct btrfs_root *extent_root,
			 struct btrfs_path *path, u64 bytenr)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 ref_objectid;
	int ret;

	leaf = path->nodes[0];
	while (1) {
		struct btrfs_extent_ref_v0 *ref_item;
		path->slots[0]++;
		/* step to the next leaf when we walk off the current one */
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			BUG_ON(ret < 0);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* refs for this extent share its objectid; stop past them */
		if (key.objectid != bytenr)
			break;
		if (key.type != BTRFS_EXTENT_REF_V0_KEY)
			continue;
		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref_v0);
		ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
		/* objectids below FIRST_FREE belong to internal trees */
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
			return 1;
		break;
	}
	return 0;
}
#endif
478
/*
 * Create a metadump image of the filesystem at 'input' and write it to
 * 'out'.  Walks the extent tree, queueing every tree-block extent (plus
 * the superblock) through add_metadata()/flush_pending().
 *
 * NOTE(review): the return value of close_ctree() is ignored and the
 * function always returns 0 — errors surface only via BUG_ON aborts.
 */
static int create_metadump(const char *input, FILE *out, int num_threads,
			   int compress_level)
{
	struct btrfs_root *root;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	struct metadump_struct metadump;
	u64 bytenr;
	u64 num_bytes;
	int ret;

	root = open_ctree(input, 0, 0);
	if (!root) {
		fprintf(stderr, "Open ctree failed\n");
		exit(1);
	}

	/* the dump logic assumes a single metadata block size */
	BUG_ON(root->nodesize != root->leafsize);

	ret = metadump_init(&metadump, root, out, num_threads,
			    compress_level);
	BUG_ON(ret);

	/* always dump the 4K superblock first */
	ret = add_metadata(BTRFS_SUPER_INFO_OFFSET, 4096, &metadump);
	BUG_ON(ret);

	extent_root = root->fs_info->extent_root;
	path = btrfs_alloc_path();

	/* start scanning extents just past the superblock */
	bytenr = BTRFS_SUPER_INFO_OFFSET + 4096;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	BUG_ON(ret < 0);

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			BUG_ON(ret < 0);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* skip non-extent items and anything already covered */
		if (key.objectid < bytenr ||
		    key.type != BTRFS_EXTENT_ITEM_KEY) {
			path->slots[0]++;
			continue;
		}

		bytenr = key.objectid;
		num_bytes = key.offset;

		if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
			/* current-format extent item: flags say tree block */
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			if (btrfs_extent_flags(leaf, ei) &
			    BTRFS_EXTENT_FLAG_TREE_BLOCK) {
				ret = add_metadata(bytenr, num_bytes,
						   &metadump);
				BUG_ON(ret);
			}
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			/* v0 extent item: must inspect its ref items */
			if (is_tree_block(extent_root, path, bytenr)) {
				ret = add_metadata(bytenr, num_bytes,
						   &metadump);
				BUG_ON(ret);
			}
#else
			BUG_ON(1);
#endif
		}
		bytenr += num_bytes;
	}

	/* flush the final (possibly partial) cluster */
	ret = flush_pending(&metadump, 1);
	BUG_ON(ret);

	metadump_destroy(&metadump);

	btrfs_free_path(path);
	ret = close_ctree(root);
	return 0;
}
571
/*
 * Rewrite the restored superblock so the image mounts as a metadump:
 * set BTRFS_SUPER_FLAG_METADUMP and replace the sys_chunk_array with a
 * single identity-mapped SYSTEM chunk (length (u64)-1, one stripe at
 * offset 0 on the original device), then recompute the checksum.
 */
static void update_super(u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *key;
	u32 sectorsize = btrfs_super_sectorsize(super);
	u64 flags = btrfs_super_flags(super);

	flags |= BTRFS_SUPER_FLAG_METADUMP;
	btrfs_set_super_flags(super, flags);

	/* the array holds one disk key immediately followed by one chunk */
	key = (struct btrfs_disk_key *)(super->sys_chunk_array);
	chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
				       sizeof(struct btrfs_disk_key));

	btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
	btrfs_set_disk_key_offset(key, 0);

	/* one chunk covering everything: logical == physical */
	btrfs_set_stack_chunk_length(chunk, (u64)-1);
	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
	btrfs_set_stack_chunk_stripe_len(chunk, 64 * 1024);
	btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
	btrfs_set_stack_chunk_io_align(chunk, sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
	btrfs_set_stack_chunk_num_stripes(chunk, 1);
	btrfs_set_stack_chunk_sub_stripes(chunk, 0);
	/* devid/uuid already little-endian on disk; copy verbatim */
	chunk->stripe.devid = super->dev_item.devid;
	chunk->stripe.offset = cpu_to_le64(0);
	memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
	btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
	csum_block(buffer, 4096);
}
606
/*
 * Restore worker thread.  Pulls async_work entries off mdres->list,
 * decompresses them when the image is zlib-compressed, patches the
 * superblock via update_super(), and pwrites each buffer to its
 * original disk offset.  Exits when mdres->done is set and the queue
 * is drained.
 */
static void *restore_worker(void *data)
{
	struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
	struct async_work *async;
	size_t size;
	u8 *buffer;
	u8 *outbuf;
	int outfd;
	int ret;

	outfd = fileno(mdres->out);
	/* scratch buffer sized for the largest possible uncompressed run */
	buffer = malloc(MAX_PENDING_SIZE * 2);
	BUG_ON(!buffer);

	while (1) {
		pthread_mutex_lock(&mdres->mutex);
		while (list_empty(&mdres->list)) {
			if (mdres->done) {
				pthread_mutex_unlock(&mdres->mutex);
				goto out;
			}
			pthread_cond_wait(&mdres->cond, &mdres->mutex);
		}
		async = list_entry(mdres->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&mdres->mutex);

		if (mdres->compress_method == COMPRESS_ZLIB) {
			/* uncompress shrinks 'size' to the actual length */
			size = MAX_PENDING_SIZE * 2;
			ret = uncompress(buffer, (unsigned long *)&size,
					 async->buffer, async->bufsize);
			BUG_ON(ret != Z_OK);
			outbuf = buffer;
		} else {
			outbuf = async->buffer;
			size = async->bufsize;
		}

		/* the superblock needs its chunk array/flags rewritten */
		if (async->start == BTRFS_SUPER_INFO_OFFSET)
			update_super(outbuf);

		ret = pwrite64(outfd, outbuf, size, async->start);
		BUG_ON(ret != size);

		pthread_mutex_lock(&mdres->mutex);
		mdres->num_items--;
		pthread_mutex_unlock(&mdres->mutex);

		free(async->buffer);
		free(async);
	}
out:
	free(buffer);
	pthread_exit(NULL);
}
662
663 static int mdrestore_init(struct mdrestore_struct *mdres,
664                           FILE *in, FILE *out, int num_threads)
665 {
666         int i, ret = 0;
667
668         memset(mdres, 0, sizeof(*mdres));
669         pthread_cond_init(&mdres->cond, NULL);
670         pthread_mutex_init(&mdres->mutex, NULL);
671         INIT_LIST_HEAD(&mdres->list);
672         mdres->in = in;
673         mdres->out = out;
674
675         if (!num_threads)
676                 return 0;
677
678         mdres->num_threads = num_threads;
679         mdres->threads = calloc(num_threads, sizeof(pthread_t));
680         if (!mdres->threads)
681                 return -ENOMEM;
682         for (i = 0; i < num_threads; i++) {
683                 ret = pthread_create(mdres->threads + i, NULL, restore_worker,
684                                      mdres);
685                 if (ret)
686                         break;
687         }
688         return ret;
689 }
690
691 static void mdrestore_destroy(struct mdrestore_struct *mdres)
692 {
693         int i;
694         pthread_mutex_lock(&mdres->mutex);
695         mdres->done = 1;
696         pthread_cond_broadcast(&mdres->cond);
697         pthread_mutex_unlock(&mdres->mutex);
698
699         for (i = 0; i < mdres->num_threads; i++)
700                 pthread_join(mdres->threads[i], NULL);
701
702         pthread_cond_destroy(&mdres->cond);
703         pthread_mutex_destroy(&mdres->mutex);
704         free(mdres->threads);
705 }
706
707 static int add_cluster(struct meta_cluster *cluster,
708                        struct mdrestore_struct *mdres, u64 *next)
709 {
710         struct meta_cluster_item *item;
711         struct meta_cluster_header *header = &cluster->header;
712         struct async_work *async;
713         u64 bytenr;
714         u32 i, nritems;
715         int ret;
716
717         BUG_ON(mdres->num_items);
718         mdres->compress_method = header->compress;
719
720         bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
721         nritems = le32_to_cpu(header->nritems);
722         for (i = 0; i < nritems; i++) {
723                 item = &cluster->items[i];
724                 async = calloc(1, sizeof(*async));
725                 async->start = le64_to_cpu(item->bytenr);
726                 async->bufsize = le32_to_cpu(item->size);
727                 async->buffer = malloc(async->bufsize);
728                 ret = fread(async->buffer, async->bufsize, 1, mdres->in);
729                 BUG_ON(ret != 1);
730                 bytenr += async->bufsize;
731
732                 pthread_mutex_lock(&mdres->mutex);
733                 list_add_tail(&async->list, &mdres->list);
734                 mdres->num_items++;
735                 pthread_cond_signal(&mdres->cond);
736                 pthread_mutex_unlock(&mdres->mutex);
737         }
738         if (bytenr & BLOCK_MASK) {
739                 char buffer[BLOCK_MASK];
740                 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
741
742                 bytenr += size;
743                 ret = fread(buffer, size, 1, mdres->in);
744                 BUG_ON(ret != 1);
745         }
746         *next = bytenr;
747         return 0;
748 }
749
750 static int wait_for_worker(struct mdrestore_struct *mdres)
751 {
752         pthread_mutex_lock(&mdres->mutex);
753         while (mdres->num_items > 0) {
754                 struct timespec ts = {
755                         .tv_sec = 0,
756                         .tv_nsec = 10000000,
757                 };
758                 pthread_mutex_unlock(&mdres->mutex);
759                 nanosleep(&ts, NULL);
760                 pthread_mutex_lock(&mdres->mutex);
761         }
762         pthread_mutex_unlock(&mdres->mutex);
763         return 0;
764 }
765
766 static int restore_metadump(const char *input, FILE *out, int num_threads)
767 {
768         struct meta_cluster *cluster;
769         struct meta_cluster_header *header;
770         struct mdrestore_struct mdrestore;
771         u64 bytenr = 0;
772         FILE *in;
773         int ret;
774
775         if (!strcmp(input, "-")) {
776                 in = stdin;
777         } else {
778                 in = fopen(input, "r");
779                 if (!in) {
780                         perror("unable to open metadump image");
781                         return 1;
782                 }
783         }
784
785         cluster = malloc(BLOCK_SIZE);
786         BUG_ON(!cluster);
787
788         ret = mdrestore_init(&mdrestore, in, out, num_threads);
789         BUG_ON(ret);
790
791         while (1) {
792                 ret = fread(cluster, BLOCK_SIZE, 1, in);
793                 if (!ret)
794                         break;
795
796                 header = &cluster->header;
797                 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
798                     le64_to_cpu(header->bytenr) != bytenr) {
799                         fprintf(stderr, "bad header in metadump image\n");
800                         return 1;
801                 }
802                 ret = add_cluster(cluster, &mdrestore, &bytenr);
803                 BUG_ON(ret);
804
805                 wait_for_worker(&mdrestore);
806         }
807
808         mdrestore_destroy(&mdrestore);
809         free(cluster);
810         if (in != stdin)
811                 fclose(in);
812         return ret;
813 }
814
/* Print command-line usage to stderr and exit with status 1. */
static void print_usage(void)
{
	fprintf(stderr,
		"usage: btrfs-image [options] source target\n"
		"\t-r      \trestore metadump image\n"
		"\t-c value\tcompression level (0 ~ 9)\n"
		"\t-t value\tnumber of threads (1 ~ 32)\n");
	exit(1);
}
823
/*
 * btrfs-image entry point.  Default mode creates an image of 'source'
 * into 'target'; -r restores an image instead.  "-" selects stdin/
 * stdout for the image stream where applicable.
 */
int main(int argc, char *argv[])
{
	char *source;
	char *target;
	int num_threads = 0;
	int compress_level = 0;
	int create = 1;		/* default mode: create an image */
	int ret;
	FILE *out;

	while (1) {
		int c = getopt(argc, argv, "rc:t:");
		if (c < 0)
			break;
		switch (c) {
		case 'r':
			create = 0;
			break;
		case 't':
			num_threads = atoi(optarg);
			if (num_threads <= 0 || num_threads > 32)
				print_usage();
			break;
		case 'c':
			compress_level = atoi(optarg);
			if (compress_level < 0 || compress_level > 9)
				print_usage();
			break;
		default:
			print_usage();
		}
	}

	/* exactly two positional arguments: source and target */
	argc = argc - optind;
	if (argc != 2)
		print_usage();
	source = argv[optind];
	target = argv[optind + 1];

	/* "-" as target means write the created image to stdout */
	if (create && !strcmp(target, "-")) {
		out = stdout;
	} else {
		out = fopen(target, "w+");
		if (!out) {
			perror("unable to create target file");
			exit(1);
		}
	}

	/* auto-pick a worker count only when compression is requested */
	if (num_threads == 0 && compress_level > 0) {
		num_threads = sysconf(_SC_NPROCESSORS_ONLN);
		if (num_threads <= 0)
			num_threads = 1;
	}

	if (create)
		ret = create_metadump(source, out, num_threads,
				      compress_level);
	else
		/* NOTE(review): restore always uses a single worker; -t is
		 * effectively ignored in restore mode — confirm intended */
		ret = restore_metadump(source, out, 1);

	if (out == stdout)
		fflush(out);
	else
		fclose(out);

	exit(ret);
}