btrfs-image.c: fix return values
[platform/upstream/btrfs-progs.git] / btrfs-image.c
1 /*
2  * Copyright (C) 2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #define _XOPEN_SOURCE 500
20 #define _GNU_SOURCE 1
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <fcntl.h>
27 #include <unistd.h>
28 #include <dirent.h>
29 #include <zlib.h>
30 #include "kerncompat.h"
31 #include "crc32c.h"
32 #include "ctree.h"
33 #include "disk-io.h"
34 #include "transaction.h"
35 #include "utils.h"
36 #include "version.h"
37
38
39 #define HEADER_MAGIC            0xbd5c25e27295668bULL
40 #define MAX_PENDING_SIZE        (256 * 1024)
41 #define BLOCK_SIZE              1024
42 #define BLOCK_MASK              (BLOCK_SIZE - 1)
43
44 #define COMPRESS_NONE           0
45 #define COMPRESS_ZLIB           1
46
/* index entry in a cluster: where one dumped run lives and how big it is */
struct meta_cluster_item {
        __le64 bytenr;          /* disk offset the data belongs at */
        __le32 size;            /* stored (possibly compressed) size */
} __attribute__ ((__packed__));
51
/* header at the start of every BLOCK_SIZE-aligned cluster of the image */
struct meta_cluster_header {
        __le64 magic;           /* HEADER_MAGIC identifies a metadump image */
        __le64 bytenr;          /* image offset of this cluster */
        __le32 nritems;         /* number of index items in this cluster */
        u8 compress;            /* COMPRESS_NONE or COMPRESS_ZLIB */
} __attribute__ ((__packed__));
58
/* cluster header + index items + buffers */
struct meta_cluster {
        struct meta_cluster_header header;
        struct meta_cluster_item items[];       /* flexible index array */
} __attribute__ ((__packed__));

/* number of index items that fit in one BLOCK_SIZE cluster block */
#define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
                           sizeof(struct meta_cluster_item))
67
/*
 * One contiguous run of metadata handed to the worker threads.  It sits
 * on the work queue (list) and on the in-order output queue (ordered).
 */
struct async_work {
        struct list_head list;          /* entry on the work queue */
        struct list_head ordered;       /* entry on the output-order queue */
        u64 start;                      /* disk bytenr of the run */
        u64 size;                       /* uncompressed byte count */
        u8 *buffer;                     /* data (replaced once compressed) */
        size_t bufsize;                 /* current size of buffer */
};
76
/* state for creating a metadump image */
struct metadump_struct {
        struct btrfs_root *root;        /* filesystem being dumped */
        FILE *out;                      /* image output stream */

        struct meta_cluster *cluster;   /* BLOCK_SIZE staging block for the
                                           current cluster's index */

        pthread_t *threads;             /* compression workers */
        size_t num_threads;
        pthread_mutex_t mutex;          /* protects the queues and counters */
        pthread_cond_t cond;            /* signals workers that work arrived */

        struct list_head list;          /* buffers awaiting compression */
        struct list_head ordered;       /* buffers in output order */
        size_t num_items;               /* buffers queued for this cluster */
        size_t num_ready;               /* buffers the workers finished */

        u64 pending_start;              /* bytenr of the run being built up */
        u64 pending_size;               /* bytes accumulated so far */

        int compress_level;             /* zlib level; 0 = no compression */
        int done;                       /* tells workers to exit */
};
99
/* state for restoring a metadump image back onto a device */
struct mdrestore_struct {
        FILE *in;                       /* metadump image */
        FILE *out;                      /* target device/file */

        pthread_t *threads;             /* decompress-and-write workers */
        size_t num_threads;
        pthread_mutex_t mutex;          /* protects list and num_items */
        pthread_cond_t cond;            /* signals workers that work arrived */

        struct list_head list;          /* buffers awaiting restore */
        size_t num_items;               /* buffers still in flight */

        int compress_method;            /* taken from the cluster header */
        int done;                       /* tells workers to exit */
};
115
116 static void csum_block(u8 *buf, size_t len)
117 {
118         char result[BTRFS_CRC32_SIZE];
119         u32 crc = ~(u32)0;
120         crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
121         btrfs_csum_final(crc, result);
122         memcpy(buf, result, BTRFS_CRC32_SIZE);
123 }
124
125 /*
126  * zero inline extents and csum items
127  */
/*
 * zero inline extents and csum items
 *
 * The dump must not leak file contents, so wipe the data payload of
 * csum items and inline file extents from the copied leaf in @dst.
 * @src is the original leaf used to locate the items.
 */
static void zero_items(u8 *dst, struct extent_buffer *src)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_item *item;
        struct btrfs_key key;
        u32 nritems = btrfs_header_nritems(src);
        size_t size;
        unsigned long ptr;
        int i, extent_type;

        for (i = 0; i < nritems; i++) {
                item = btrfs_item_nr(src, i);
                btrfs_item_key_to_cpu(src, &key, i);
                /* checksum items: wipe the whole item payload */
                if (key.type == BTRFS_CSUM_ITEM_KEY) {
                        size = btrfs_item_size_nr(src, i);
                        memset(dst + btrfs_leaf_data(src) +
                               btrfs_item_offset_nr(src, i), 0, size);
                        continue;
                }
                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;

                fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(src, fi);
                /* only inline extents embed file data inside the leaf */
                if (extent_type != BTRFS_FILE_EXTENT_INLINE)
                        continue;

                ptr = btrfs_file_extent_inline_start(fi);
                size = btrfs_file_extent_inline_item_len(src, item);
                memset(dst + ptr, 0, size);
        }
}
160
161 /*
162  * copy buffer and zero useless data in the buffer
163  */
/*
 * copy buffer and zero useless data in the buffer
 *
 * Copies @src into @dst, zeroes the unused regions (and sensitive
 * payloads, via zero_items()) and recomputes the block checksum.
 */
static void copy_buffer(u8 *dst, struct extent_buffer *src)
{
        int level;
        size_t size;
        u32 nritems;

        memcpy(dst, src->data, src->len);
        /* the super block is copied verbatim, checksum untouched */
        if (src->start == BTRFS_SUPER_INFO_OFFSET)
                return;

        level = btrfs_header_level(src);
        nritems = btrfs_header_nritems(src);

        if (nritems == 0) {
                /* empty block: zero everything past the header */
                size = sizeof(struct btrfs_header);
                memset(dst + size, 0, src->len - size);
        } else if (level == 0) {
                /* leaf: zero the gap between item headers and item data */
                size = btrfs_leaf_data(src) +
                        btrfs_item_offset_nr(src, nritems - 1) -
                        btrfs_item_nr_offset(nritems);
                memset(dst + btrfs_item_nr_offset(nritems), 0, size);
                zero_items(dst, src);
        } else {
                /* node: zero the unused tail after the key pointers */
                size = offsetof(struct btrfs_node, ptrs) +
                        sizeof(struct btrfs_key_ptr) * nritems;
                memset(dst + size, 0, src->len - size);
        }
        /* the copy was modified above, so its checksum must be redone */
        csum_block(dst, src->len);
}
193
/*
 * Compression worker: pull buffers off md->list and, when compression
 * is enabled, replace each buffer with its zlib-compressed form.
 * Exits once md->done is set and the queue is drained.
 */
static void *dump_worker(void *data)
{
        struct metadump_struct *md = (struct metadump_struct *)data;
        struct async_work *async;
        int ret;

        while (1) {
                pthread_mutex_lock(&md->mutex);
                while (list_empty(&md->list)) {
                        if (md->done) {
                                pthread_mutex_unlock(&md->mutex);
                                goto out;
                        }
                        pthread_cond_wait(&md->cond, &md->mutex);
                }
                async = list_entry(md->list.next, struct async_work, list);
                list_del_init(&async->list);
                pthread_mutex_unlock(&md->mutex);

                if (md->compress_level > 0) {
                        u8 *orig = async->buffer;

                        async->bufsize = compressBound(async->size);
                        async->buffer = malloc(async->bufsize);
                        /* never hand a NULL destination to compress2() */
                        BUG_ON(!async->buffer);

                        ret = compress2(async->buffer,
                                         (unsigned long *)&async->bufsize,
                                         orig, async->size, md->compress_level);
                        BUG_ON(ret != Z_OK);

                        free(orig);
                }

                pthread_mutex_lock(&md->mutex);
                md->num_ready++;
                pthread_mutex_unlock(&md->mutex);
        }
out:
        pthread_exit(NULL);
}
234
235 static void meta_cluster_init(struct metadump_struct *md, u64 start)
236 {
237         struct meta_cluster_header *header;
238
239         md->num_items = 0;
240         md->num_ready = 0;
241         header = &md->cluster->header;
242         header->magic = cpu_to_le64(HEADER_MAGIC);
243         header->bytenr = cpu_to_le64(start);
244         header->nritems = cpu_to_le32(0);
245         header->compress = md->compress_level > 0 ?
246                            COMPRESS_ZLIB : COMPRESS_NONE;
247 }
248
249 static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
250                          FILE *out, int num_threads, int compress_level)
251 {
252         int i, ret;
253
254         memset(md, 0, sizeof(*md));
255         pthread_cond_init(&md->cond, NULL);
256         pthread_mutex_init(&md->mutex, NULL);
257         INIT_LIST_HEAD(&md->list);
258         INIT_LIST_HEAD(&md->ordered);
259         md->root = root;
260         md->out = out;
261         md->pending_start = (u64)-1;
262         md->compress_level = compress_level;
263         md->cluster = calloc(1, BLOCK_SIZE);
264         if (!md->cluster)
265                 return -ENOMEM;
266
267         meta_cluster_init(md, 0);
268         if (!num_threads)
269                 return 0;
270
271         md->num_threads = num_threads;
272         md->threads = calloc(num_threads, sizeof(pthread_t));
273         if (!md->threads)
274                 return -ENOMEM;
275         for (i = 0; i < num_threads; i++) {
276                 ret = pthread_create(md->threads + i, NULL, dump_worker, md);
277                 if (ret)
278                         break;
279         }
280         return ret;
281 }
282
283 static void metadump_destroy(struct metadump_struct *md)
284 {
285         int i;
286         pthread_mutex_lock(&md->mutex);
287         md->done = 1;
288         pthread_cond_broadcast(&md->cond);
289         pthread_mutex_unlock(&md->mutex);
290
291         for (i = 0; i < md->num_threads; i++)
292                 pthread_join(md->threads[i], NULL);
293
294         pthread_cond_destroy(&md->cond);
295         pthread_mutex_destroy(&md->mutex);
296         free(md->threads);
297         free(md->cluster);
298 }
299
300 static int write_zero(FILE *out, size_t size)
301 {
302         static char zero[BLOCK_SIZE];
303         return fwrite(zero, size, 1, out);
304 }
305
/*
 * Write one complete cluster: the BLOCK_SIZE index block followed by
 * every queued buffer, padded up to the next BLOCK_SIZE boundary.
 * Called with md->mutex held (it is dropped/retaken while waiting for
 * the compression workers).  *next receives the image offset where the
 * next cluster starts.
 * NOTE(review): with an empty ordered list *next ends up 0 — only the
 * final done-flush can hit this; verify callers tolerate it.
 */
static int write_buffers(struct metadump_struct *md, u64 *next)
{
        struct meta_cluster_header *header = &md->cluster->header;
        struct meta_cluster_item *item;
        struct async_work *async;
        u64 bytenr = 0;
        u32 nritems = 0;
        int ret;

        if (list_empty(&md->ordered))
                goto out;

        /* wait until all buffers are compressed */
        while (md->num_items > md->num_ready) {
                struct timespec ts = {
                        .tv_sec = 0,
                        .tv_nsec = 10000000,
                };
                pthread_mutex_unlock(&md->mutex);
                nanosleep(&ts, NULL);
                pthread_mutex_lock(&md->mutex);
        }

        /* setup and write index block */
        list_for_each_entry(async, &md->ordered, ordered) {
                item = md->cluster->items + nritems;
                item->bytenr = cpu_to_le64(async->start);
                item->size = cpu_to_le32(async->bufsize);
                nritems++;
        }
        header->nritems = cpu_to_le32(nritems);

        ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
        BUG_ON(ret != 1);

        /* write buffers in the same order as the index entries */
        bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
        while (!list_empty(&md->ordered)) {
                async = list_entry(md->ordered.next, struct async_work,
                                   ordered);
                list_del_init(&async->ordered);

                bytenr += async->bufsize;
                ret = fwrite(async->buffer, async->bufsize, 1, md->out);
                BUG_ON(ret != 1);

                free(async->buffer);
                free(async);
        }

        /* zero unused space in the last block */
        if (bytenr & BLOCK_MASK) {
                size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

                bytenr += size;
                ret = write_zero(md->out, size);
                BUG_ON(ret != 1);
        }
out:
        *next = bytenr;
        return 0;
}
368
369 static int flush_pending(struct metadump_struct *md, int done)
370 {
371         struct async_work *async = NULL;
372         struct extent_buffer *eb;
373         u64 blocksize = md->root->nodesize;
374         u64 start;
375         u64 size;
376         size_t offset;
377         int ret;
378
379         if (md->pending_size) {
380                 async = calloc(1, sizeof(*async));
381                 if (!async)
382                         return -ENOMEM;
383
384                 async->start = md->pending_start;
385                 async->size = md->pending_size;
386                 async->bufsize = async->size;
387                 async->buffer = malloc(async->bufsize);
388
389                 offset = 0;
390                 start = async->start;
391                 size = async->size;
392                 while (size > 0) {
393                         eb = read_tree_block(md->root, start, blocksize, 0);
394                         BUG_ON(!eb);
395                         copy_buffer(async->buffer + offset, eb);
396                         free_extent_buffer(eb);
397                         start += blocksize;
398                         offset += blocksize;
399                         size -= blocksize;
400                 }
401
402                 md->pending_start = (u64)-1;
403                 md->pending_size = 0;
404         } else if (!done) {
405                 return 0;
406         }
407
408         pthread_mutex_lock(&md->mutex);
409         if (async) {
410                 list_add_tail(&async->ordered, &md->ordered);
411                 md->num_items++;
412                 if (md->compress_level > 0) {
413                         list_add_tail(&async->list, &md->list);
414                         pthread_cond_signal(&md->cond);
415                 } else {
416                         md->num_ready++;
417                 }
418         }
419         if (md->num_items >= ITEMS_PER_CLUSTER || done) {
420                 ret = write_buffers(md, &start);
421                 BUG_ON(ret);
422                 meta_cluster_init(md, start);
423         }
424         pthread_mutex_unlock(&md->mutex);
425         return 0;
426 }
427
428 static int add_metadata(u64 start, u64 size, struct metadump_struct *md)
429 {
430         int ret;
431         if (md->pending_size + size > MAX_PENDING_SIZE ||
432             md->pending_start + md->pending_size != start) {
433                 ret = flush_pending(md, 0);
434                 if (ret)
435                         return ret;
436                 md->pending_start = start;
437         }
438         readahead_tree_block(md->root, start, size, 0);
439         md->pending_size += size;
440         return 0;
441 }
442
443 static int create_metadump(const char *input, FILE *out, int num_threads,
444                            int compress_level)
445 {
446         struct btrfs_root *root;
447         struct btrfs_root *extent_root;
448         struct btrfs_path *path;
449         struct extent_buffer *leaf;
450         struct btrfs_extent_ref *ref_item;
451         struct btrfs_key key;
452         struct metadump_struct metadump;
453         u64 bytenr;
454         u64 num_bytes;
455         u64 ref_objectid;
456         int ret;
457
458         root = open_ctree(input, 0, 0);
459         BUG_ON(root->nodesize != root->leafsize);
460
461         ret = metadump_init(&metadump, root, out, num_threads,
462                             compress_level);
463         BUG_ON(ret);
464
465         ret = add_metadata(BTRFS_SUPER_INFO_OFFSET, 4096, &metadump);
466         BUG_ON(ret);
467
468         extent_root = root->fs_info->extent_root;
469         path = btrfs_alloc_path();
470
471         bytenr = BTRFS_SUPER_INFO_OFFSET + 4096;
472         key.objectid = bytenr;
473         key.type = BTRFS_EXTENT_ITEM_KEY;
474         key.offset = 0;
475
476         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
477         BUG_ON(ret < 0);
478
479         while (1) {
480                 leaf = path->nodes[0];
481                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
482                         ret = btrfs_next_leaf(extent_root, path);
483                         BUG_ON(ret < 0);
484                         if (ret > 0)
485                                 break;
486                         leaf = path->nodes[0];
487                 }
488
489                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
490                 if (key.objectid < bytenr ||
491                     key.type != BTRFS_EXTENT_ITEM_KEY) {
492                         path->slots[0]++;
493                         continue;
494                 }
495
496                 bytenr = key.objectid;
497                 num_bytes = key.offset;
498                 while (1) {
499                         path->slots[0]++;
500                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
501                                 ret = btrfs_next_leaf(extent_root, path);
502                                 BUG_ON(ret < 0);
503                                 if (ret > 0)
504                                         break;
505                                 leaf = path->nodes[0];
506                         }
507                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
508                         if (key.objectid != bytenr)
509                                 break;
510                         if (key.type != BTRFS_EXTENT_REF_KEY)
511                                 continue;
512                         ref_item = btrfs_item_ptr(leaf, path->slots[0],
513                                                   struct btrfs_extent_ref);
514                         ref_objectid = btrfs_ref_objectid(leaf, ref_item);
515                         if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
516                                 ret = add_metadata(bytenr, num_bytes,
517                                                    &metadump);
518                                 BUG_ON(ret);
519                                 break;
520                         }
521                 }
522                 bytenr += num_bytes;
523         }
524
525         ret = flush_pending(&metadump, 1);
526         BUG_ON(ret);
527
528         metadump_destroy(&metadump);
529
530         btrfs_free_path(path);
531         ret = close_ctree(root);
532         return 0;
533 }
534
/*
 * Fix up the restored copy of the super block: set the METADUMP flag
 * and replace the system chunk array with a single SYSTEM chunk that
 * maps the entire disk 1:1 onto one stripe of the original device, so
 * the restored image can be opened without the real chunk tree layout.
 */
static void update_super(u8 *buffer)
{
        struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
        struct btrfs_chunk *chunk;
        struct btrfs_disk_key *key;
        u32 sectorsize = btrfs_super_sectorsize(super);
        u64 flags = btrfs_super_flags(super);

        flags |= BTRFS_SUPER_FLAG_METADUMP;
        btrfs_set_super_flags(super, flags);

        /* the array holds one disk_key immediately followed by one chunk */
        key = (struct btrfs_disk_key *)(super->sys_chunk_array);
        chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
                                       sizeof(struct btrfs_disk_key));

        btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
        btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
        btrfs_set_disk_key_offset(key, 0);

        /* length (u64)-1 makes the single chunk cover everything */
        btrfs_set_stack_chunk_length(chunk, (u64)-1);
        btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
        btrfs_set_stack_chunk_stripe_len(chunk, 64 * 1024);
        btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
        btrfs_set_stack_chunk_io_align(chunk, sectorsize);
        btrfs_set_stack_chunk_io_width(chunk, sectorsize);
        btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
        btrfs_set_stack_chunk_num_stripes(chunk, 1);
        btrfs_set_stack_chunk_sub_stripes(chunk, 0);
        chunk->stripe.devid = super->dev_item.devid;
        chunk->stripe.offset = cpu_to_le64(0);
        memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
        btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
        /* the super was modified, recompute its checksum over 4k */
        csum_block(buffer, 4096);
}
569
/*
 * Restore worker: pull buffers off mdres->list, decompress them when
 * the image is zlib-compressed, patch the super block copy, and
 * pwrite each buffer back to its original disk offset.
 */
static void *restore_worker(void *data)
{
        struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
        struct async_work *async;
        size_t size;
        u8 *buffer;
        u8 *outbuf;
        int outfd;
        int ret;

        outfd = fileno(mdres->out);
        /* decompression scratch space; presumably 2x MAX_PENDING_SIZE is
         * always enough for one item — TODO confirm against dump side */
        buffer = malloc(MAX_PENDING_SIZE * 2);
        BUG_ON(!buffer);

        while (1) {
                pthread_mutex_lock(&mdres->mutex);
                while (list_empty(&mdres->list)) {
                        if (mdres->done) {
                                pthread_mutex_unlock(&mdres->mutex);
                                goto out;
                        }
                        pthread_cond_wait(&mdres->cond, &mdres->mutex);
                }
                async = list_entry(mdres->list.next, struct async_work, list);
                list_del_init(&async->list);
                pthread_mutex_unlock(&mdres->mutex);

                if (mdres->compress_method == COMPRESS_ZLIB) {
                        size = MAX_PENDING_SIZE * 2;
                        ret = uncompress(buffer, (unsigned long *)&size,
                                         async->buffer, async->bufsize);
                        BUG_ON(ret != Z_OK);
                        outbuf = buffer;
                } else {
                        outbuf = async->buffer;
                        size = async->bufsize;
                }

                /* the super copy needs its chunk array rebuilt */
                if (async->start == BTRFS_SUPER_INFO_OFFSET)
                        update_super(outbuf);

                ret = pwrite64(outfd, outbuf, size, async->start);
                BUG_ON(ret != size);

                pthread_mutex_lock(&mdres->mutex);
                mdres->num_items--;
                pthread_mutex_unlock(&mdres->mutex);

                free(async->buffer);
                free(async);
        }
out:
        free(buffer);
        pthread_exit(NULL);
}
625
626 static int mdresotre_init(struct mdrestore_struct *mdres,
627                           FILE *in, FILE *out, int num_threads)
628 {
629         int i, ret = 0;
630
631         memset(mdres, 0, sizeof(*mdres));
632         pthread_cond_init(&mdres->cond, NULL);
633         pthread_mutex_init(&mdres->mutex, NULL);
634         INIT_LIST_HEAD(&mdres->list);
635         mdres->in = in;
636         mdres->out = out;
637
638         if (!num_threads)
639                 return 0;
640
641         mdres->num_threads = num_threads;
642         mdres->threads = calloc(num_threads, sizeof(pthread_t));
643         if (!mdres->threads)
644                 return -ENOMEM;
645         for (i = 0; i < num_threads; i++) {
646                 ret = pthread_create(mdres->threads + i, NULL, restore_worker,
647                                      mdres);
648                 if (ret)
649                         break;
650         }
651         return ret;
652 }
653
654 static void mdresotre_destroy(struct mdrestore_struct *mdres)
655 {
656         int i;
657         pthread_mutex_lock(&mdres->mutex);
658         mdres->done = 1;
659         pthread_cond_broadcast(&mdres->cond);
660         pthread_mutex_unlock(&mdres->mutex);
661
662         for (i = 0; i < mdres->num_threads; i++)
663                 pthread_join(mdres->threads[i], NULL);
664
665         pthread_cond_destroy(&mdres->cond);
666         pthread_mutex_destroy(&mdres->mutex);
667         free(mdres->threads);
668 }
669
670 static int add_cluster(struct meta_cluster *cluster,
671                        struct mdrestore_struct *mdres, u64 *next)
672 {
673         struct meta_cluster_item *item;
674         struct meta_cluster_header *header = &cluster->header;
675         struct async_work *async;
676         u64 bytenr;
677         u32 i, nritems;
678         int ret;
679
680         BUG_ON(mdres->num_items);
681         mdres->compress_method = header->compress;
682
683         bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
684         nritems = le32_to_cpu(header->nritems);
685         for (i = 0; i < nritems; i++) {
686                 item = &cluster->items[i];
687                 async = calloc(1, sizeof(*async));
688                 async->start = le64_to_cpu(item->bytenr);
689                 async->bufsize = le32_to_cpu(item->size);
690                 async->buffer = malloc(async->bufsize);
691                 ret = fread(async->buffer, async->bufsize, 1, mdres->in);
692                 BUG_ON(ret != 1);
693                 bytenr += async->bufsize;
694
695                 pthread_mutex_lock(&mdres->mutex);
696                 list_add_tail(&async->list, &mdres->list);
697                 mdres->num_items++;
698                 pthread_cond_signal(&mdres->cond);
699                 pthread_mutex_unlock(&mdres->mutex);
700         }
701         if (bytenr & BLOCK_MASK) {
702                 char buffer[BLOCK_MASK];
703                 size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);
704
705                 bytenr += size;
706                 ret = fread(buffer, size, 1, mdres->in);
707                 BUG_ON(ret != 1);
708         }
709         *next = bytenr;
710         return 0;
711 }
712
713 static int wait_for_worker(struct mdrestore_struct *mdres)
714 {
715         pthread_mutex_lock(&mdres->mutex);
716         while (mdres->num_items > 0) {
717                 struct timespec ts = {
718                         .tv_sec = 0,
719                         .tv_nsec = 10000000,
720                 };
721                 pthread_mutex_unlock(&mdres->mutex);
722                 nanosleep(&ts, NULL);
723                 pthread_mutex_lock(&mdres->mutex);
724         }
725         pthread_mutex_unlock(&mdres->mutex);
726         return 0;
727 }
728
729 static int restore_metadump(const char *input, FILE *out, int num_threads)
730 {
731         struct meta_cluster *cluster;
732         struct meta_cluster_header *header;
733         struct mdrestore_struct mdrestore;
734         u64 bytenr = 0;
735         FILE *in;
736         int ret;
737
738         if (!strcmp(input, "-")) {
739                 in = stdin;
740         } else {
741                 in = fopen(input, "r");
742                 if (!in) {
743                         perror("unable to open metadump image");
744                         return 1;
745                 }
746         }
747
748         cluster = malloc(BLOCK_SIZE);
749         BUG_ON(!cluster);
750
751         ret = mdresotre_init(&mdrestore, in, out, num_threads);
752         BUG_ON(ret);
753
754         while (1) {
755                 ret = fread(cluster, BLOCK_SIZE, 1, in);
756                 if (!ret)
757                         break;
758
759                 header = &cluster->header;
760                 if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
761                     le64_to_cpu(header->bytenr) != bytenr) {
762                         fprintf(stderr, "bad header in metadump image\n");
763                         return 1;
764                 }
765                 ret = add_cluster(cluster, &mdrestore, &bytenr);
766                 BUG_ON(ret);
767
768                 wait_for_worker(&mdrestore);
769         }
770
771         mdresotre_destroy(&mdrestore);
772         free(cluster);
773         if (in != stdin)
774                 fclose(in);
775         return ret;
776 }
777
/* print usage to stderr and terminate with exit code 1 */
static void print_usage(void)
{
        fputs("usage: btrfs-image [options] source target\n"
              "\t-r      \trestore metadump image\n"
              "\t-c value\tcompression level (0 ~ 9)\n"
              "\t-t value\tnumber of threads (1 ~ 32)\n", stderr);
        exit(1);
}
786
787 int main(int argc, char *argv[])
788 {
789         char *source;
790         char *target;
791         int num_threads = 0;
792         int compress_level = 0;
793         int create = 1;
794         int ret;
795         FILE *out;
796
797         while (1) {
798                 int c = getopt(argc, argv, "rc:t:");
799                 if (c < 0)
800                         break;
801                 switch (c) {
802                 case 'r':
803                         create = 0;
804                         break;
805                 case 't':
806                         num_threads = atoi(optarg);
807                         if (num_threads <= 0 || num_threads > 32)
808                                 print_usage();
809                         break;
810                 case 'c':
811                         compress_level = atoi(optarg);
812                         if (compress_level < 0 || compress_level > 9)
813                                 print_usage();
814                         break;
815                 default:
816                         print_usage();
817                 }
818         }
819
820         argc = argc - optind;
821         if (argc != 2)
822                 print_usage();
823         source = argv[optind];
824         target = argv[optind + 1];
825
826         if (create && !strcmp(target, "-")) {
827                 out = stdout;
828         } else {
829                 out = fopen(target, "w+");
830                 if (!out) {
831                         perror("unable to create target file");
832                         exit(1);
833                 }
834         }
835
836         if (num_threads == 0 && compress_level > 0) {
837                 num_threads = sysconf(_SC_NPROCESSORS_ONLN);
838                 if (num_threads <= 0)
839                         num_threads = 1;
840         }
841
842         if (create)
843                 ret = create_metadump(source, out, num_threads,
844                                       compress_level);
845         else
846                 ret = restore_metadump(source, out, 1);
847
848         if (out == stdout)
849                 fflush(out);
850         else
851                 fclose(out);
852
853         exit(ret);
854 }