* Boston, MA 021110-1307, USA.
*/
-#define _XOPEN_SOURCE 500
-#define _GNU_SOURCE 1
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <dirent.h>
#include <zlib.h>
+#include <getopt.h>
+
#include "kerncompat.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "utils.h"
-#include "version.h"
#include "volumes.h"
#include "extent_io.h"
struct fs_chunk {
u64 logical;
u64 physical;
+ /*
+ * physical_dup only stores the additional physical offset for
+ * BTRFS_BLOCK_GROUP_DUP; currently restore only supports single and DUP
+ * TODO: modify this structure and the functions that use it to
+ * support RAID*
+ */
+ u64 physical_dup;
u64 bytes;
- struct rb_node n;
+ struct rb_node l;
+ struct rb_node p;
+ struct list_head list;
};
struct async_work {
pthread_cond_t cond;
struct rb_root chunk_tree;
+ struct rb_root physical_tree;
struct list_head list;
+ struct list_head overlapping_chunks;
size_t num_items;
- u32 leafsize;
+ u32 nodesize;
u64 devid;
+ u64 alloced_chunks;
+ u64 last_physical_offset;
u8 uuid[BTRFS_UUID_SIZE];
u8 fsid[BTRFS_FSID_SIZE];
int old_restore;
int fixup_offset;
int multi_devices;
+ int clear_space_cache;
struct btrfs_fs_info *info;
};
-static void print_usage(void) __attribute__((noreturn));
static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
u64 search, u64 cluster_bytenr);
static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);
return NULL;
for (i = 0; i < name_len; i++) {
- char c = rand() % 94 + 33;
+ char c = rand_range(94) + 33;
if (c == '/')
c++;
static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
- struct fs_chunk *entry = rb_entry(a, struct fs_chunk, n);
- struct fs_chunk *ins = rb_entry(b, struct fs_chunk, n);
+ struct fs_chunk *entry = rb_entry(a, struct fs_chunk, l);
+ struct fs_chunk *ins = rb_entry(b, struct fs_chunk, l);
if (fuzz && ins->logical >= entry->logical &&
ins->logical < entry->logical + entry->bytes)
return 0;
}
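+/*
+ * Compare fs_chunks by physical offset; with fuzz set, ranges that overlap
+ * compare as equal.
+ */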
+static int physical_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
+{
+ struct fs_chunk *entry = rb_entry(a, struct fs_chunk, p);
+ struct fs_chunk *ins = rb_entry(b, struct fs_chunk, p);
+
+ if (fuzz && ins->physical >= entry->physical &&
+ ins->physical < entry->physical + entry->bytes)
+ return 0;
+
+ if (fuzz && entry->physical >= ins->physical &&
+ entry->physical < ins->physical + ins->bytes)
+ return 0;
+
+ if (ins->physical < entry->physical)
+ return -1;
+ else if (ins->physical > entry->physical)
+ return 1;
+ return 0;
+}
+
static void tree_insert(struct rb_root *root, struct rb_node *ins,
int (*cmp)(struct rb_node *a, struct rb_node *b,
int fuzz))
while(*p) {
parent = *p;
- dir = cmp(*p, ins, 0);
+ dir = cmp(*p, ins, 1);
if (dir < 0)
p = &(*p)->rb_left;
else if (dir > 0)
return NULL;
}
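+/*
+ * Map a logical address from the image to its physical offset on the restore
+ * target. For DUP chunks the second stripe's offset is also returned via
+ * *physical_dup, and *size is clamped to the end of the containing chunk.
+ */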
+static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical,
+ u64 *size, u64 *physical_dup)
+{
+ struct fs_chunk *fs_chunk;
+ struct rb_node *entry;
+ struct fs_chunk search;
+ u64 offset;
+
+ if (logical == BTRFS_SUPER_INFO_OFFSET)
+ return logical;
+
+ search.logical = logical;
+ entry = tree_search(&mdres->chunk_tree, &search.l, chunk_cmp, 1);
+ if (!entry) {
+ if (mdres->in != stdin)
+ printf("Couldn't find a chunk, using logical\n");
+ return logical;
+ }
+ fs_chunk = rb_entry(entry, struct fs_chunk, l);
+ if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
+ BUG();
+ offset = search.logical - fs_chunk->logical;
+
+ if (physical_dup) {
+ /* physical_dup is non-zero only in the DUP case */
+ if (fs_chunk->physical_dup)
+ *physical_dup = fs_chunk->physical_dup + offset;
+ else
+ *physical_dup = 0;
+ }
+
+ *size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
+ return fs_chunk->physical + offset;
+}
+
+
static char *find_collision(struct metadump_struct *md, char *name,
u32 name_len)
{
"generating normal garbage, it won't match indexes\n",
val->len, val->val);
for (i = 0; i < name_len; i++) {
- char c = rand() % 94 + 33;
+ char c = rand_range(94) + 33;
if (c == '/')
c++;
async->bufsize = compressBound(async->size);
async->buffer = malloc(async->bufsize);
if (!async->buffer) {
- fprintf(stderr, "Error allocing buffer\n");
+ fprintf(stderr, "Error allocating buffer\n");
pthread_mutex_lock(&md->mutex);
if (!md->error)
md->error = -ENOMEM;
int i, ret = 0;
memset(md, 0, sizeof(*md));
- pthread_cond_init(&md->cond, NULL);
- pthread_mutex_init(&md->mutex, NULL);
+ md->cluster = calloc(1, BLOCK_SIZE);
+ if (!md->cluster)
+ return -ENOMEM;
+ md->threads = calloc(num_threads, sizeof(pthread_t));
+ if (!md->threads) {
+ free(md->cluster);
+ return -ENOMEM;
+ }
INIT_LIST_HEAD(&md->list);
INIT_LIST_HEAD(&md->ordered);
md->root = root;
md->out = out;
md->pending_start = (u64)-1;
md->compress_level = compress_level;
- md->cluster = calloc(1, BLOCK_SIZE);
md->sanitize_names = sanitize_names;
if (sanitize_names > 1)
crc32c_optimization_init();
- if (!md->cluster) {
- pthread_cond_destroy(&md->cond);
- pthread_mutex_destroy(&md->mutex);
- return -ENOMEM;
- }
-
+ md->name_tree.rb_node = NULL;
+ md->num_threads = num_threads;
+ pthread_cond_init(&md->cond, NULL);
+ pthread_mutex_init(&md->mutex, NULL);
meta_cluster_init(md, 0);
+
if (!num_threads)
return 0;
- md->name_tree.rb_node = NULL;
- md->num_threads = num_threads;
- md->threads = calloc(num_threads, sizeof(pthread_t));
- if (!md->threads) {
- free(md->cluster);
- pthread_cond_destroy(&md->cond);
- pthread_mutex_destroy(&md->mutex);
- return -ENOMEM;
- }
-
for (i = 0; i < num_threads; i++) {
ret = pthread_create(md->threads + i, NULL, dump_worker, md);
if (ret)
static int read_data_extent(struct metadump_struct *md,
struct async_work *async)
{
- struct btrfs_multi_bio *multi = NULL;
- struct btrfs_device *device;
+ struct btrfs_root *root = md->root;
u64 bytes_left = async->size;
u64 logical = async->start;
u64 offset = 0;
- u64 bytenr;
u64 read_len;
- ssize_t done;
- int fd;
+ int num_copies;
+ int cur_mirror;
int ret;
- while (bytes_left) {
- read_len = bytes_left;
- ret = btrfs_map_block(&md->root->fs_info->mapping_tree, READ,
- logical, &read_len, &multi, 0, NULL);
- if (ret) {
- fprintf(stderr, "Couldn't map data block %d\n", ret);
- return ret;
- }
-
- device = multi->stripes[0].dev;
+ num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, logical,
+ bytes_left);
- if (device->fd == 0) {
- fprintf(stderr,
- "Device we need to read from is not open\n");
- free(multi);
- return -EIO;
- }
- fd = device->fd;
- bytenr = multi->stripes[0].physical;
- free(multi);
-
- read_len = min(read_len, bytes_left);
- done = pread64(fd, async->buffer+offset, read_len, bytenr);
- if (done < read_len) {
- if (done < 0)
- fprintf(stderr, "Error reading extent %d\n",
- errno);
- else
- fprintf(stderr, "Short read\n");
- return -EIO;
+ /* Try our best to read data, just like read_tree_block() */
+ for (cur_mirror = 0; cur_mirror < num_copies; cur_mirror++) {
+ while (bytes_left) {
+ read_len = bytes_left;
+ ret = read_extent_data(root,
+ (char *)(async->buffer + offset),
+ logical, &read_len, cur_mirror);
+ if (ret < 0)
+ break;
+ offset += read_len;
+ logical += read_len;
+ bytes_left -= read_len;
}
-
- bytes_left -= done;
- offset += done;
- logical += done;
}
-
+ if (bytes_left)
+ return -EIO;
return 0;
}
+static int get_dev_fd(struct btrfs_root *root)
+{
+ struct btrfs_device *dev;
+
+ dev = list_first_entry(&root->fs_info->fs_devices->devices,
+ struct btrfs_device, dev_list);
+ return dev->fd;
+}
+
static int flush_pending(struct metadump_struct *md, int done)
{
struct async_work *async = NULL;
}
}
+ /*
+ * Balance can make the mapping not cover the super block, so
+ * just copy directly from one of the devices.
+ */
+ if (start == BTRFS_SUPER_INFO_OFFSET) {
+ int fd = get_dev_fd(md->root);
+
+ ret = pread64(fd, async->buffer, size, start);
+ if (ret < size) {
+ free(async->buffer);
+ free(async);
+ fprintf(stderr, "Error reading superblock\n");
+ return -EIO;
+ }
+ size = 0;
+ ret = 0;
+ }
+
while (!md->data && size > 0) {
u64 this_read = min(blocksize, size);
eb = read_tree_block(md->root, start, this_read, 0);
- if (!eb) {
+ if (!extent_buffer_uptodate(eb)) {
free(async->buffer);
free(async);
fprintf(stderr,
int i = 0;
int ret;
- ret = add_extent(btrfs_header_bytenr(eb), root->leafsize, metadump, 0);
+ ret = add_extent(btrfs_header_bytenr(eb), root->nodesize, metadump, 0);
if (ret) {
fprintf(stderr, "Error adding metadata block\n");
return ret;
continue;
ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
bytenr = btrfs_disk_root_bytenr(eb, ri);
- tmp = read_tree_block(root, bytenr, root->leafsize, 0);
- if (!tmp) {
+ tmp = read_tree_block(root, bytenr, root->nodesize, 0);
+ if (!extent_buffer_uptodate(tmp)) {
fprintf(stderr,
"Error reading log root block\n");
return -EIO;
return ret;
} else {
bytenr = btrfs_node_blockptr(eb, i);
- tmp = read_tree_block(root, bytenr, root->leafsize, 0);
- if (!tmp) {
+ tmp = read_tree_block(root, bytenr, root->nodesize, 0);
+ if (!extent_buffer_uptodate(tmp)) {
fprintf(stderr, "Error reading log block\n");
return -EIO;
}
int ret;
extent_root = metadump->root->fs_info->extent_root;
- bytenr = BTRFS_SUPER_INFO_OFFSET + 4096;
+ bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
key.objectid = bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = 0;
bytenr = key.objectid;
if (key.type == BTRFS_METADATA_ITEM_KEY)
- num_bytes = extent_root->leafsize;
+ num_bytes = extent_root->nodesize;
else
num_bytes = key.offset;
return -EIO;
}
- BUG_ON(root->nodesize != root->leafsize);
-
ret = metadump_init(&metadump, root, out, num_threads,
compress_level, sanitize);
if (ret) {
- fprintf(stderr, "Error initing metadump %d\n", ret);
+ fprintf(stderr, "Error initializing metadump %d\n", ret);
close_ctree(root);
return ret;
}
- ret = add_extent(BTRFS_SUPER_INFO_OFFSET, 4096, &metadump, 0);
+ ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
+ &metadump, 0);
if (ret) {
fprintf(stderr, "Error adding metadata %d\n", ret);
err = ret;
path = btrfs_alloc_path();
if (!path) {
- fprintf(stderr, "Out of memory allocing path\n");
+ fprintf(stderr, "Out of memory allocating path\n");
err = -ENOMEM;
goto out;
}
btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
- csum_block(buffer, 4096);
+ csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
}
-static int update_super(u8 *buffer)
+static int update_super(struct mdrestore_struct *mdres, u8 *buffer)
{
struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
struct btrfs_chunk *chunk;
struct btrfs_disk_key *disk_key;
struct btrfs_key key;
+ u64 flags = btrfs_super_flags(super);
u32 new_array_size = 0;
u32 array_size;
u32 cur = 0;
cur += sizeof(*disk_key);
if (key.type == BTRFS_CHUNK_ITEM_KEY) {
+ u64 type, physical, physical_dup, size = 0;
+
chunk = (struct btrfs_chunk *)ptr;
old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
chunk = (struct btrfs_chunk *)write_ptr;
memmove(write_ptr, ptr, sizeof(*chunk));
- btrfs_set_stack_chunk_num_stripes(chunk, 1);
btrfs_set_stack_chunk_sub_stripes(chunk, 0);
- btrfs_set_stack_chunk_type(chunk,
- BTRFS_BLOCK_GROUP_SYSTEM);
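+ /* DUP chunks keep both stripes; everything else collapses to a single SYSTEM stripe */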
+ type = btrfs_stack_chunk_type(chunk);
+ if (type & BTRFS_BLOCK_GROUP_DUP) {
+ new_array_size += sizeof(struct btrfs_stripe);
+ write_ptr += sizeof(struct btrfs_stripe);
+ } else {
+ btrfs_set_stack_chunk_num_stripes(chunk, 1);
+ btrfs_set_stack_chunk_type(chunk,
+ BTRFS_BLOCK_GROUP_SYSTEM);
+ }
chunk->stripe.devid = super->dev_item.devid;
+ physical = logical_to_physical(mdres, key.offset,
+ &size, &physical_dup);
+ if (size != (u64)-1)
+ btrfs_set_stack_stripe_offset(&chunk->stripe,
+ physical);
memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
BTRFS_UUID_SIZE);
new_array_size += sizeof(*chunk);
cur += btrfs_chunk_item_size(old_num_stripes);
}
+ if (mdres->clear_space_cache)
+ btrfs_set_super_cache_generation(super, 0);
+
+ flags |= BTRFS_SUPER_FLAG_METADUMP_V2;
+ btrfs_set_super_flags(super, flags);
btrfs_set_super_sys_array_size(super, new_array_size);
- csum_block(buffer, 4096);
+ csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
return 0;
}
{
struct extent_buffer *eb;
- eb = malloc(sizeof(struct extent_buffer) + size);
+ eb = calloc(1, sizeof(struct extent_buffer) + size);
if (!eb)
return NULL;
- memset(eb, 0, sizeof(struct extent_buffer) + size);
eb->start = bytenr;
eb->len = size;
u64 bytenr = async->start;
int i;
- if (size_left % mdres->leafsize)
+ if (size_left % mdres->nodesize)
return 0;
- eb = alloc_dummy_eb(bytenr, mdres->leafsize);
+ eb = alloc_dummy_eb(bytenr, mdres->nodesize);
if (!eb)
return -ENOMEM;
while (size_left) {
eb->start = bytenr;
- memcpy(eb->data, buffer, mdres->leafsize);
+ memcpy(eb->data, buffer, mdres->nodesize);
if (btrfs_header_bytenr(eb) != bytenr)
break;
goto next;
for (i = 0; i < btrfs_header_nritems(eb); i++) {
- struct btrfs_chunk chunk;
+ struct btrfs_chunk *chunk;
struct btrfs_key key;
- u64 type;
+ u64 type, physical, physical_dup, size = (u64)-1;
btrfs_item_key_to_cpu(eb, &key, i);
if (key.type != BTRFS_CHUNK_ITEM_KEY)
continue;
- truncate_item(eb, i, sizeof(chunk));
- read_extent_buffer(eb, &chunk,
- btrfs_item_ptr_offset(eb, i),
- sizeof(chunk));
+
+ size = 0;
+ physical = logical_to_physical(mdres, key.offset,
+ &size, &physical_dup);
+
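+ /* non-DUP chunks are truncated to a single embedded stripe; DUP keeps both */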
+ if (!physical_dup)
+ truncate_item(eb, i, sizeof(*chunk));
+ chunk = btrfs_item_ptr(eb, i, struct btrfs_chunk);
+
/* Zero out the RAID profile */
- type = btrfs_stack_chunk_type(&chunk);
+ type = btrfs_chunk_type(eb, chunk);
type &= (BTRFS_BLOCK_GROUP_DATA |
BTRFS_BLOCK_GROUP_SYSTEM |
BTRFS_BLOCK_GROUP_METADATA |
BTRFS_BLOCK_GROUP_DUP);
- btrfs_set_stack_chunk_type(&chunk, type);
-
- btrfs_set_stack_chunk_num_stripes(&chunk, 1);
- btrfs_set_stack_chunk_sub_stripes(&chunk, 0);
- btrfs_set_stack_stripe_devid(&chunk.stripe, mdres->devid);
- memcpy(chunk.stripe.dev_uuid, mdres->uuid,
- BTRFS_UUID_SIZE);
- write_extent_buffer(eb, &chunk,
- btrfs_item_ptr_offset(eb, i),
- sizeof(chunk));
+ btrfs_set_chunk_type(eb, chunk, type);
+
+ if (!physical_dup)
+ btrfs_set_chunk_num_stripes(eb, chunk, 1);
+ btrfs_set_chunk_sub_stripes(eb, chunk, 0);
+ btrfs_set_stripe_devid_nr(eb, chunk, 0, mdres->devid);
+ if (size != (u64)-1)
+ btrfs_set_stripe_offset_nr(eb, chunk, 0,
+ physical);
+ /* update the second stripe's offset */
+ if (physical_dup)
+ btrfs_set_stripe_offset_nr(eb, chunk, 1,
+ physical_dup);
+
+ write_extent_buffer(eb, mdres->uuid,
+ (unsigned long)btrfs_stripe_dev_uuid_nr(
+ chunk, 0),
+ BTRFS_UUID_SIZE);
}
memcpy(buffer, eb->data, eb->len);
csum_block(buffer, eb->len);
next:
- size_left -= mdres->leafsize;
- buffer += mdres->leafsize;
- bytenr += mdres->leafsize;
+ size_left -= mdres->nodesize;
+ buffer += mdres->nodesize;
+ bytenr += mdres->nodesize;
}
free(eb);
for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- if (bytenr + 4096 > size)
+ if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
break;
btrfs_set_super_bytenr(super, bytenr);
- csum_block(buf, 4096);
- ret = pwrite64(fd, buf, 4096, bytenr);
- if (ret < 4096) {
+ csum_block(buf, BTRFS_SUPER_INFO_SIZE);
+ ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
+ if (ret < BTRFS_SUPER_INFO_SIZE) {
if (ret < 0)
fprintf(stderr, "Problem writing out backup "
"super block %d, err %d\n", i, errno);
}
}
-static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical, u64 *size)
-{
- struct fs_chunk *fs_chunk;
- struct rb_node *entry;
- struct fs_chunk search;
- u64 offset;
-
- if (logical == BTRFS_SUPER_INFO_OFFSET)
- return logical;
-
- search.logical = logical;
- entry = tree_search(&mdres->chunk_tree, &search.n, chunk_cmp, 1);
- if (!entry) {
- if (mdres->in != stdin)
- printf("Couldn't find a chunk, using logical\n");
- return logical;
- }
- fs_chunk = rb_entry(entry, struct fs_chunk, n);
- if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
- BUG();
- offset = search.logical - fs_chunk->logical;
-
- *size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
- return fs_chunk->physical + offset;
-}
-
static void *restore_worker(void *data)
{
struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
outfd = fileno(mdres->out);
buffer = malloc(compress_size);
if (!buffer) {
- fprintf(stderr, "Error allocing buffer\n");
+ fprintf(stderr, "Error allocating buffer\n");
pthread_mutex_lock(&mdres->mutex);
if (!mdres->error)
mdres->error = -ENOMEM;
pthread_mutex_unlock(&mdres->mutex);
- goto out;
+ pthread_exit(NULL);
}
while (1) {
- u64 bytenr;
+ u64 bytenr, physical_dup;
off_t offset = 0;
int err = 0;
pthread_mutex_lock(&mdres->mutex);
- while (!mdres->leafsize || list_empty(&mdres->list)) {
+ while (!mdres->nodesize || list_empty(&mdres->list)) {
if (mdres->done) {
pthread_mutex_unlock(&mdres->mutex);
goto out;
if (mdres->old_restore) {
update_super_old(outbuf);
} else {
- ret = update_super(outbuf);
+ ret = update_super(mdres, outbuf);
if (ret)
err = ret;
}
if (!mdres->fixup_offset) {
while (size) {
u64 chunk_size = size;
- if (!mdres->multi_devices)
+ physical_dup = 0;
+ if (!mdres->multi_devices && !mdres->old_restore)
bytenr = logical_to_physical(mdres,
- async->start + offset,
- &chunk_size);
+ async->start + offset,
+ &chunk_size,
+ &physical_dup);
else
bytenr = async->start + offset;
ret = pwrite64(outfd, outbuf+offset, chunk_size,
bytenr);
- if (ret != chunk_size) {
- if (ret < 0) {
- fprintf(stderr, "Error writing to "
- "device %d\n", errno);
- err = errno;
- break;
- } else {
- fprintf(stderr, "Short write\n");
- err = -EIO;
- break;
- }
- }
+ if (ret != chunk_size)
+ goto error;
+
+ if (physical_dup)
+ ret = pwrite64(outfd, outbuf+offset,
+ chunk_size,
+ physical_dup);
+ if (ret != chunk_size)
+ goto error;
+
size -= chunk_size;
offset += chunk_size;
+ continue;
+
+error:
+ if (ret < 0) {
+ fprintf(stderr, "Error writing to device %d\n",
+ errno);
+ err = errno;
+ } else {
+ fprintf(stderr, "Short write\n");
+ err = -EIO;
+ }
+ break;
}
} else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
while ((n = rb_first(&mdres->chunk_tree))) {
struct fs_chunk *entry;
- entry = rb_entry(n, struct fs_chunk, n);
+ entry = rb_entry(n, struct fs_chunk, l);
rb_erase(n, &mdres->chunk_tree);
+ rb_erase(&entry->p, &mdres->physical_tree);
free(entry);
}
pthread_mutex_lock(&mdres->mutex);
pthread_cond_init(&mdres->cond, NULL);
pthread_mutex_init(&mdres->mutex, NULL);
INIT_LIST_HEAD(&mdres->list);
+ INIT_LIST_HEAD(&mdres->overlapping_chunks);
mdres->in = in;
mdres->out = out;
mdres->old_restore = old_restore;
mdres->fixup_offset = fixup_offset;
mdres->info = info;
mdres->multi_devices = multi_devices;
+ mdres->clear_space_cache = 0;
+ mdres->last_physical_offset = 0;
+ mdres->alloced_chunks = 0;
if (!num_threads)
return 0;
int ret;
/* We've already been initialized */
- if (mdres->leafsize)
+ if (mdres->nodesize)
return 0;
if (mdres->compress_method == COMPRESS_ZLIB) {
}
super = (struct btrfs_super_block *)outbuf;
- mdres->leafsize = btrfs_super_leafsize(super);
+ mdres->nodesize = btrfs_super_nodesize(super);
memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
memcpy(mdres->uuid, super->dev_item.uuid,
BTRFS_UUID_SIZE);
u32 i, nritems;
int ret;
- BUG_ON(mdres->num_items);
mdres->compress_method = header->compress;
bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
async->bufsize = le32_to_cpu(item->size);
async->buffer = malloc(async->bufsize);
if (!async->buffer) {
- fprintf(stderr, "Error allocing async buffer\n");
+ fprintf(stderr, "Error allocating async buffer\n");
free(async);
return -ENOMEM;
}
int ret = 0;
int i;
- eb = alloc_dummy_eb(bytenr, mdres->leafsize);
+ eb = alloc_dummy_eb(bytenr, mdres->nodesize);
if (!eb) {
ret = -ENOMEM;
goto out;
}
while (item_bytenr != bytenr) {
- buffer += mdres->leafsize;
- item_bytenr += mdres->leafsize;
+ buffer += mdres->nodesize;
+ item_bytenr += mdres->nodesize;
}
- memcpy(eb->data, buffer, mdres->leafsize);
+ memcpy(eb->data, buffer, mdres->nodesize);
if (btrfs_header_bytenr(eb) != bytenr) {
fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
ret = -EIO;
}
for (i = 0; i < btrfs_header_nritems(eb); i++) {
- struct btrfs_chunk chunk;
+ struct btrfs_chunk *chunk;
struct fs_chunk *fs_chunk;
struct btrfs_key key;
+ u64 type;
if (btrfs_header_level(eb)) {
u64 blockptr = btrfs_node_blockptr(eb, i);
fs_chunk = malloc(sizeof(struct fs_chunk));
if (!fs_chunk) {
- fprintf(stderr, "Erorr allocating chunk\n");
+ fprintf(stderr, "Error allocating chunk\n");
ret = -ENOMEM;
break;
}
memset(fs_chunk, 0, sizeof(*fs_chunk));
- read_extent_buffer(eb, &chunk, btrfs_item_ptr_offset(eb, i),
- sizeof(chunk));
+ chunk = btrfs_item_ptr(eb, i, struct btrfs_chunk);
fs_chunk->logical = key.offset;
- fs_chunk->physical = btrfs_stack_stripe_offset(&chunk.stripe);
- fs_chunk->bytes = btrfs_stack_chunk_length(&chunk);
- tree_insert(&mdres->chunk_tree, &fs_chunk->n, chunk_cmp);
+ fs_chunk->physical = btrfs_stripe_offset_nr(eb, chunk, 0);
+ fs_chunk->bytes = btrfs_chunk_length(eb, chunk);
+ INIT_LIST_HEAD(&fs_chunk->list);
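+ /* chunks whose physical range overlaps an existing chunk are queued for remapping later */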
+ if (tree_search(&mdres->physical_tree, &fs_chunk->p,
+ physical_cmp, 1) != NULL)
+ list_add(&fs_chunk->list, &mdres->overlapping_chunks);
+ else
+ tree_insert(&mdres->physical_tree, &fs_chunk->p,
+ physical_cmp);
+
+ type = btrfs_chunk_type(eb, chunk);
+ if (type & BTRFS_BLOCK_GROUP_DUP) {
+ fs_chunk->physical_dup =
+ btrfs_stripe_offset_nr(eb, chunk, 1);
+ }
+
+ if (fs_chunk->physical_dup + fs_chunk->bytes >
+ mdres->last_physical_offset)
+ mdres->last_physical_offset = fs_chunk->physical_dup +
+ fs_chunk->bytes;
+ else if (fs_chunk->physical + fs_chunk->bytes >
+ mdres->last_physical_offset)
+ mdres->last_physical_offset = fs_chunk->physical +
+ fs_chunk->bytes;
+ mdres->alloced_chunks += fs_chunk->bytes;
+ /* in the DUP case, fs_chunk->bytes must be counted twice */
+ if (fs_chunk->physical_dup)
+ mdres->alloced_chunks += fs_chunk->bytes;
+ tree_insert(&mdres->chunk_tree, &fs_chunk->l, chunk_cmp);
}
out:
free(eb);
buffer = malloc(max_size);
if (!buffer) {
- fprintf(stderr, "Error allocing buffer\n");
+ fprintf(stderr, "Error allocating buffer\n");
free(cluster);
return -ENOMEM;
}
if (mdres->compress_method == COMPRESS_ZLIB) {
tmp = malloc(max_size);
if (!tmp) {
- fprintf(stderr, "Error allocing tmp buffer\n");
+ fprintf(stderr, "Error allocating tmp buffer\n");
free(cluster);
free(buffer);
return -ENOMEM;
buffer = malloc(le32_to_cpu(item->size));
if (!buffer) {
- fprintf(stderr, "Error allocing buffer\n");
+ fprintf(stderr, "Error allocating buffer\n");
return -ENOMEM;
}
pthread_mutex_lock(&mdres->mutex);
super = (struct btrfs_super_block *)buffer;
chunk_root_bytenr = btrfs_super_chunk_root(super);
- mdres->leafsize = btrfs_super_leafsize(super);
+ mdres->nodesize = btrfs_super_nodesize(super);
memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
memcpy(mdres->uuid, super->dev_item.uuid,
BTRFS_UUID_SIZE);
return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
}
-static int __restore_metadump(const char *input, FILE *out, int old_restore,
- int num_threads, int fixup_offset,
- const char *target, int multi_devices)
+static int range_contains_super(u64 physical, u64 bytes)
+{
+ u64 super_bytenr;
+ int i;
+
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ super_bytenr = btrfs_sb_offset(i);
+ if (super_bytenr >= physical &&
+ super_bytenr < physical + bytes)
+ return 1;
+ }
+
+ return 0;
+}
+
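+/*
+ * Give each overlapping chunk a fresh physical offset past the highest offset
+ * seen so far; if the old range covered a superblock mirror, flag the space
+ * cache to be cleared to avoid corruption.
+ */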
+static void remap_overlapping_chunks(struct mdrestore_struct *mdres)
+{
+ struct fs_chunk *fs_chunk;
+
+ while (!list_empty(&mdres->overlapping_chunks)) {
+ fs_chunk = list_first_entry(&mdres->overlapping_chunks,
+ struct fs_chunk, list);
+ list_del_init(&fs_chunk->list);
+ if (range_contains_super(fs_chunk->physical,
+ fs_chunk->bytes)) {
+ fprintf(stderr, "Remapping a chunk that had a super "
+ "mirror inside of it, clearing space cache "
+ "so we don't end up with corruption\n");
+ mdres->clear_space_cache = 1;
+ }
+ fs_chunk->physical = mdres->last_physical_offset;
+ tree_insert(&mdres->physical_tree, &fs_chunk->p, physical_cmp);
+ mdres->last_physical_offset += fs_chunk->bytes;
+ }
+}
+
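+/*
+ * Fix up the dev items in the restored image: size the single target device
+ * to the output file and delete dev items belonging to any other devid.
+ */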
+static int fixup_devices(struct btrfs_fs_info *fs_info,
+ struct mdrestore_struct *mdres, off_t dev_size)
+{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_dev_item *dev_item;
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ struct btrfs_root *root = fs_info->chunk_root;
+ struct btrfs_key key;
+ u64 devid, cur_devid;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ fprintf(stderr, "Error allocating path\n");
+ return -ENOMEM;
+ }
+
+ trans = btrfs_start_transaction(fs_info->tree_root, 1);
+ if (IS_ERR(trans)) {
+ fprintf(stderr, "Error starting transaction %ld\n",
+ PTR_ERR(trans));
+ btrfs_free_path(path);
+ return PTR_ERR(trans);
+ }
+
+ dev_item = &fs_info->super_copy->dev_item;
+
+ devid = btrfs_stack_device_id(dev_item);
+
+ btrfs_set_stack_device_total_bytes(dev_item, dev_size);
+ btrfs_set_stack_device_bytes_used(dev_item, mdres->alloced_chunks);
+
+ key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+ key.type = BTRFS_DEV_ITEM_KEY;
+ key.offset = 0;
+
+again:
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret < 0) {
+ fprintf(stderr, "search failed %d\n", ret);
+ exit(1);
+ }
+
+ while (1) {
+ leaf = path->nodes[0];
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0) {
+ fprintf(stderr, "Error going to next leaf "
+ "%d\n", ret);
+ exit(1);
+ }
+ if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ leaf = path->nodes[0];
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.type > BTRFS_DEV_ITEM_KEY)
+ break;
+ if (key.type != BTRFS_DEV_ITEM_KEY) {
+ path->slots[0]++;
+ continue;
+ }
+
+ dev_item = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_dev_item);
+ cur_devid = btrfs_device_id(leaf, dev_item);
+ if (devid != cur_devid) {
+ ret = btrfs_del_item(trans, root, path);
+ if (ret) {
+ fprintf(stderr, "Error deleting item %d\n",
+ ret);
+ exit(1);
+ }
+ btrfs_release_path(path);
+ goto again;
+ }
+
+ btrfs_set_device_total_bytes(leaf, dev_item, dev_size);
+ btrfs_set_device_bytes_used(leaf, dev_item,
+ mdres->alloced_chunks);
+ btrfs_mark_buffer_dirty(leaf);
+ path->slots[0]++;
+ }
+
+ btrfs_free_path(path);
+ ret = btrfs_commit_transaction(trans, fs_info->tree_root);
+ if (ret) {
+ fprintf(stderr, "Commit failed %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int restore_metadump(const char *input, FILE *out, int old_restore,
+ int num_threads, int fixup_offset,
+ const char *target, int multi_devices)
{
struct meta_cluster *cluster = NULL;
struct meta_cluster_header *header;
/* NOTE: open with write mode */
if (fixup_offset) {
BUG_ON(!target);
- info = open_ctree_fs_info(target, 0, 0,
+ info = open_ctree_fs_info(target, 0, 0, 0,
OPEN_CTREE_WRITES |
OPEN_CTREE_RESTORE |
OPEN_CTREE_PARTIAL);
ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
fixup_offset, info, multi_devices);
if (ret) {
- fprintf(stderr, "Error initing mdrestore %d\n", ret);
+ fprintf(stderr, "Error initializing mdrestore %d\n", ret);
goto failed_cluster;
}
- if (!multi_devices) {
+ if (!multi_devices && !old_restore) {
ret = build_chunk_tree(&mdrestore, cluster);
if (ret)
goto out;
+ if (!list_empty(&mdrestore.overlapping_chunks))
+ remap_overlapping_chunks(&mdrestore);
}
if (in != stdin && fseek(in, 0, SEEK_SET)) {
goto out;
}
- while (1) {
+ while (!mdrestore.error) {
ret = fread(cluster, BLOCK_SIZE, 1, in);
if (!ret)
break;
fprintf(stderr, "Error adding cluster\n");
break;
}
+ }
+ ret = wait_for_worker(&mdrestore);
- ret = wait_for_worker(&mdrestore);
- if (ret) {
- fprintf(stderr, "One of the threads errored out %d\n",
- ret);
- break;
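+ /* single-device new-style restore: reopen the result and fix up device sizes */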
+ if (!ret && !multi_devices && !old_restore) {
+ struct btrfs_root *root;
+ struct stat st;
+
+ root = open_ctree_fd(fileno(out), target, 0,
+ OPEN_CTREE_PARTIAL |
+ OPEN_CTREE_WRITES |
+ OPEN_CTREE_NO_DEVICES);
+ if (!root) {
+ fprintf(stderr, "unable to open %s\n", target);
+ ret = -EIO;
+ goto out;
}
+ info = root->fs_info;
+
+ if (stat(target, &st)) {
+ fprintf(stderr, "statting %s failed\n", target);
+ close_ctree(info->chunk_root);
+ free(cluster);
+ return 1;
+ }
+
+ ret = fixup_devices(info, &mdrestore, st.st_size);
+ close_ctree(info->chunk_root);
+ if (ret)
+ goto out;
}
out:
mdrestore_destroy(&mdrestore, num_threads);
return ret;
}
-static int restore_metadump(const char *input, FILE *out, int old_restore,
- int num_threads, int multi_devices)
-{
- return __restore_metadump(input, out, old_restore, num_threads, 0, NULL,
- multi_devices);
-}
-
-static int fixup_metadump(const char *input, FILE *out, int num_threads,
- const char *target)
-{
- return __restore_metadump(input, out, 0, num_threads, 1, target, 1);
-}
-
static int update_disk_super_on_device(struct btrfs_fs_info *info,
const char *other_dev, u64 cur_devid)
{
char fs_uuid[BTRFS_UUID_SIZE];
u64 devid, type, io_align, io_width;
u64 sector_size, total_bytes, bytes_used;
- char *buf;
- int fp;
+ char buf[BTRFS_SUPER_INFO_SIZE];
+ int fp = -1;
int ret;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
btrfs_init_path(&path);
ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
if (ret) {
- fprintf(stderr, "search key fails\n");
- exit(1);
+ fprintf(stderr, "ERROR: search key failed\n");
+ ret = -EIO;
+ goto out;
}
leaf = path.nodes[0];
devid = btrfs_device_id(leaf, dev_item);
if (devid != cur_devid) {
- printk("devid %llu mismatch with %llu\n", devid, cur_devid);
- exit(1);
+ printk("ERROR: devid %llu mismatch with %llu\n", devid, cur_devid);
+ ret = -EIO;
+ goto out;
}
type = btrfs_device_type(leaf, dev_item);
/* update other devices' super block */
fp = open(other_dev, O_CREAT | O_RDWR, 0600);
if (fp < 0) {
- fprintf(stderr, "could not open %s\n", other_dev);
- exit(1);
- }
-
- buf = malloc(BTRFS_SUPER_INFO_SIZE);
- if (!buf) {
- ret = -ENOMEM;
- close(fp);
- return ret;
+ fprintf(stderr, "ERROR: could not open %s\n", other_dev);
+ ret = -EIO;
+ goto out;
}
memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);
ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
if (ret != BTRFS_SUPER_INFO_SIZE) {
+ if (ret < 0)
+ fprintf(stderr, "ERROR: cannot write superblock: %s\n", strerror(errno));
+ else
+ fprintf(stderr, "ERROR: cannot write superblock\n");
ret = -EIO;
goto out;
}
write_backup_supers(fp, (u8 *)buf);
out:
- free(buf);
- close(fp);
- return 0;
+ if (fp != -1)
+ close(fp);
+ return ret;
}
-static void print_usage(void)
+static void print_usage(int ret)
{
fprintf(stderr, "usage: btrfs-image [options] source target\n");
fprintf(stderr, "\t-r \trestore metadump image\n");
fprintf(stderr, "\t-s \tsanitize file names, use once to just use garbage, use twice if you want crc collisions\n");
fprintf(stderr, "\t-w \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
fprintf(stderr, "\t-m \trestore for multiple devices\n");
- exit(1);
+ fprintf(stderr, "\n");
+ fprintf(stderr, "\tIn dump mode, source is the btrfs device and target is the output file (use '-' for stdout).\n");
+ fprintf(stderr, "\tIn restore mode, source is the dumped image and target is the btrfs device/file.\n");
+ exit(ret);
}
int main(int argc, char *argv[])
FILE *out;
while (1) {
- int c = getopt(argc, argv, "rc:t:oswm");
+ static const struct option long_options[] = {
+ { "help", no_argument, NULL, GETOPT_VAL_HELP},
+ { NULL, 0, NULL, 0 }
+ };
+ int c = getopt_long(argc, argv, "rc:t:oswm", long_options, NULL);
if (c < 0)
break;
switch (c) {
case 't':
num_threads = arg_strtou64(optarg);
if (num_threads > 32)
- print_usage();
+ print_usage(1);
break;
case 'c':
compress_level = arg_strtou64(optarg);
if (compress_level > 9)
- print_usage();
+ print_usage(1);
break;
case 'o':
old_restore = 1;
create = 0;
multi_devices = 1;
break;
+ case GETOPT_VAL_HELP:
default:
- print_usage();
+ print_usage(c != GETOPT_VAL_HELP);
}
}
- argc = argc - optind;
- dev_cnt = argc - 1;
+ set_argv0(argv);
+ if (check_argc_min(argc - optind, 2))
+ print_usage(1);
+
+ dev_cnt = argc - optind - 1;
if (create) {
if (old_restore) {
}
if (usage_error)
- print_usage();
+ print_usage(1);
source = argv[optind];
target = argv[optind + 1];
}
}
- if (num_threads == 0 && compress_level > 0) {
- num_threads = sysconf(_SC_NPROCESSORS_ONLN);
- if (num_threads <= 0)
- num_threads = 1;
+ if (compress_level > 0 || create == 0) {
+ if (num_threads == 0) {
+ long tmp = sysconf(_SC_NPROCESSORS_ONLN);
+
+ if (tmp <= 0)
+ tmp = 1;
+ num_threads = tmp;
+ }
+ } else {
+ num_threads = 0;
}
- if (create)
+ if (create) {
+ ret = check_mounted(source);
+ if (ret < 0) {
+ fprintf(stderr, "Could not check mount status: %s\n",
+ strerror(-ret));
+ exit(1);
+ } else if (ret)
+ fprintf(stderr,
+ "WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");
+
ret = create_metadump(source, out, num_threads,
compress_level, sanitize, walk_trees);
- else
- ret = restore_metadump(source, out, old_restore, 1,
- multi_devices);
+ } else {
+ ret = restore_metadump(source, out, old_restore, num_threads,
+ 0, target, multi_devices);
+ }
if (ret) {
printk("%s failed (%s)\n", (create) ? "create" : "restore",
strerror(errno));
u64 total_devs;
int i;
- info = open_ctree_fs_info(target, 0, 0,
+ info = open_ctree_fs_info(target, 0, 0, 0,
OPEN_CTREE_PARTIAL |
OPEN_CTREE_RESTORE);
if (!info) {
- int e = errno;
fprintf(stderr, "unable to open %s error = %s\n",
- target, strerror(e));
+ target, strerror(errno));
return 1;
}
close_ctree(info->chunk_root);
/* fix metadata block to map correct chunk */
- ret = fixup_metadump(source, out, 1, target);
+ ret = restore_metadump(source, out, 0, num_threads, 1,
+ target, 1);
if (ret) {
fprintf(stderr, "fix metadump failed (error=%d)\n",
ret);
exit(1);
}
}
-
out:
if (out == stdout) {
fflush(out);
}
}
+ btrfs_close_all_devices();
+
return !!ret;
}