+static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
+ u64 bytenr, u64 item_bytenr, u32 bufsize,
+ u64 cluster_bytenr)
+{
+ struct extent_buffer *eb;
+ int ret = 0;
+ u32 i;
+
+ /*
+ * Parse the chunk tree block at logical address @bytenr out of a
+ * decompressed metadump item. @buffer holds @bufsize bytes starting
+ * at logical address @item_bytenr. Interior nodes recurse through
+ * search_for_chunk_blocks(); chunk items found in leaves are recorded
+ * in mdres->chunk_tree / mdres->physical_tree.
+ *
+ * Returns 0 on success, -ENOMEM or -EIO on failure.
+ */
+
+ /*
+ * Make sure the whole block actually fits inside this item before we
+ * touch the buffer. bufsize was previously ignored, so a corrupted
+ * image could walk the pointer past the end of the allocation (or
+ * loop forever on a misaligned bytenr).
+ */
+ if (bytenr < item_bytenr ||
+ bytenr - item_bytenr + mdres->nodesize > bufsize) {
+ fprintf(stderr, "Eb bytenr is outside of the item\n");
+ return -EIO;
+ }
+
+ eb = alloc_dummy_eb(bytenr, mdres->nodesize);
+ if (!eb)
+ return -ENOMEM;
+
+ /* Skip ahead to where our block starts inside the item. */
+ buffer += bytenr - item_bytenr;
+
+ memcpy(eb->data, buffer, mdres->nodesize);
+ if (btrfs_header_bytenr(eb) != bytenr) {
+ fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
+ BTRFS_FSID_SIZE)) {
+ fprintf(stderr, "Fsid doesn't match\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
+ fprintf(stderr, "Does not belong to the chunk tree\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < btrfs_header_nritems(eb); i++) {
+ struct btrfs_chunk *chunk;
+ struct fs_chunk *fs_chunk;
+ struct btrfs_key key;
+ u64 type;
+
+ /* Interior node: chase every child block pointer. */
+ if (btrfs_header_level(eb)) {
+ u64 blockptr = btrfs_node_blockptr(eb, i);
+
+ ret = search_for_chunk_blocks(mdres, blockptr,
+ cluster_bytenr);
+ if (ret)
+ break;
+ continue;
+ }
+
+ /* Yay a leaf! We loves leafs! */
+ btrfs_item_key_to_cpu(eb, &key, i);
+ if (key.type != BTRFS_CHUNK_ITEM_KEY)
+ continue;
+
+ fs_chunk = calloc(1, sizeof(*fs_chunk));
+ if (!fs_chunk) {
+ fprintf(stderr, "Error allocating chunk\n");
+ ret = -ENOMEM;
+ break;
+ }
+ chunk = btrfs_item_ptr(eb, i, struct btrfs_chunk);
+
+ fs_chunk->logical = key.offset;
+ fs_chunk->physical = btrfs_stripe_offset_nr(eb, chunk, 0);
+ fs_chunk->bytes = btrfs_chunk_length(eb, chunk);
+ INIT_LIST_HEAD(&fs_chunk->list);
+ /*
+ * Chunks whose physical range collides with an already-seen
+ * chunk are queued for remapping later instead of being
+ * inserted in the physical tree now.
+ */
+ if (tree_search(&mdres->physical_tree, &fs_chunk->p,
+ physical_cmp, 1) != NULL)
+ list_add(&fs_chunk->list, &mdres->overlapping_chunks);
+ else
+ tree_insert(&mdres->physical_tree, &fs_chunk->p,
+ physical_cmp);
+
+ type = btrfs_chunk_type(eb, chunk);
+ if (type & BTRFS_BLOCK_GROUP_DUP) {
+ fs_chunk->physical_dup =
+ btrfs_stripe_offset_nr(eb, chunk, 1);
+ }
+
+ /* Track the highest physical offset any stripe reaches. */
+ if (fs_chunk->physical_dup + fs_chunk->bytes >
+ mdres->last_physical_offset)
+ mdres->last_physical_offset = fs_chunk->physical_dup +
+ fs_chunk->bytes;
+ else if (fs_chunk->physical + fs_chunk->bytes >
+ mdres->last_physical_offset)
+ mdres->last_physical_offset = fs_chunk->physical +
+ fs_chunk->bytes;
+ mdres->alloced_chunks += fs_chunk->bytes;
+ /* in dup case, fs_chunk->bytes should add twice */
+ if (fs_chunk->physical_dup)
+ mdres->alloced_chunks += fs_chunk->bytes;
+ tree_insert(&mdres->chunk_tree, &fs_chunk->l, chunk_cmp);
+ }
+out:
+ free(eb);
+ return ret;
+}
+
+/* If you have to ask you aren't worthy */
+/*
+ * Scan the metadump image for the metadata block at logical address
+ * @search, starting with the cluster at file offset @cluster_bytenr.
+ * When the containing item is found it is decompressed (if needed) and
+ * handed to read_chunk_block(), which may recurse back into us for
+ * interior node children.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
+ u64 search, u64 cluster_bytenr)
+{
+ struct meta_cluster *cluster;
+ struct meta_cluster_header *header;
+ struct meta_cluster_item *item;
+ u64 current_cluster = cluster_bytenr, bytenr;
+ u64 item_bytenr;
+ u32 bufsize, nritems, i;
+ u32 max_size = MAX_PENDING_SIZE * 2;
+ u8 *buffer, *tmp = NULL;
+ int ret = 0;
+
+ cluster = malloc(BLOCK_SIZE);
+ if (!cluster) {
+ fprintf(stderr, "Error allocating cluster\n");
+ return -ENOMEM;
+ }
+
+ buffer = malloc(max_size);
+ if (!buffer) {
+ fprintf(stderr, "Error allocating buffer\n");
+ free(cluster);
+ return -ENOMEM;
+ }
+
+ /* Scratch buffer for the compressed item bytes, zlib images only. */
+ if (mdres->compress_method == COMPRESS_ZLIB) {
+ tmp = malloc(max_size);
+ if (!tmp) {
+ fprintf(stderr, "Error allocating tmp buffer\n");
+ free(cluster);
+ free(buffer);
+ return -ENOMEM;
+ }
+ }
+
+ bytenr = current_cluster;
+ /* Walk cluster headers until the item covering @search shows up. */
+ while (1) {
+ if (fseek(mdres->in, current_cluster, SEEK_SET)) {
+ fprintf(stderr, "Error seeking: %d\n", errno);
+ ret = -EIO;
+ break;
+ }
+
+ ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
+ if (ret == 0) {
+ /*
+ * Hit EOF before finding the block; if we started
+ * mid-image, wrap around once and rescan from the
+ * beginning of the file.
+ */
+ if (cluster_bytenr != 0) {
+ cluster_bytenr = 0;
+ current_cluster = 0;
+ bytenr = 0;
+ continue;
+ }
+ /* Debug breadcrumb: block not present anywhere. */
+ printf("ok this is where we screwed up?\n");
+ ret = -EIO;
+ break;
+ } else if (ret < 0) {
+ /*
+ * NOTE(review): fread() returns a size_t item count and
+ * never goes negative, so this branch looks
+ * unreachable; kept as-is.
+ */
+ fprintf(stderr, "Error reading image\n");
+ break;
+ }
+ ret = 0;
+
+ header = &cluster->header;
+ if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
+ le64_to_cpu(header->bytenr) != current_cluster) {
+ fprintf(stderr, "bad header in metadump image\n");
+ ret = -EIO;
+ break;
+ }
+
+ bytenr += BLOCK_SIZE;
+ nritems = le32_to_cpu(header->nritems);
+ for (i = 0; i < nritems; i++) {
+ size_t size;
+
+ item = &cluster->items[i];
+ bufsize = le32_to_cpu(item->size);
+ item_bytenr = le64_to_cpu(item->bytenr);
+
+ /* Reject items larger than our scratch buffers. */
+ if (bufsize > max_size) {
+ fprintf(stderr, "item %u size %u too big\n",
+ i, bufsize);
+ ret = -EIO;
+ break;
+ }
+
+ if (mdres->compress_method == COMPRESS_ZLIB) {
+ ret = fread(tmp, bufsize, 1, mdres->in);
+ if (ret != 1) {
+ fprintf(stderr, "Error reading: %d\n",
+ errno);
+ ret = -EIO;
+ break;
+ }
+
+ size = max_size;
+ ret = uncompress(buffer,
+ (unsigned long *)&size, tmp,
+ bufsize);
+ if (ret != Z_OK) {
+ fprintf(stderr, "Error decompressing "
+ "%d\n", ret);
+ ret = -EIO;
+ break;
+ }
+ } else {
+ ret = fread(buffer, bufsize, 1, mdres->in);
+ if (ret != 1) {
+ fprintf(stderr, "Error reading: %d\n",
+ errno);
+ ret = -EIO;
+ break;
+ }
+ size = bufsize;
+ }
+ ret = 0;
+
+ /* Does this item's logical range cover @search? */
+ if (item_bytenr <= search &&
+ item_bytenr + size > search) {
+ ret = read_chunk_block(mdres, buffer, search,
+ item_bytenr, size,
+ current_cluster);
+ if (!ret)
+ ret = 1;
+ break;
+ }
+ bytenr += bufsize;
+ }
+ /* ret == 1 means "found and parsed"; normalize to success. */
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ break;
+ }
+ /* Clusters are BLOCK_SIZE aligned; round up to the next one. */
+ if (bytenr & BLOCK_MASK)
+ bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
+ current_cluster = bytenr;
+ }
+
+ free(tmp);
+ free(buffer);
+ free(cluster);
+ return ret;
+}
+
+static int build_chunk_tree(struct mdrestore_struct *mdres,
+ struct meta_cluster *cluster)
+{
+ struct btrfs_super_block *super;
+ struct meta_cluster_header *header;
+ struct meta_cluster_item *item = NULL;
+ u64 chunk_root_bytenr = 0;
+ u32 i, nritems;
+ u64 bytenr = 0;
+ u8 *buffer;
+ int ret;
+
+ /*
+ * Read the first cluster of the image, locate the superblock item in
+ * it, pull the chunk root location / nodesize / fsid / devid out of
+ * the super and then walk the image for the chunk tree blocks.
+ *
+ * Returns 0 on success (or immediately when reading from a pipe,
+ * where we cannot seek), negative errno on failure.
+ */
+
+ /* We can't seek with stdin so don't bother doing this */
+ if (mdres->in == stdin)
+ return 0;
+
+ ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
+ if (ret != 1) {
+ fprintf(stderr, "Error reading in cluster: %d\n", errno);
+ return -EIO;
+ }
+ ret = 0;
+
+ header = &cluster->header;
+ if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
+ le64_to_cpu(header->bytenr) != 0) {
+ fprintf(stderr, "bad header in metadump image\n");
+ return -EIO;
+ }
+
+ bytenr += BLOCK_SIZE;
+ mdres->compress_method = header->compress;
+ nritems = le32_to_cpu(header->nritems);
+ /* Seek past items until we hit the one holding the superblock. */
+ for (i = 0; i < nritems; i++) {
+ item = &cluster->items[i];
+
+ if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
+ break;
+ bytenr += le32_to_cpu(item->size);
+ if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
+ fprintf(stderr, "Error seeking: %d\n", errno);
+ return -EIO;
+ }
+ }
+
+ if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
+ fprintf(stderr, "Huh, didn't find the super?\n");
+ return -EINVAL;
+ }
+
+ buffer = malloc(le32_to_cpu(item->size));
+ if (!buffer) {
+ fprintf(stderr, "Error allocating buffer\n");
+ return -ENOMEM;
+ }
+
+ ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
+ if (ret != 1) {
+ fprintf(stderr, "Error reading buffer: %d\n", errno);
+ free(buffer);
+ return -EIO;
+ }
+
+ if (mdres->compress_method == COMPRESS_ZLIB) {
+ /*
+ * uncompress() takes an unsigned long destination length;
+ * use one directly instead of casting a size_t pointer,
+ * which is broken on LLP64 targets where
+ * sizeof(size_t) != sizeof(unsigned long).
+ */
+ unsigned long size = MAX_PENDING_SIZE * 2;
+ u8 *tmp;
+
+ tmp = malloc(MAX_PENDING_SIZE * 2);
+ if (!tmp) {
+ free(buffer);
+ return -ENOMEM;
+ }
+ ret = uncompress(tmp, &size,
+ buffer, le32_to_cpu(item->size));
+ if (ret != Z_OK) {
+ fprintf(stderr, "Error decompressing %d\n", ret);
+ free(buffer);
+ free(tmp);
+ return -EIO;
+ }
+ free(buffer);
+ buffer = tmp;
+ }
+
+ /* Cache everything the restore threads need from the super. */
+ pthread_mutex_lock(&mdres->mutex);
+ super = (struct btrfs_super_block *)buffer;
+ chunk_root_bytenr = btrfs_super_chunk_root(super);
+ mdres->nodesize = btrfs_super_nodesize(super);
+ memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
+ memcpy(mdres->uuid, super->dev_item.uuid,
+ BTRFS_UUID_SIZE);
+ mdres->devid = le64_to_cpu(super->dev_item.devid);
+ free(buffer);
+ pthread_mutex_unlock(&mdres->mutex);
+
+ return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
+}
+
+static int range_contains_super(u64 physical, u64 bytes)
+{
+ const u64 end = physical + bytes;
+ int i;
+
+ /*
+ * Report whether any superblock mirror offset falls inside the
+ * half-open physical range [physical, physical + bytes).
+ */
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ u64 sb = btrfs_sb_offset(i);
+
+ if (sb >= physical && sb < end)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void remap_overlapping_chunks(struct mdrestore_struct *mdres)
+{
+ struct fs_chunk *chunk;
+
+ /*
+ * Drain the list of chunks whose physical ranges collided and move
+ * each one to fresh space just past the highest physical offset seen
+ * so far, inserting it into the physical tree at its new home.
+ */
+ for (;;) {
+ if (list_empty(&mdres->overlapping_chunks))
+ break;
+
+ chunk = list_first_entry(&mdres->overlapping_chunks,
+ struct fs_chunk, list);
+ list_del_init(&chunk->list);
+
+ /*
+ * Relocating a range that held a superblock mirror makes the
+ * space cache stale; flag it so it gets cleared.
+ */
+ if (range_contains_super(chunk->physical,
+ chunk->bytes)) {
+ fprintf(stderr, "Remapping a chunk that had a super "
+ "mirror inside of it, clearing space cache "
+ "so we don't end up with corruption\n");
+ mdres->clear_space_cache = 1;
+ }
+
+ chunk->physical = mdres->last_physical_offset;
+ tree_insert(&mdres->physical_tree, &chunk->p, physical_cmp);
+ mdres->last_physical_offset += chunk->bytes;
+ }
+}
+
+/*
+ * After restoring a (possibly multi-device) metadump onto a single target
+ * device, rewrite the device items so the filesystem describes exactly one
+ * device of @dev_size bytes: update the super's embedded dev item, delete
+ * every DEV_ITEM whose devid differs from the super's, and refresh the
+ * size/usage of the one that remains.
+ *
+ * Returns 0 on success, negative errno on failure; unrecoverable b-tree
+ * errors abort the process via exit(1), tool-style.
+ */
+static int fixup_devices(struct btrfs_fs_info *fs_info,
+ struct mdrestore_struct *mdres, off_t dev_size)
+{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_dev_item *dev_item;
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ struct btrfs_root *root = fs_info->chunk_root;
+ struct btrfs_key key;
+ u64 devid, cur_devid;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ fprintf(stderr, "Error allocating path\n");
+ return -ENOMEM;
+ }
+
+ trans = btrfs_start_transaction(fs_info->tree_root, 1);
+ if (IS_ERR(trans)) {
+ fprintf(stderr, "Error starting transaction %ld\n",
+ PTR_ERR(trans));
+ btrfs_free_path(path);
+ return PTR_ERR(trans);
+ }
+
+ /* First fix the in-memory super copy's embedded device item. */
+ dev_item = &fs_info->super_copy->dev_item;
+
+ devid = btrfs_stack_device_id(dev_item);
+
+ btrfs_set_stack_device_total_bytes(dev_item, dev_size);
+ btrfs_set_stack_device_bytes_used(dev_item, mdres->alloced_chunks);
+
+ /* Now walk the chunk tree's DEV_ITEMs starting from the lowest key. */
+ key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+ key.type = BTRFS_DEV_ITEM_KEY;
+ key.offset = 0;
+
+again:
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret < 0) {
+ fprintf(stderr, "search failed %d\n", ret);
+ exit(1);
+ }
+
+ while (1) {
+ leaf = path->nodes[0];
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0) {
+ fprintf(stderr, "Error going to next leaf "
+ "%d\n", ret);
+ exit(1);
+ }
+ if (ret > 0) {
+ /* No more leaves: done. */
+ ret = 0;
+ break;
+ }
+ leaf = path->nodes[0];
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.type > BTRFS_DEV_ITEM_KEY)
+ break;
+ if (key.type != BTRFS_DEV_ITEM_KEY) {
+ path->slots[0]++;
+ continue;
+ }
+
+ /* NOTE(review): dev_item is reused here for the leaf item. */
+ dev_item = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_dev_item);
+ cur_devid = btrfs_device_id(leaf, dev_item);
+ if (devid != cur_devid) {
+ /*
+ * Items for other devices are deleted; deletion
+ * invalidates the path, so restart the search.
+ */
+ ret = btrfs_del_item(trans, root, path);
+ if (ret) {
+ fprintf(stderr, "Error deleting item %d\n",
+ ret);
+ exit(1);
+ }
+ btrfs_release_path(path);
+ goto again;
+ }
+
+ /* Our device: refresh its size and usage in the tree. */
+ btrfs_set_device_total_bytes(leaf, dev_item, dev_size);
+ btrfs_set_device_bytes_used(leaf, dev_item,
+ mdres->alloced_chunks);
+ btrfs_mark_buffer_dirty(leaf);
+ path->slots[0]++;
+ }
+
+ btrfs_free_path(path);
+ ret = btrfs_commit_transaction(trans, fs_info->tree_root);
+ if (ret) {
+ fprintf(stderr, "Commit failed %d\n", ret);
+ return ret;
+ }
+ return 0;
+}