- if (data.num_blocks > 0) {
- ret = record_file_blocks(&data, data.first_block,
- data.disk_block, data.num_blocks);
- if (ret)
- goto fail;
- }
-
- key.objectid = extent_key->objectid;
- key.offset = 0;
- key.type = BTRFS_INODE_ITEM_KEY;
- ret = btrfs_lookup_inode(trans, root, &path, &key, 1);
- if (ret)
- goto fail;
-
- leaf = path.nodes[0];
- ptr = btrfs_item_ptr_offset(leaf, path.slots[0]);
- write_extent_buffer(leaf, &inode, ptr, sizeof(inode));
- btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(&path);
-
-fail:
- btrfs_release_path(&path);
- return ret;
-}
-
/*
 * Relocate all data extents within [start_byte, end_byte) away from that
 * range, alternating between @image_root and @fs_root as the owner whose
 * references get moved (even passes use @image_root, odd passes @fs_root).
 *
 * Each pass walks the extent tree over the range and, for every data
 * EXTENT_ITEM whose inline ref list contains a ref owned by the current
 * root, relocates that reference.  Up to 16 passes are made.
 *
 * Returns 0 when the range is fully relocated, -1 if extents still remain
 * after the pass limit, or a negative error from the btrfs helpers.
 */
static int relocate_extents_range(struct btrfs_root *fs_root,
				  struct btrfs_root *image_root,
				  u64 start_byte, u64 end_byte)
{
	struct btrfs_fs_info *info = fs_root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_root *cur_root = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key extent_key;
	struct btrfs_path path;
	struct extent_io_tree reloc_tree;
	unsigned long ptr;
	unsigned long end;
	u64 cur_byte;
	u64 num_bytes;
	u64 ref_root;
	u64 num_extents;
	int pass = 0;
	int ret;

	btrfs_init_path(&path);
	extent_io_tree_init(&reloc_tree);

	/*
	 * If an existing extent straddles start_byte, widen the range so it
	 * begins at that extent's start.
	 */
	key.objectid = start_byte;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0) {
		ret = btrfs_previous_item(extent_root, &path, 0,
					  BTRFS_EXTENT_ITEM_KEY);
		if (ret < 0)
			goto fail;
		if (ret == 0) {
			leaf = path.nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
			if (key.objectid + key.offset > start_byte)
				start_byte = key.objectid;
		}
	}
	btrfs_release_path(&path);
again:
	/* even passes handle image_root refs, odd passes fs_root refs */
	cur_root = (pass % 2 == 0) ? image_root : fs_root;
	num_extents = 0;

	trans = btrfs_start_transaction(cur_root, 1);
	BUG_ON(!trans);

	cur_byte = start_byte;
	while (1) {
		key.objectid = cur_byte;
		key.offset = 0;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		ret = btrfs_search_slot(trans, extent_root,
					&key, &path, 0, 0);
		if (ret < 0)
			goto fail;
next:
		leaf = path.nodes[0];
		if (path.slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, &path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;	/* no more items in the tree */
			leaf = path.nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
		if (key.objectid < cur_byte ||
		    key.type != BTRFS_EXTENT_ITEM_KEY) {
			path.slots[0]++;
			goto next;
		}
		if (key.objectid >= end_byte)
			break;

		num_extents++;

		cur_byte = key.objectid;
		num_bytes = key.offset;
		ei = btrfs_item_ptr(leaf, path.slots[0],
				    struct btrfs_extent_item);
		/* only data extents are expected inside this range */
		BUG_ON(!(btrfs_extent_flags(leaf, ei) &
			 BTRFS_EXTENT_FLAG_DATA));

		ptr = btrfs_item_ptr_offset(leaf, path.slots[0]);
		end = ptr + btrfs_item_size_nr(leaf, path.slots[0]);

		ptr += sizeof(struct btrfs_extent_item);

		/* scan the inline refs looking for one owned by cur_root */
		while (ptr < end) {
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(leaf, iref);
			BUG_ON(key.type != BTRFS_EXTENT_DATA_REF_KEY);
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ref_root = btrfs_extent_data_ref_root(leaf, dref);
			extent_key.objectid =
				btrfs_extent_data_ref_objectid(leaf, dref);
			extent_key.offset =
				btrfs_extent_data_ref_offset(leaf, dref);
			extent_key.type = BTRFS_EXTENT_DATA_KEY;
			BUG_ON(btrfs_extent_data_ref_count(leaf, dref) != 1);

			if (ref_root == cur_root->root_key.objectid)
				break;

			ptr += btrfs_extent_inline_ref_size(key.type);
		}

		/* no ref for cur_root in this extent; move to the next item */
		if (ptr >= end) {
			path.slots[0]++;
			goto next;
		}

		ret = relocate_one_reference(trans, cur_root, cur_byte,
					     num_bytes, &extent_key,
					     &reloc_tree);
		if (ret < 0)
			goto fail;

		cur_byte += num_bytes;
		btrfs_release_path(&path);

		/* keep the transaction bounded: commit and restart regularly */
		if (trans->blocks_used >= 4096) {
			ret = btrfs_commit_transaction(trans, cur_root);
			BUG_ON(ret);
			trans = btrfs_start_transaction(cur_root, 1);
			BUG_ON(!trans);
		}
	}
	btrfs_release_path(&path);

	ret = btrfs_commit_transaction(trans, cur_root);
	BUG_ON(ret);

	/* extents remain: switch roots and go again, up to 16 passes */
	if (num_extents > 0 && pass++ < 16)
		goto again;

	ret = (num_extents > 0) ? -1 : 0;
fail:
	btrfs_release_path(&path);
	extent_io_tree_cleanup(&reloc_tree);
	return ret;
}
-
-/*
- * relocate data in system chunk
- */
-static int cleanup_sys_chunk(struct btrfs_root *fs_root,
- struct btrfs_root *image_root)
-{
- struct btrfs_block_group_cache *cache;
- int i, ret = 0;
- u64 offset = 0;
- u64 end_byte;
-
- while(1) {
- cache = btrfs_lookup_block_group(fs_root->fs_info, offset);
- if (!cache)
- break;
-
- end_byte = cache->key.objectid + cache->key.offset;
- if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
- ret = relocate_extents_range(fs_root, image_root,
- cache->key.objectid,
- end_byte);
- if (ret)
- goto fail;
- }
- offset = end_byte;
- }
- for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- offset = btrfs_sb_offset(i);
- offset &= ~((u64)BTRFS_STRIPE_LEN - 1);
-
- ret = relocate_extents_range(fs_root, image_root,
- offset, offset + BTRFS_STRIPE_LEN);
- if (ret)
- goto fail;
- }
- ret = 0;
-fail:
- return ret;
-}
-
/*
 * Rewrite the chunk mapping after conversion:
 *
 * 1) COW every block of the chunk tree (search with cow=1, then walk with
 *    btrfs_next_leaf), which migrates all chunk tree blocks into the
 *    SYSTEM block group.
 * 2) Reset the superblock's sys_chunk_array and rebuild it, adding back
 *    only the chunks whose type has the SYSTEM flag set.
 *
 * Returns 0 on success or a negative error code.
 */
static int fixup_chunk_mapping(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_path path;
	struct btrfs_chunk chunk;
	unsigned long ptr;
	u32 size;
	u64 type;
	int ret;

	btrfs_init_path(&path);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	/*
	 * recow the whole chunk tree. this will move all chunk tree blocks
	 * into system block group.
	 */
	memset(&key, 0, sizeof(key));
	while (1) {
		/* cow=1 forces the blocks on this path to be rewritten */
		ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 1);
		if (ret < 0)
			goto err;

		ret = btrfs_next_leaf(chunk_root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0)
			break;

		/* resume the walk from the first key of the next leaf */
		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		btrfs_release_path(&path);
	}
	btrfs_release_path(&path);

	/* fixup the system chunk array in super block */
	btrfs_set_super_sys_array_size(info->super_copy, 0);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 0);
	if (ret < 0)
		goto err;
	BUG_ON(ret != 0);	/* the first chunk item is expected to exist */
	while(1) {
		leaf = path.nodes[0];
		if (path.slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(chunk_root, &path);
			if (ret < 0)
				goto err;
			if (ret > 0)
				break;
			leaf = path.nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
		if (key.type != BTRFS_CHUNK_ITEM_KEY)
			goto next;

		ptr = btrfs_item_ptr_offset(leaf, path.slots[0]);
		size = btrfs_item_size_nr(leaf, path.slots[0]);
		BUG_ON(size != sizeof(chunk));
		read_extent_buffer(leaf, &chunk, ptr, size);
		type = btrfs_stack_chunk_type(&chunk);

		/* only SYSTEM chunks belong in the superblock array */
		if (!(type & BTRFS_BLOCK_GROUP_SYSTEM))
			goto next;

		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
					     &chunk, size);
		if (ret)
			goto err;
next:
		path.slots[0]++;
	}

	ret = btrfs_commit_transaction(trans, root);
	BUG_ON(ret);
err:
	btrfs_release_path(&path);
	return ret;
}
-
/* Dispatch table wiring the generic convert operations to ext2. */
static const struct btrfs_convert_operations ext2_convert_ops = {
	.name = "ext2",
	.open_fs = ext2_open_fs,
	.read_used_space = ext2_read_used_space,
	.alloc_block = ext2_alloc_block,
	.alloc_block_range = ext2_alloc_block_range,
	.copy_inodes = ext2_copy_inodes,
	.test_block = ext2_test_block,
	.free_block = ext2_free_block,
	.free_block_range = ext2_free_block_range,
	.close_fs = ext2_close_fs,
};
-
/* All source filesystems we can probe, tried in order. */
static const struct btrfs_convert_operations *convert_operations[] = {
	&ext2_convert_ops,
};
-
-static int convert_open_fs(const char *devname,
- struct btrfs_convert_context *cctx)
-{
- int i;
-
- memset(cctx, 0, sizeof(*cctx));
-
- for (i = 0; i < ARRAY_SIZE(convert_operations); i++) {
- int ret = convert_operations[i]->open_fs(cctx, devname);
-
- if (ret == 0) {
- cctx->convert_ops = convert_operations[i];
- return ret;
- }
- }
-
- fprintf(stderr, "No file system found to convert.\n");
- return -1;
-}
-
-/*
- * Remove one reserve range from given cache tree
- * if min_stripe_size is non-zero, it will ensure for split case,
- * all its split cache extent is no smaller than @min_strip_size / 2.
- */
-static int wipe_one_reserved_range(struct cache_tree *tree,
- u64 start, u64 len, u64 min_stripe_size,
- int ensure_size)
-{
- struct cache_extent *cache;
- int ret;
-
- BUG_ON(ensure_size && min_stripe_size == 0);
- /*
- * The logical here is simplified to handle special cases only
- * So we don't need to consider merge case for ensure_size
- */
- BUG_ON(min_stripe_size && (min_stripe_size < len * 2 ||
- min_stripe_size / 2 < BTRFS_STRIPE_LEN));
-
- /* Also, wipe range should already be aligned */
- BUG_ON(start != round_down(start, BTRFS_STRIPE_LEN) ||
- start + len != round_up(start + len, BTRFS_STRIPE_LEN));
-
- min_stripe_size /= 2;
-
- cache = lookup_cache_extent(tree, start, len);
- if (!cache)
- return 0;
-
- if (start <= cache->start) {
- /*
- * |--------cache---------|
- * |-wipe-|
- */
- BUG_ON(start + len <= cache->start);
-
- /*
- * The wipe size is smaller than min_stripe_size / 2,
- * so the result length should still meet min_stripe_size
- * And no need to do alignment
- */
- cache->size -= (start + len - cache->start);
- if (cache->size == 0) {
- remove_cache_extent(tree, cache);
- free(cache);
- return 0;
- }
-
- BUG_ON(ensure_size && cache->size < min_stripe_size);
-
- cache->start = start + len;
- return 0;
- } else if (start > cache->start && start + len < cache->start +
- cache->size) {
- /*
- * |-------cache-----|
- * |-wipe-|
- */
- u64 old_len = cache->size;
- u64 insert_start = start + len;
- u64 insert_len;
-
- cache->size = start - cache->start;
- if (ensure_size)
- cache->size = max(cache->size, min_stripe_size);
- cache->start = start - cache->size;
-
- /* And insert the new one */
- insert_len = old_len - start - len;
- if (ensure_size)
- insert_len = max(insert_len, min_stripe_size);
-
- ret = add_merge_cache_extent(tree, insert_start, insert_len);