2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include "kerncompat.h"
21 #include <sys/ioctl.h>
22 #include <sys/mount.h>
25 #include <sys/types.h>
29 #include <uuid/uuid.h>
30 #include <linux/limits.h>
36 #include "transaction.h"
39 #include "task-utils.h"
41 #include <ext2fs/ext2_fs.h>
42 #include <ext2fs/ext2fs.h>
43 #include <ext2fs/ext2_ext_attr.h>
45 #define INO_OFFSET (BTRFS_FIRST_FREE_OBJECTID - EXT2_ROOT_INO)
46 #define CONV_IMAGE_SUBVOL_OBJECTID BTRFS_FIRST_FREE_OBJECTID
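/*
 * For reference: EXT2_ROOT_INO is 2 and BTRFS_FIRST_FREE_OBJECTID is 256,
 * so INO_OFFSET is 254 and e.g. the ext2 root directory (inode 2) becomes
 * btrfs objectid 256; every other ext2 inode is shifted by the same amount.
 */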
49 * Compatibility code for e2fsprogs 1.41 which doesn't support RO compat flag
51 * Unlike a normal RO compat flag, BIGALLOC affects how e2fsprogs checks used
52 * space, and btrfs-convert heavily relies on it.
54 #ifdef HAVE_OLD_E2FSPROGS
55 #define EXT2FS_CLUSTER_RATIO(fs) (1)
56 #define EXT2_CLUSTERS_PER_GROUP(s) (EXT2_BLOCKS_PER_GROUP(s))
57 #define EXT2FS_B2C(fs, blk) (blk)
61 uint32_t max_copy_inodes;
62 uint32_t cur_copy_inodes;
63 struct task_info *info;
66 static void *print_copied_inodes(void *p)
68 struct task_ctx *priv = p;
69 const char work_indicator[] = { '.', 'o', 'O', 'o' };
72 task_period_start(priv->info, 1000 /* 1s */);
75 printf("copy inodes [%c] [%10d/%10d]\r",
76 work_indicator[count % 4], priv->cur_copy_inodes,
77 priv->max_copy_inodes);
79 task_period_wait(priv->info);
85 static int after_copied_inodes(void *p)
93 struct btrfs_convert_context;
94 struct btrfs_convert_operations {
96 int (*open_fs)(struct btrfs_convert_context *cctx, const char *devname);
97 int (*read_used_space)(struct btrfs_convert_context *cctx);
98 int (*copy_inodes)(struct btrfs_convert_context *cctx,
99 struct btrfs_root *root, int datacsum,
100 int packing, int noxattr, struct task_ctx *p);
101 void (*close_fs)(struct btrfs_convert_context *cctx);
104 static void init_convert_context(struct btrfs_convert_context *cctx)
106 cache_tree_init(&cctx->used);
107 cache_tree_init(&cctx->data_chunks);
108 cache_tree_init(&cctx->free);
111 static void clean_convert_context(struct btrfs_convert_context *cctx)
113 free_extent_cache_tree(&cctx->used);
114 free_extent_cache_tree(&cctx->data_chunks);
115 free_extent_cache_tree(&cctx->free);
118 static inline int copy_inodes(struct btrfs_convert_context *cctx,
119 struct btrfs_root *root, int datacsum,
120 int packing, int noxattr, struct task_ctx *p)
122 return cctx->convert_ops->copy_inodes(cctx, root, datacsum, packing,
126 static inline void convert_close_fs(struct btrfs_convert_context *cctx)
128 cctx->convert_ops->close_fs(cctx);
131 static int intersect_with_sb(u64 bytenr, u64 num_bytes)
136 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
137 offset = btrfs_sb_offset(i);
138 offset &= ~((u64)BTRFS_STRIPE_LEN - 1);
140 if (bytenr < offset + BTRFS_STRIPE_LEN &&
141 bytenr + num_bytes > offset)
147 static int convert_insert_dirent(struct btrfs_trans_handle *trans,
148 struct btrfs_root *root,
149 const char *name, size_t name_len,
150 u64 dir, u64 objectid,
151 u8 file_type, u64 index_cnt,
152 struct btrfs_inode_item *inode)
156 struct btrfs_key location = {
157 .objectid = objectid,
159 .type = BTRFS_INODE_ITEM_KEY,
162 ret = btrfs_insert_dir_item(trans, root, name, name_len,
163 dir, &location, file_type, index_cnt);
166 ret = btrfs_insert_inode_ref(trans, root, name, name_len,
167 objectid, dir, index_cnt);
170 inode_size = btrfs_stack_inode_size(inode) + name_len * 2;
171 btrfs_set_stack_inode_size(inode, inode_size);
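/*
 * Note: a btrfs directory inode's size counts every name twice, once for
 * the DIR_ITEM and once for the DIR_INDEX item, hence "name_len * 2" above.
 */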
176 static int read_disk_extent(struct btrfs_root *root, u64 bytenr,
177 u32 num_bytes, char *buffer)
180 struct btrfs_fs_devices *fs_devs = root->fs_info->fs_devices;
182 ret = pread(fs_devs->latest_bdev, buffer, num_bytes, bytenr);
183 if (ret != num_bytes)
192 static int csum_disk_extent(struct btrfs_trans_handle *trans,
193 struct btrfs_root *root,
194 u64 disk_bytenr, u64 num_bytes)
196 u32 blocksize = root->sectorsize;
201 buffer = malloc(blocksize);
204 for (offset = 0; offset < num_bytes; offset += blocksize) {
205 ret = read_disk_extent(root, disk_bytenr + offset,
209 ret = btrfs_csum_file_block(trans,
210 root->fs_info->csum_root,
211 disk_bytenr + num_bytes,
212 disk_bytenr + offset,
221 struct blk_iterate_data {
222 struct btrfs_trans_handle *trans;
223 struct btrfs_root *root;
224 struct btrfs_root *convert_root;
225 struct btrfs_inode_item *inode;
236 static void init_blk_iterate_data(struct blk_iterate_data *data,
237 struct btrfs_trans_handle *trans,
238 struct btrfs_root *root,
239 struct btrfs_inode_item *inode,
240 u64 objectid, int checksum)
242 struct btrfs_key key;
247 data->objectid = objectid;
248 data->first_block = 0;
249 data->disk_block = 0;
250 data->num_blocks = 0;
251 data->boundary = (u64)-1;
252 data->checksum = checksum;
255 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
256 key.type = BTRFS_ROOT_ITEM_KEY;
257 key.offset = (u64)-1;
258 data->convert_root = btrfs_read_fs_root(root->fs_info, &key);
259 /* Impossible as we just opened it before */
260 BUG_ON(!data->convert_root || IS_ERR(data->convert_root));
261 data->convert_ino = BTRFS_FIRST_FREE_OBJECTID + 1;
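/*
 * The image file is expected to be the first (and only) file created in the
 * convert subvolume, so its inode number is assumed to be
 * BTRFS_FIRST_FREE_OBJECTID + 1, right after the subvolume's root directory.
 */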
265 * Record a file extent of the original filesystem into the btrfs one.
266 * The special point is that the old disk_block can point to a reserved range,
267 * so we don't use disk_block directly but search convert_root
268 * to get the real disk_bytenr.
270 static int record_file_blocks(struct blk_iterate_data *data,
271 u64 file_block, u64 disk_block, u64 num_blocks)
274 struct btrfs_root *root = data->root;
275 struct btrfs_root *convert_root = data->convert_root;
276 struct btrfs_path *path;
277 u64 file_pos = file_block * root->sectorsize;
278 u64 old_disk_bytenr = disk_block * root->sectorsize;
279 u64 num_bytes = num_blocks * root->sectorsize;
280 u64 cur_off = old_disk_bytenr;
282 /* Hole, pass it to record_file_extent directly */
283 if (old_disk_bytenr == 0)
284 return btrfs_record_file_extent(data->trans, root,
285 data->objectid, data->inode, file_pos, 0,
288 path = btrfs_alloc_path();
293 * Search real disk bytenr from convert root
295 while (cur_off < old_disk_bytenr + num_bytes) {
296 struct btrfs_key key;
297 struct btrfs_file_extent_item *fi;
298 struct extent_buffer *node;
300 u64 extent_disk_bytenr;
301 u64 extent_num_bytes;
302 u64 real_disk_bytenr;
305 key.objectid = data->convert_ino;
306 key.type = BTRFS_EXTENT_DATA_KEY;
307 key.offset = cur_off;
309 ret = btrfs_search_slot(NULL, convert_root, &key, path, 0, 0);
313 ret = btrfs_previous_item(convert_root, path,
315 BTRFS_EXTENT_DATA_KEY);
323 node = path->nodes[0];
324 slot = path->slots[0];
325 btrfs_item_key_to_cpu(node, &key, slot);
326 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY ||
327 key.objectid != data->convert_ino ||
328 key.offset > cur_off);
329 fi = btrfs_item_ptr(node, slot, struct btrfs_file_extent_item);
330 extent_disk_bytenr = btrfs_file_extent_disk_bytenr(node, fi);
331 extent_num_bytes = btrfs_file_extent_disk_num_bytes(node, fi);
332 BUG_ON(cur_off - key.offset >= extent_num_bytes);
333 btrfs_release_path(path);
335 if (extent_disk_bytenr)
336 real_disk_bytenr = cur_off - key.offset +
339 real_disk_bytenr = 0;
340 cur_len = min(key.offset + extent_num_bytes,
341 old_disk_bytenr + num_bytes) - cur_off;
342 ret = btrfs_record_file_extent(data->trans, data->root,
343 data->objectid, data->inode, file_pos,
344 real_disk_bytenr, cur_len);
351 * No need to care about csums here:
352 * every byte of the old fs image already has its csum calculated, so
353 * there is no need to waste CPU cycles now.
356 btrfs_free_path(path);
360 static int block_iterate_proc(u64 disk_block, u64 file_block,
361 struct blk_iterate_data *idata)
366 struct btrfs_root *root = idata->root;
367 struct btrfs_block_group_cache *cache;
368 u64 bytenr = disk_block * root->sectorsize;
370 sb_region = intersect_with_sb(bytenr, root->sectorsize);
371 do_barrier = sb_region || disk_block >= idata->boundary;
372 if ((idata->num_blocks > 0 && do_barrier) ||
373 (file_block > idata->first_block + idata->num_blocks) ||
374 (disk_block != idata->disk_block + idata->num_blocks)) {
375 if (idata->num_blocks > 0) {
376 ret = record_file_blocks(idata, idata->first_block,
381 idata->first_block += idata->num_blocks;
382 idata->num_blocks = 0;
384 if (file_block > idata->first_block) {
385 ret = record_file_blocks(idata, idata->first_block,
386 0, file_block - idata->first_block);
392 bytenr += BTRFS_STRIPE_LEN - 1;
393 bytenr &= ~((u64)BTRFS_STRIPE_LEN - 1);
395 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
397 bytenr = cache->key.objectid + cache->key.offset;
400 idata->first_block = file_block;
401 idata->disk_block = disk_block;
402 idata->boundary = bytenr / root->sectorsize;
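/*
 * idata->boundary is either the end of the block group looked up above or,
 * when a superblock range was hit, the end of that superblock stripe
 * (converted to block units). Reaching it forces the accumulated run of
 * blocks to be flushed as a separate file extent.
 */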
409 static int create_image_file_range(struct btrfs_trans_handle *trans,
410 struct btrfs_root *root,
411 struct cache_tree *used,
412 struct btrfs_inode_item *inode,
413 u64 ino, u64 bytenr, u64 *ret_len,
416 struct cache_extent *cache;
417 struct btrfs_block_group_cache *bg_cache;
423 BUG_ON(bytenr != round_down(bytenr, root->sectorsize));
424 BUG_ON(len != round_down(len, root->sectorsize));
425 len = min_t(u64, len, BTRFS_MAX_EXTENT_SIZE);
428 * Skip sb ranges first:
429 * [0, 1M), [sb_offset(1), +64K), [sb_offset(2), +64K).
431 * Otherwise we would insert a hole into the current image file, and the
432 * later migrate step would fail as there is already a file extent.
434 if (bytenr < 1024 * 1024) {
435 *ret_len = 1024 * 1024 - bytenr;
438 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
439 u64 cur = btrfs_sb_offset(i);
441 if (bytenr >= cur && bytenr < cur + BTRFS_STRIPE_LEN) {
442 *ret_len = cur + BTRFS_STRIPE_LEN - bytenr;
446 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
447 u64 cur = btrfs_sb_offset(i);
452 * May still need to go through file extent inserts
454 if (bytenr < cur && bytenr + len >= cur) {
455 len = min_t(u64, len, cur - bytenr);
461 * Drop out, no need to insert anything
463 if (bytenr >= cur && bytenr < cur + BTRFS_STRIPE_LEN) {
464 *ret_len = cur + BTRFS_STRIPE_LEN - bytenr;
469 cache = search_cache_extent(used, bytenr);
471 if (cache->start <= bytenr) {
473 * |///////Used///////|
477 len = min_t(u64, len, cache->start + cache->size -
479 disk_bytenr = bytenr;
486 len = min(len, cache->start - bytenr);
501 /* Check if the range is in a data block group */
502 bg_cache = btrfs_lookup_block_group(root->fs_info, bytenr);
505 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_DATA))
508 /* The extent should never cross block group boundary */
509 len = min_t(u64, len, bg_cache->key.objectid +
510 bg_cache->key.offset - bytenr);
513 BUG_ON(len != round_down(len, root->sectorsize));
514 ret = btrfs_record_file_extent(trans, root, ino, inode, bytenr,
520 ret = csum_disk_extent(trans, root, bytenr, len);
526 * Open Ext2fs in readonly mode, read block allocation bitmap and
527 * inode bitmap into memory.
529 static int ext2_open_fs(struct btrfs_convert_context *cctx, const char *name)
536 ret = ext2fs_open(name, 0, 0, 0, unix_io_manager, &ext2_fs);
538 fprintf(stderr, "ext2fs_open: %s\n", error_message(ret));
542 * We need to know the used space exactly; some RO compat flags like
543 * BIGALLOC affect how the used space is presented.
544 * So we need to manually check for any unsupported RO compat flags.
546 ro_feature = ext2_fs->super->s_feature_ro_compat;
547 if (ro_feature & ~EXT2_LIB_FEATURE_RO_COMPAT_SUPP) {
549 "unsupported RO features detected: %x, abort convert to avoid possible corruption",
550 ro_feature & ~EXT2_LIB_FEATURE_RO_COMPAT_SUPP);
553 ret = ext2fs_read_inode_bitmap(ext2_fs);
555 fprintf(stderr, "ext2fs_read_inode_bitmap: %s\n",
559 ret = ext2fs_read_block_bitmap(ext2_fs);
561 fprintf(stderr, "ext2fs_read_block_bitmap: %s\n",
566 * Search each block group for a free inode. This sets up the
567 * uninitialized block/inode bitmaps appropriately.
570 while (ino <= ext2_fs->super->s_inodes_count) {
572 ext2fs_new_inode(ext2_fs, ino, 0, NULL, &foo);
573 ino += EXT2_INODES_PER_GROUP(ext2_fs->super);
576 if (!(ext2_fs->super->s_feature_incompat &
577 EXT2_FEATURE_INCOMPAT_FILETYPE)) {
578 fprintf(stderr, "filetype feature is missing\n");
582 cctx->fs_data = ext2_fs;
583 cctx->blocksize = ext2_fs->blocksize;
584 cctx->block_count = ext2_fs->super->s_blocks_count;
585 cctx->total_bytes = ext2_fs->blocksize * ext2_fs->super->s_blocks_count;
586 cctx->volume_name = strndup(ext2_fs->super->s_volume_name, 16);
587 cctx->first_data_block = ext2_fs->super->s_first_data_block;
588 cctx->inodes_count = ext2_fs->super->s_inodes_count;
589 cctx->free_inodes_count = ext2_fs->super->s_free_inodes_count;
592 ext2fs_close(ext2_fs);
596 static int __ext2_add_one_block(ext2_filsys fs, char *bitmap,
597 unsigned long group_nr, struct cache_tree *used)
599 unsigned long offset;
603 offset = fs->super->s_first_data_block;
604 offset /= EXT2FS_CLUSTER_RATIO(fs);
605 offset += group_nr * EXT2_CLUSTERS_PER_GROUP(fs->super);
606 for (i = 0; i < EXT2_CLUSTERS_PER_GROUP(fs->super); i++) {
607 if (ext2fs_test_bit(i, bitmap)) {
610 start = (i + offset) * EXT2FS_CLUSTER_RATIO(fs);
611 start *= fs->blocksize;
612 ret = add_merge_cache_extent(used, start,
622 * Read all used ext2 space into cctx->used cache tree
624 static int ext2_read_used_space(struct btrfs_convert_context *cctx)
626 ext2_filsys fs = (ext2_filsys)cctx->fs_data;
627 blk64_t blk_itr = EXT2FS_B2C(fs, fs->super->s_first_data_block);
628 struct cache_tree *used_tree = &cctx->used;
629 char *block_bitmap = NULL;
634 block_nbytes = EXT2_CLUSTERS_PER_GROUP(fs->super) / 8;
635 /* Shouldn't happen */
636 BUG_ON(!fs->block_map);
638 block_bitmap = malloc(block_nbytes);
642 for (i = 0; i < fs->group_desc_count; i++) {
643 ret = ext2fs_get_block_bitmap_range(fs->block_map, blk_itr,
644 block_nbytes * 8, block_bitmap);
646 error("failed to get bitmap from ext2, %s",
650 ret = __ext2_add_one_block(fs, block_bitmap, i, used_tree);
652 error("failed to build used space tree, %s",
656 blk_itr += EXT2_CLUSTERS_PER_GROUP(fs->super);
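/*
 * Rough example, assuming a 4KiB block size and no bigalloc clustering: a
 * group whose bitmap has bits 0-9 set contributes one merged cache extent
 * of 10 * 4096 bytes starting at that group's first byte offset.
 */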
663 static void ext2_close_fs(struct btrfs_convert_context *cctx)
665 if (cctx->volume_name) {
666 free(cctx->volume_name);
667 cctx->volume_name = NULL;
669 ext2fs_close(cctx->fs_data);
672 struct dir_iterate_data {
673 struct btrfs_trans_handle *trans;
674 struct btrfs_root *root;
675 struct btrfs_inode_item *inode;
682 static u8 ext2_filetype_conversion_table[EXT2_FT_MAX] = {
683 [EXT2_FT_UNKNOWN] = BTRFS_FT_UNKNOWN,
684 [EXT2_FT_REG_FILE] = BTRFS_FT_REG_FILE,
685 [EXT2_FT_DIR] = BTRFS_FT_DIR,
686 [EXT2_FT_CHRDEV] = BTRFS_FT_CHRDEV,
687 [EXT2_FT_BLKDEV] = BTRFS_FT_BLKDEV,
688 [EXT2_FT_FIFO] = BTRFS_FT_FIFO,
689 [EXT2_FT_SOCK] = BTRFS_FT_SOCK,
690 [EXT2_FT_SYMLINK] = BTRFS_FT_SYMLINK,
693 static int ext2_dir_iterate_proc(ext2_ino_t dir, int entry,
694 struct ext2_dir_entry *dirent,
695 int offset, int blocksize,
696 char *buf, void *priv_data)
701 char dotdot[] = "..";
702 struct dir_iterate_data *idata = (struct dir_iterate_data *)priv_data;
705 name_len = dirent->name_len & 0xFF;
707 objectid = dirent->inode + INO_OFFSET;
708 if (!strncmp(dirent->name, dotdot, name_len)) {
710 BUG_ON(idata->parent != 0);
711 idata->parent = objectid;
715 if (dirent->inode < EXT2_GOOD_OLD_FIRST_INO)
718 file_type = dirent->name_len >> 8;
719 BUG_ON(file_type > EXT2_FT_SYMLINK);
721 ret = convert_insert_dirent(idata->trans, idata->root, dirent->name,
722 name_len, idata->objectid, objectid,
723 ext2_filetype_conversion_table[file_type],
724 idata->index_cnt, idata->inode);
726 idata->errcode = ret;
734 static int ext2_create_dir_entries(struct btrfs_trans_handle *trans,
735 struct btrfs_root *root, u64 objectid,
736 struct btrfs_inode_item *btrfs_inode,
737 ext2_filsys ext2_fs, ext2_ino_t ext2_ino)
741 struct dir_iterate_data data = {
744 .inode = btrfs_inode,
745 .objectid = objectid,
751 err = ext2fs_dir_iterate2(ext2_fs, ext2_ino, 0, NULL,
752 ext2_dir_iterate_proc, &data);
756 if (ret == 0 && data.parent == objectid) {
757 ret = btrfs_insert_inode_ref(trans, root, "..", 2,
758 objectid, objectid, 0);
762 fprintf(stderr, "ext2fs_dir_iterate2: %s\n", error_message(err));
766 static int ext2_block_iterate_proc(ext2_filsys fs, blk_t *blocknr,
767 e2_blkcnt_t blockcnt, blk_t ref_block,
768 int ref_offset, void *priv_data)
771 struct blk_iterate_data *idata;
772 idata = (struct blk_iterate_data *)priv_data;
773 ret = block_iterate_proc(*blocknr, blockcnt, idata);
775 idata->errcode = ret;
782 * Traverse the file's data blocks and record them as file extents.
784 static int ext2_create_file_extents(struct btrfs_trans_handle *trans,
785 struct btrfs_root *root, u64 objectid,
786 struct btrfs_inode_item *btrfs_inode,
787 ext2_filsys ext2_fs, ext2_ino_t ext2_ino,
788 int datacsum, int packing)
794 u32 sectorsize = root->sectorsize;
795 u64 inode_size = btrfs_stack_inode_size(btrfs_inode);
796 struct blk_iterate_data data;
798 init_blk_iterate_data(&data, trans, root, btrfs_inode, objectid,
801 err = ext2fs_block_iterate2(ext2_fs, ext2_ino, BLOCK_FLAG_DATA_ONLY,
802 NULL, ext2_block_iterate_proc, &data);
808 if (packing && data.first_block == 0 && data.num_blocks > 0 &&
809 inode_size <= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
810 u64 num_bytes = data.num_blocks * sectorsize;
811 u64 disk_bytenr = data.disk_block * sectorsize;
814 buffer = malloc(num_bytes);
817 ret = read_disk_extent(root, disk_bytenr, num_bytes, buffer);
820 if (num_bytes > inode_size)
821 num_bytes = inode_size;
822 ret = btrfs_insert_inline_extent(trans, root, objectid,
823 0, buffer, num_bytes);
826 nbytes = btrfs_stack_inode_nbytes(btrfs_inode) + num_bytes;
827 btrfs_set_stack_inode_nbytes(btrfs_inode, nbytes);
828 } else if (data.num_blocks > 0) {
829 ret = record_file_blocks(&data, data.first_block,
830 data.disk_block, data.num_blocks);
834 data.first_block += data.num_blocks;
835 last_block = (inode_size + sectorsize - 1) / sectorsize;
836 if (last_block > data.first_block) {
837 ret = record_file_blocks(&data, data.first_block, 0,
838 last_block - data.first_block);
844 fprintf(stderr, "ext2fs_block_iterate2: %s\n", error_message(err));
848 static int ext2_create_symbol_link(struct btrfs_trans_handle *trans,
849 struct btrfs_root *root, u64 objectid,
850 struct btrfs_inode_item *btrfs_inode,
851 ext2_filsys ext2_fs, ext2_ino_t ext2_ino,
852 struct ext2_inode *ext2_inode)
856 u64 inode_size = btrfs_stack_inode_size(btrfs_inode);
857 if (ext2fs_inode_data_blocks(ext2_fs, ext2_inode)) {
858 btrfs_set_stack_inode_size(btrfs_inode, inode_size + 1);
859 ret = ext2_create_file_extents(trans, root, objectid,
860 btrfs_inode, ext2_fs, ext2_ino, 1, 1);
861 btrfs_set_stack_inode_size(btrfs_inode, inode_size);
865 pathname = (char *)&(ext2_inode->i_block[0]);
866 BUG_ON(pathname[inode_size] != 0);
867 ret = btrfs_insert_inline_extent(trans, root, objectid, 0,
868 pathname, inode_size + 1);
869 btrfs_set_stack_inode_nbytes(btrfs_inode, inode_size + 1);
874 * The following xattr/acl related code is based on the code in
875 * fs/ext3/xattr.c and fs/ext3/acl.c
877 #define EXT2_XATTR_BHDR(ptr) ((struct ext2_ext_attr_header *)(ptr))
878 #define EXT2_XATTR_BFIRST(ptr) \
879 ((struct ext2_ext_attr_entry *)(EXT2_XATTR_BHDR(ptr) + 1))
880 #define EXT2_XATTR_IHDR(inode) \
881 ((struct ext2_ext_attr_header *) ((void *)(inode) + \
882 EXT2_GOOD_OLD_INODE_SIZE + (inode)->i_extra_isize))
883 #define EXT2_XATTR_IFIRST(inode) \
884 ((struct ext2_ext_attr_entry *) ((void *)EXT2_XATTR_IHDR(inode) + \
885 sizeof(EXT2_XATTR_IHDR(inode)->h_magic)))
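/*
 * EXT2_XATTR_IHDR/IFIRST locate the xattrs stored inside the inode itself:
 * they start right after the fixed inode fields plus i_extra_isize, with a
 * magic header followed by the entry list.
 */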
887 static int ext2_xattr_check_names(struct ext2_ext_attr_entry *entry,
890 struct ext2_ext_attr_entry *next;
892 while (!EXT2_EXT_IS_LAST_ENTRY(entry)) {
893 next = EXT2_EXT_ATTR_NEXT(entry);
894 if ((void *)next >= end)
901 static int ext2_xattr_check_block(const char *buf, size_t size)
904 struct ext2_ext_attr_header *header = EXT2_XATTR_BHDR(buf);
906 if (header->h_magic != EXT2_EXT_ATTR_MAGIC ||
907 header->h_blocks != 1)
909 error = ext2_xattr_check_names(EXT2_XATTR_BFIRST(buf), buf + size);
913 static int ext2_xattr_check_entry(struct ext2_ext_attr_entry *entry,
916 size_t value_size = entry->e_value_size;
918 if (entry->e_value_block != 0 || value_size > size ||
919 entry->e_value_offs + value_size > size)
924 #define EXT2_ACL_VERSION 0x0001
926 /* 23.2.5 acl_tag_t values */
928 #define ACL_UNDEFINED_TAG (0x00)
929 #define ACL_USER_OBJ (0x01)
930 #define ACL_USER (0x02)
931 #define ACL_GROUP_OBJ (0x04)
932 #define ACL_GROUP (0x08)
933 #define ACL_MASK (0x10)
934 #define ACL_OTHER (0x20)
936 /* 23.2.7 ACL qualifier constants */
938 #define ACL_UNDEFINED_ID ((id_t)-1)
949 } ext2_acl_entry_short;
955 static inline int ext2_acl_count(size_t size)
958 size -= sizeof(ext2_acl_header);
959 s = size - 4 * sizeof(ext2_acl_entry_short);
961 if (size % sizeof(ext2_acl_entry_short))
963 return size / sizeof(ext2_acl_entry_short);
965 if (s % sizeof(ext2_acl_entry))
967 return s / sizeof(ext2_acl_entry) + 4;
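/*
 * Worked example, assuming the usual 4-byte header, 4-byte short entries
 * and 8-byte full entries: a 28-byte on-disk ACL is 4 + 4 * 4 + 1 * 8,
 * i.e. the four tag-only entries (USER_OBJ, GROUP_OBJ, MASK, OTHER) plus
 * one qualified entry, so ext2_acl_count() returns 5.
 */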
971 #define ACL_EA_VERSION 0x0002
981 acl_ea_entry a_entries[0];
984 static inline size_t acl_ea_size(int count)
986 return sizeof(acl_ea_header) + count * sizeof(acl_ea_entry);
989 static int ext2_acl_to_xattr(void *dst, const void *src,
990 size_t dst_size, size_t src_size)
993 const void *end = src + src_size;
994 acl_ea_header *ext_acl = (acl_ea_header *)dst;
995 acl_ea_entry *dst_entry = ext_acl->a_entries;
996 ext2_acl_entry *src_entry;
998 if (src_size < sizeof(ext2_acl_header))
1000 if (((ext2_acl_header *)src)->a_version !=
1001 cpu_to_le32(EXT2_ACL_VERSION))
1003 src += sizeof(ext2_acl_header);
1004 count = ext2_acl_count(src_size);
1008 BUG_ON(dst_size < acl_ea_size(count));
1009 ext_acl->a_version = cpu_to_le32(ACL_EA_VERSION);
1010 for (i = 0; i < count; i++, dst_entry++) {
1011 src_entry = (ext2_acl_entry *)src;
1012 if (src + sizeof(ext2_acl_entry_short) > end)
1014 dst_entry->e_tag = src_entry->e_tag;
1015 dst_entry->e_perm = src_entry->e_perm;
1016 switch (le16_to_cpu(src_entry->e_tag)) {
1021 src += sizeof(ext2_acl_entry_short);
1022 dst_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
1026 src += sizeof(ext2_acl_entry);
1029 dst_entry->e_id = src_entry->e_id;
1042 static char *xattr_prefix_table[] = {
1044 [2] = "system.posix_acl_access",
1045 [3] = "system.posix_acl_default",
1050 static int ext2_copy_single_xattr(struct btrfs_trans_handle *trans,
1051 struct btrfs_root *root, u64 objectid,
1052 struct ext2_ext_attr_entry *entry,
1053 const void *data, u32 datalen)
1058 void *databuf = NULL;
1059 char namebuf[XATTR_NAME_MAX + 1];
1061 name_index = entry->e_name_index;
1062 if (name_index >= ARRAY_SIZE(xattr_prefix_table) ||
1063 xattr_prefix_table[name_index] == NULL)
1065 name_len = strlen(xattr_prefix_table[name_index]) +
1067 if (name_len >= sizeof(namebuf))
1070 if (name_index == 2 || name_index == 3) {
1071 size_t bufsize = acl_ea_size(ext2_acl_count(datalen));
1072 databuf = malloc(bufsize);
1075 ret = ext2_acl_to_xattr(databuf, data, bufsize, datalen);
1081 strncpy(namebuf, xattr_prefix_table[name_index], XATTR_NAME_MAX);
1082 strncat(namebuf, EXT2_EXT_ATTR_NAME(entry), entry->e_name_len);
1083 if (name_len + datalen > BTRFS_LEAF_DATA_SIZE(root) -
1084 sizeof(struct btrfs_item) - sizeof(struct btrfs_dir_item)) {
1085 fprintf(stderr, "skip large xattr on inode %Lu name %.*s\n",
1086 objectid - INO_OFFSET, name_len, namebuf);
1089 ret = btrfs_insert_xattr_item(trans, root, namebuf, name_len,
1090 data, datalen, objectid);
1096 static int ext2_copy_extended_attrs(struct btrfs_trans_handle *trans,
1097 struct btrfs_root *root, u64 objectid,
1098 struct btrfs_inode_item *btrfs_inode,
1099 ext2_filsys ext2_fs, ext2_ino_t ext2_ino)
1105 u32 block_size = ext2_fs->blocksize;
1106 u32 inode_size = EXT2_INODE_SIZE(ext2_fs->super);
1107 struct ext2_inode_large *ext2_inode;
1108 struct ext2_ext_attr_entry *entry;
1110 char *buffer = NULL;
1111 char inode_buf[EXT2_GOOD_OLD_INODE_SIZE];
1113 if (inode_size <= EXT2_GOOD_OLD_INODE_SIZE) {
1114 ext2_inode = (struct ext2_inode_large *)inode_buf;
1116 ext2_inode = (struct ext2_inode_large *)malloc(inode_size);
1120 err = ext2fs_read_inode_full(ext2_fs, ext2_ino, (void *)ext2_inode,
1123 fprintf(stderr, "ext2fs_read_inode_full: %s\n",
1124 error_message(err));
1129 if (ext2_ino > ext2_fs->super->s_first_ino &&
1130 inode_size > EXT2_GOOD_OLD_INODE_SIZE) {
1131 if (EXT2_GOOD_OLD_INODE_SIZE +
1132 ext2_inode->i_extra_isize > inode_size) {
1136 if (ext2_inode->i_extra_isize != 0 &&
1137 EXT2_XATTR_IHDR(ext2_inode)->h_magic ==
1138 EXT2_EXT_ATTR_MAGIC) {
1144 void *end = (void *)ext2_inode + inode_size;
1145 entry = EXT2_XATTR_IFIRST(ext2_inode);
1146 total = end - (void *)entry;
1147 ret = ext2_xattr_check_names(entry, end);
1150 while (!EXT2_EXT_IS_LAST_ENTRY(entry)) {
1151 ret = ext2_xattr_check_entry(entry, total);
1154 data = (void *)EXT2_XATTR_IFIRST(ext2_inode) +
1155 entry->e_value_offs;
1156 datalen = entry->e_value_size;
1157 ret = ext2_copy_single_xattr(trans, root, objectid,
1158 entry, data, datalen);
1161 entry = EXT2_EXT_ATTR_NEXT(entry);
1165 if (ext2_inode->i_file_acl == 0)
1168 buffer = malloc(block_size);
1173 err = ext2fs_read_ext_attr(ext2_fs, ext2_inode->i_file_acl, buffer);
1175 fprintf(stderr, "ext2fs_read_ext_attr: %s\n",
1176 error_message(err));
1180 ret = ext2_xattr_check_block(buffer, block_size);
1184 entry = EXT2_XATTR_BFIRST(buffer);
1185 while (!EXT2_EXT_IS_LAST_ENTRY(entry)) {
1186 ret = ext2_xattr_check_entry(entry, block_size);
1189 data = buffer + entry->e_value_offs;
1190 datalen = entry->e_value_size;
1191 ret = ext2_copy_single_xattr(trans, root, objectid,
1192 entry, data, datalen);
1195 entry = EXT2_EXT_ATTR_NEXT(entry);
1199 if ((void *)ext2_inode != inode_buf)
1203 #define MINORBITS 20
1204 #define MKDEV(ma, mi) (((ma) << MINORBITS) | (mi))
1206 static inline dev_t old_decode_dev(u16 val)
1208 return MKDEV((val >> 8) & 255, val & 255);
1211 static inline dev_t new_decode_dev(u32 dev)
1213 unsigned major = (dev & 0xfff00) >> 8;
1214 unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
1215 return MKDEV(major, minor);
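/*
 * Example of the two encodings: old_decode_dev(0x0803) yields major 8,
 * minor 3, while new_decode_dev() unpacks the "huge" layout where the
 * major sits in bits 8-19 and the minor is split between bits 0-7 and
 * bits 20 and up.
 */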
1218 static void ext2_copy_inode_item(struct btrfs_inode_item *dst,
1219 struct ext2_inode *src, u32 blocksize)
1221 btrfs_set_stack_inode_generation(dst, 1);
1222 btrfs_set_stack_inode_sequence(dst, 0);
1223 btrfs_set_stack_inode_transid(dst, 1);
1224 btrfs_set_stack_inode_size(dst, src->i_size);
1225 btrfs_set_stack_inode_nbytes(dst, 0);
1226 btrfs_set_stack_inode_block_group(dst, 0);
1227 btrfs_set_stack_inode_nlink(dst, src->i_links_count);
1228 btrfs_set_stack_inode_uid(dst, src->i_uid | (src->i_uid_high << 16));
1229 btrfs_set_stack_inode_gid(dst, src->i_gid | (src->i_gid_high << 16));
1230 btrfs_set_stack_inode_mode(dst, src->i_mode);
1231 btrfs_set_stack_inode_rdev(dst, 0);
1232 btrfs_set_stack_inode_flags(dst, 0);
1233 btrfs_set_stack_timespec_sec(&dst->atime, src->i_atime);
1234 btrfs_set_stack_timespec_nsec(&dst->atime, 0);
1235 btrfs_set_stack_timespec_sec(&dst->ctime, src->i_ctime);
1236 btrfs_set_stack_timespec_nsec(&dst->ctime, 0);
1237 btrfs_set_stack_timespec_sec(&dst->mtime, src->i_mtime);
1238 btrfs_set_stack_timespec_nsec(&dst->mtime, 0);
1239 btrfs_set_stack_timespec_sec(&dst->otime, 0);
1240 btrfs_set_stack_timespec_nsec(&dst->otime, 0);
1242 if (S_ISDIR(src->i_mode)) {
1243 btrfs_set_stack_inode_size(dst, 0);
1244 btrfs_set_stack_inode_nlink(dst, 1);
1246 if (S_ISREG(src->i_mode)) {
1247 btrfs_set_stack_inode_size(dst, (u64)src->i_size_high << 32 |
1250 if (!S_ISREG(src->i_mode) && !S_ISDIR(src->i_mode) &&
1251 !S_ISLNK(src->i_mode)) {
1252 if (src->i_block[0]) {
1253 btrfs_set_stack_inode_rdev(dst,
1254 old_decode_dev(src->i_block[0]));
1256 btrfs_set_stack_inode_rdev(dst,
1257 new_decode_dev(src->i_block[1]));
1260 memset(&dst->reserved, 0, sizeof(dst->reserved));
1264 * Copy a single inode. Do all the required work, such as cloning the
1265 * inode item, creating file extents and creating directory entries.
1267 static int ext2_copy_single_inode(struct btrfs_trans_handle *trans,
1268 struct btrfs_root *root, u64 objectid,
1269 ext2_filsys ext2_fs, ext2_ino_t ext2_ino,
1270 struct ext2_inode *ext2_inode,
1271 int datacsum, int packing, int noxattr)
1274 struct btrfs_inode_item btrfs_inode;
1276 if (ext2_inode->i_links_count == 0)
1279 ext2_copy_inode_item(&btrfs_inode, ext2_inode, ext2_fs->blocksize);
1280 if (!datacsum && S_ISREG(ext2_inode->i_mode)) {
1281 u32 flags = btrfs_stack_inode_flags(&btrfs_inode) |
1282 BTRFS_INODE_NODATASUM;
1283 btrfs_set_stack_inode_flags(&btrfs_inode, flags);
1286 switch (ext2_inode->i_mode & S_IFMT) {
1288 ret = ext2_create_file_extents(trans, root, objectid,
1289 &btrfs_inode, ext2_fs, ext2_ino, datacsum, packing);
1292 ret = ext2_create_dir_entries(trans, root, objectid,
1293 &btrfs_inode, ext2_fs, ext2_ino);
1296 ret = ext2_create_symbol_link(trans, root, objectid,
1297 &btrfs_inode, ext2_fs, ext2_ino, ext2_inode);
1307 ret = ext2_copy_extended_attrs(trans, root, objectid,
1308 &btrfs_inode, ext2_fs, ext2_ino);
1312 return btrfs_insert_inode(trans, root, objectid, &btrfs_inode);
1316 * scan ext2's inode bitmap and copy all used inodes.
1318 static int ext2_copy_inodes(struct btrfs_convert_context *cctx,
1319 struct btrfs_root *root,
1320 int datacsum, int packing, int noxattr, struct task_ctx *p)
1322 ext2_filsys ext2_fs = cctx->fs_data;
1325 ext2_inode_scan ext2_scan;
1326 struct ext2_inode ext2_inode;
1327 ext2_ino_t ext2_ino;
1329 struct btrfs_trans_handle *trans;
1331 trans = btrfs_start_transaction(root, 1);
1334 err = ext2fs_open_inode_scan(ext2_fs, 0, &ext2_scan);
1336 fprintf(stderr, "ext2fs_open_inode_scan: %s\n", error_message(err));
1339 while (!(err = ext2fs_get_next_inode(ext2_scan, &ext2_ino,
1341 /* no more inodes */
1344 /* skip special inode in ext2fs */
1345 if (ext2_ino < EXT2_GOOD_OLD_FIRST_INO &&
1346 ext2_ino != EXT2_ROOT_INO)
1348 objectid = ext2_ino + INO_OFFSET;
1349 ret = ext2_copy_single_inode(trans, root,
1350 objectid, ext2_fs, ext2_ino,
1351 &ext2_inode, datacsum, packing,
1353 p->cur_copy_inodes++;
1356 if (trans->blocks_used >= 4096) {
1357 ret = btrfs_commit_transaction(trans, root);
1359 trans = btrfs_start_transaction(root, 1);
1364 fprintf(stderr, "ext2fs_get_next_inode: %s\n", error_message(err));
1367 ret = btrfs_commit_transaction(trans, root);
1369 ext2fs_close_inode_scan(ext2_scan);
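/*
 * Note on batching: the loop above commits the running transaction once it
 * has used roughly 4096 blocks and starts a new one, which keeps the amount
 * of dirty metadata built up in memory during the copy bounded.
 */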
1375 * Relocate old fs data in one reserved range.
1377 * Since old fs data in a reserved range is not covered by any chunk or
1378 * data extent, we don't need to handle any existing references, only add new
1379 * extents/references, which makes the code clearer.
1381 static int migrate_one_reserved_range(struct btrfs_trans_handle *trans,
1382 struct btrfs_root *root,
1383 struct cache_tree *used,
1384 struct btrfs_inode_item *inode, int fd,
1385 u64 ino, u64 start, u64 len, int datacsum)
1387 u64 cur_off = start;
1389 u64 hole_start = start;
1391 struct cache_extent *cache;
1392 struct btrfs_key key;
1393 struct extent_buffer *eb;
1396 while (cur_off < start + len) {
1397 cache = lookup_cache_extent(used, cur_off, cur_len);
1400 cur_off = max(cache->start, cur_off);
1401 cur_len = min(cache->start + cache->size, start + len) -
1403 BUG_ON(cur_len < root->sectorsize);
1405 /* reserve extent for the data */
1406 ret = btrfs_reserve_extent(trans, root, cur_len, 0, 0, (u64)-1,
1411 eb = malloc(sizeof(*eb) + cur_len);
1417 ret = pread(fd, eb->data, cur_len, cur_off);
1418 if (ret < cur_len) {
1419 ret = (ret < 0 ? ret : -EIO);
1423 eb->start = key.objectid;
1424 eb->len = key.offset;
1426 /* Write the data */
1427 ret = write_and_map_eb(trans, root, eb);
1432 /* Now handle extent item and file extent things */
1433 ret = btrfs_record_file_extent(trans, root, ino, inode, cur_off,
1434 key.objectid, key.offset);
1437 /* Finally, insert csum items */
1439 ret = csum_disk_extent(trans, root, key.objectid,
1442 /* Don't forget to insert hole */
1443 hole_len = cur_off - hole_start;
1445 ret = btrfs_record_file_extent(trans, root, ino, inode,
1446 hole_start, 0, hole_len);
1451 cur_off += key.offset;
1452 hole_start = cur_off;
1453 cur_len = start + len - cur_off;
1456 if (start + len - hole_start > 0)
1457 ret = btrfs_record_file_extent(trans, root, ino, inode,
1458 hole_start, 0, start + len - hole_start);
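/*
 * In short: every used range inside [start, start + len) is copied into a
 * freshly reserved btrfs extent, and each gap in between is recorded as a
 * hole in the image file, including the trailing one handled just above.
 */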
1463 * Relocate the used ext2 data in reserved ranges
1465 * [btrfs_sb_offset(1), +BTRFS_STRIPE_LEN)
1466 * [btrfs_sb_offset(2), +BTRFS_STRIPE_LEN)
1468 static int migrate_reserved_ranges(struct btrfs_trans_handle *trans,
1469 struct btrfs_root *root,
1470 struct cache_tree *used,
1471 struct btrfs_inode_item *inode, int fd,
1472 u64 ino, u64 total_bytes, int datacsum)
1480 cur_len = 1024 * 1024;
1481 ret = migrate_one_reserved_range(trans, root, used, inode, fd, ino,
1482 cur_off, cur_len, datacsum);
1486 /* second sb (first sb is included in 0~1M) */
1487 cur_off = btrfs_sb_offset(1);
1488 cur_len = min(total_bytes, cur_off + BTRFS_STRIPE_LEN) - cur_off;
1489 if (cur_off > total_bytes)
1491 ret = migrate_one_reserved_range(trans, root, used, inode, fd, ino,
1492 cur_off, cur_len, datacsum);
1497 cur_off = btrfs_sb_offset(2);
1498 cur_len = min(total_bytes, cur_off + BTRFS_STRIPE_LEN) - cur_off;
1499 if (cur_off > total_bytes)
1501 ret = migrate_one_reserved_range(trans, root, used, inode, fd, ino,
1502 cur_off, cur_len, datacsum);
1506 static int wipe_reserved_ranges(struct cache_tree *tree, u64 min_stripe_size,
1510 * Create the fs image file of the old filesystem.
1512 * This is completely fs independent as we have cctx->used; we only
1513 * need to create file extents pointing to all the used positions.
1515 static int create_image(struct btrfs_root *root,
1516 struct btrfs_mkfs_config *cfg,
1517 struct btrfs_convert_context *cctx, int fd,
1518 u64 size, char *name, int datacsum)
1520 struct btrfs_inode_item buf;
1521 struct btrfs_trans_handle *trans;
1522 struct btrfs_path *path = NULL;
1523 struct btrfs_key key;
1524 struct cache_extent *cache;
1525 struct cache_tree used_tmp;
1528 u64 flags = BTRFS_INODE_READONLY;
1532 flags |= BTRFS_INODE_NODATASUM;
1534 trans = btrfs_start_transaction(root, 1);
1538 cache_tree_init(&used_tmp);
1540 ret = btrfs_find_free_objectid(trans, root, BTRFS_FIRST_FREE_OBJECTID,
1544 ret = btrfs_new_inode(trans, root, ino, 0400 | S_IFREG);
1547 ret = btrfs_change_inode_flags(trans, root, ino, flags);
1550 ret = btrfs_add_link(trans, root, ino, BTRFS_FIRST_FREE_OBJECTID, name,
1551 strlen(name), BTRFS_FT_REG_FILE, NULL, 1);
1555 path = btrfs_alloc_path();
1561 key.type = BTRFS_INODE_ITEM_KEY;
1564 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1566 ret = (ret > 0 ? -ENOENT : ret);
1569 read_extent_buffer(path->nodes[0], &buf,
1570 btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
1572 btrfs_release_path(path);
1575 * Create a new used space cache, which doesn't contain the reserved
1578 for (cache = first_cache_extent(&cctx->used); cache;
1579 cache = next_cache_extent(cache)) {
1580 ret = add_cache_extent(&used_tmp, cache->start, cache->size);
1584 ret = wipe_reserved_ranges(&used_tmp, 0, 0);
1589 * Start from 1M, as 0~1M is reserved, and create_image_file_range()
1590 * can't handle bytenr 0 (it would be considered a hole).
1593 while (cur < size) {
1594 u64 len = size - cur;
1596 ret = create_image_file_range(trans, root, &used_tmp,
1597 &buf, ino, cur, &len, datacsum);
1602 /* Handle the reserved ranges */
1603 ret = migrate_reserved_ranges(trans, root, &cctx->used, &buf, fd, ino,
1604 cfg->num_bytes, datacsum);
1608 key.type = BTRFS_INODE_ITEM_KEY;
1610 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1612 ret = (ret > 0 ? -ENOENT : ret);
1615 btrfs_set_stack_inode_size(&buf, cfg->num_bytes);
1616 write_extent_buffer(path->nodes[0], &buf,
1617 btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
1620 free_extent_cache_tree(&used_tmp);
1621 btrfs_free_path(path);
1622 btrfs_commit_transaction(trans, root);
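/*
 * The result is a single sparse image file whose file extents point at the
 * old filesystem's data in place, which is what later allows the original
 * filesystem to be read back (and rolled back to) through this file.
 */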
1626 static struct btrfs_root * link_subvol(struct btrfs_root *root,
1627 const char *base, u64 root_objectid)
1629 struct btrfs_trans_handle *trans;
1630 struct btrfs_fs_info *fs_info = root->fs_info;
1631 struct btrfs_root *tree_root = fs_info->tree_root;
1632 struct btrfs_root *new_root = NULL;
1633 struct btrfs_path *path;
1634 struct btrfs_inode_item *inode_item;
1635 struct extent_buffer *leaf;
1636 struct btrfs_key key;
1637 u64 dirid = btrfs_root_dirid(&root->root_item);
1639 char buf[BTRFS_NAME_LEN + 1]; /* for snprintf null */
1645 if (len == 0 || len > BTRFS_NAME_LEN)
1648 path = btrfs_alloc_path();
1651 key.objectid = dirid;
1652 key.type = BTRFS_DIR_INDEX_KEY;
1653 key.offset = (u64)-1;
1655 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1658 if (path->slots[0] > 0) {
1660 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1661 if (key.objectid == dirid && key.type == BTRFS_DIR_INDEX_KEY)
1662 index = key.offset + 1;
1664 btrfs_release_path(path);
1666 trans = btrfs_start_transaction(root, 1);
1669 key.objectid = dirid;
1671 key.type = BTRFS_INODE_ITEM_KEY;
1673 ret = btrfs_lookup_inode(trans, root, path, &key, 1);
1675 leaf = path->nodes[0];
1676 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1677 struct btrfs_inode_item);
1679 key.objectid = root_objectid;
1680 key.offset = (u64)-1;
1681 key.type = BTRFS_ROOT_ITEM_KEY;
1683 memcpy(buf, base, len);
1684 for (i = 0; i < 1024; i++) {
1685 ret = btrfs_insert_dir_item(trans, root, buf, len,
1686 dirid, &key, BTRFS_FT_DIR, index);
1689 len = snprintf(buf, ARRAY_SIZE(buf), "%s%d", base, i);
1690 if (len < 1 || len > BTRFS_NAME_LEN) {
1698 btrfs_set_inode_size(leaf, inode_item, len * 2 +
1699 btrfs_inode_size(leaf, inode_item));
1700 btrfs_mark_buffer_dirty(leaf);
1701 btrfs_release_path(path);
1703 /* add the backref first */
1704 ret = btrfs_add_root_ref(trans, tree_root, root_objectid,
1705 BTRFS_ROOT_BACKREF_KEY,
1706 root->root_key.objectid,
1707 dirid, index, buf, len);
1710 /* now add the forward ref */
1711 ret = btrfs_add_root_ref(trans, tree_root, root->root_key.objectid,
1712 BTRFS_ROOT_REF_KEY, root_objectid,
1713 dirid, index, buf, len);
1715 ret = btrfs_commit_transaction(trans, root);
1718 new_root = btrfs_read_fs_root(fs_info, &key);
1719 if (IS_ERR(new_root))
1722 btrfs_free_path(path);
1726 static int create_subvol(struct btrfs_trans_handle *trans,
1727 struct btrfs_root *root, u64 root_objectid)
1729 struct extent_buffer *tmp;
1730 struct btrfs_root *new_root;
1731 struct btrfs_key key;
1732 struct btrfs_root_item root_item;
1735 ret = btrfs_copy_root(trans, root, root->node, &tmp,
1739 memcpy(&root_item, &root->root_item, sizeof(root_item));
1740 btrfs_set_root_bytenr(&root_item, tmp->start);
1741 btrfs_set_root_level(&root_item, btrfs_header_level(tmp));
1742 btrfs_set_root_generation(&root_item, trans->transid);
1743 free_extent_buffer(tmp);
1745 key.objectid = root_objectid;
1746 key.type = BTRFS_ROOT_ITEM_KEY;
1747 key.offset = trans->transid;
1748 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
1751 key.offset = (u64)-1;
1752 new_root = btrfs_read_fs_root(root->fs_info, &key);
1753 BUG_ON(!new_root || IS_ERR(new_root));
1755 ret = btrfs_make_root_dir(trans, new_root, BTRFS_FIRST_FREE_OBJECTID);
1762 * The new make_btrfs() already handles system and metadata chunks quite well,
1763 * so we only need to add the remaining data chunks.
1765 static int make_convert_data_block_groups(struct btrfs_trans_handle *trans,
1766 struct btrfs_fs_info *fs_info,
1767 struct btrfs_mkfs_config *cfg,
1768 struct btrfs_convert_context *cctx)
1770 struct btrfs_root *extent_root = fs_info->extent_root;
1771 struct cache_tree *data_chunks = &cctx->data_chunks;
1772 struct cache_extent *cache;
1777 * Don't create a data chunk larger than 10% of the device being converted,
1778 * and for a single chunk, don't create one larger than 1G.
1780 max_chunk_size = cfg->num_bytes / 10;
1781 max_chunk_size = min((u64)(1024 * 1024 * 1024), max_chunk_size);
1782 max_chunk_size = round_down(max_chunk_size, extent_root->sectorsize);
1784 for (cache = first_cache_extent(data_chunks); cache;
1785 cache = next_cache_extent(cache)) {
1786 u64 cur = cache->start;
1788 while (cur < cache->start + cache->size) {
1790 u64 cur_backup = cur;
1792 len = min(max_chunk_size,
1793 cache->start + cache->size - cur);
1794 ret = btrfs_alloc_data_chunk(trans, extent_root,
1796 BTRFS_BLOCK_GROUP_DATA, 1);
1799 ret = btrfs_make_block_group(trans, extent_root, 0,
1800 BTRFS_BLOCK_GROUP_DATA,
1801 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
1812 * Init the temporary btrfs to an operational status.
1814 * It will fix the extent usage accounting (XXX: do we really need this?) and
1815 * insert the needed data chunks, to ensure all old fs data extents are covered
1816 * by DATA chunks, preventing wrong chunk allocations.
1818 * It also creates the convert image subvolume and relocation tree.
1819 * (XXX: not needed anymore?)
1820 * But the convert image subvolume is *NOT* linked to the fs tree yet.
1822 static int init_btrfs(struct btrfs_mkfs_config *cfg, struct btrfs_root *root,
1823 struct btrfs_convert_context *cctx, int datacsum,
1824 int packing, int noxattr)
1826 struct btrfs_key location;
1827 struct btrfs_trans_handle *trans;
1828 struct btrfs_fs_info *fs_info = root->fs_info;
1832 * Don't alloc any metadata/system chunk, as we don't want
1833 * any meta/sys chunk allocated before all data chunks are inserted.
1834 * Otherwise we would screw up the chunk layout just like the old implementation.
1836 fs_info->avoid_sys_chunk_alloc = 1;
1837 fs_info->avoid_meta_chunk_alloc = 1;
1838 trans = btrfs_start_transaction(root, 1);
1840 ret = btrfs_fix_block_accounting(trans, root);
1843 ret = make_convert_data_block_groups(trans, fs_info, cfg, cctx);
1846 ret = btrfs_make_root_dir(trans, fs_info->tree_root,
1847 BTRFS_ROOT_TREE_DIR_OBJECTID);
1850 memcpy(&location, &root->root_key, sizeof(location));
1851 location.offset = (u64)-1;
1852 ret = btrfs_insert_dir_item(trans, fs_info->tree_root, "default", 7,
1853 btrfs_super_root_dir(fs_info->super_copy),
1854 &location, BTRFS_FT_DIR, 0);
1857 ret = btrfs_insert_inode_ref(trans, fs_info->tree_root, "default", 7,
1859 btrfs_super_root_dir(fs_info->super_copy), 0);
1862 btrfs_set_root_dirid(&fs_info->fs_root->root_item,
1863 BTRFS_FIRST_FREE_OBJECTID);
1865 /* subvol for fs image file */
1866 ret = create_subvol(trans, root, CONV_IMAGE_SUBVOL_OBJECTID);
1869 /* subvol for data relocation tree */
1870 ret = create_subvol(trans, root, BTRFS_DATA_RELOC_TREE_OBJECTID);
1874 ret = btrfs_commit_transaction(trans, root);
1875 fs_info->avoid_sys_chunk_alloc = 0;
1876 fs_info->avoid_meta_chunk_alloc = 0;
1882 * Migrate super block to its default position and zero the range below it (0 ~ 64k)
1884 static int migrate_super_block(int fd, u64 old_bytenr, u32 sectorsize)
1887 struct extent_buffer *buf;
1888 struct btrfs_super_block *super;
1892 BUG_ON(sectorsize < sizeof(*super));
1893 buf = malloc(sizeof(*buf) + sectorsize);
1897 buf->len = sectorsize;
1898 ret = pread(fd, buf->data, sectorsize, old_bytenr);
1899 if (ret != sectorsize)
1902 super = (struct btrfs_super_block *)buf->data;
1903 BUG_ON(btrfs_super_bytenr(super) != old_bytenr);
1904 btrfs_set_super_bytenr(super, BTRFS_SUPER_INFO_OFFSET);
1906 csum_tree_block_size(buf, BTRFS_CRC32_SIZE, 0);
1907 ret = pwrite(fd, buf->data, sectorsize, BTRFS_SUPER_INFO_OFFSET);
1908 if (ret != sectorsize)
1915 memset(buf->data, 0, sectorsize);
1916 for (bytenr = 0; bytenr < BTRFS_SUPER_INFO_OFFSET; ) {
1917 len = BTRFS_SUPER_INFO_OFFSET - bytenr;
1918 if (len > sectorsize)
1920 ret = pwrite(fd, buf->data, len, bytenr);
1922 fprintf(stderr, "unable to zero fill device\n");
1936 static int prepare_system_chunk_sb(struct btrfs_super_block *super)
1938 struct btrfs_chunk *chunk;
1939 struct btrfs_disk_key *key;
1940 u32 sectorsize = btrfs_super_sectorsize(super);
1942 key = (struct btrfs_disk_key *)(super->sys_chunk_array);
1943 chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
1944 sizeof(struct btrfs_disk_key));
1946 btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1947 btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
1948 btrfs_set_disk_key_offset(key, 0);
1950 btrfs_set_stack_chunk_length(chunk, btrfs_super_total_bytes(super));
1951 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
1952 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
1953 btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
1954 btrfs_set_stack_chunk_io_align(chunk, sectorsize);
1955 btrfs_set_stack_chunk_io_width(chunk, sectorsize);
1956 btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
1957 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1958 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1959 chunk->stripe.devid = super->dev_item.devid;
1960 btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
1961 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
1962 btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
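/*
 * This leaves the superblock with a sys_chunk_array holding one SYSTEM
 * chunk with a single stripe at physical offset 0, i.e. a 1:1 mapping of
 * logical 0 .. total_bytes, so any block can be located without reading
 * the chunk tree.
 */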
1966 static const struct btrfs_convert_operations ext2_convert_ops = {
1968 .open_fs = ext2_open_fs,
1969 .read_used_space = ext2_read_used_space,
1970 .copy_inodes = ext2_copy_inodes,
1971 .close_fs = ext2_close_fs,
1974 static const struct btrfs_convert_operations *convert_operations[] = {
1978 static int convert_open_fs(const char *devname,
1979 struct btrfs_convert_context *cctx)
1983 memset(cctx, 0, sizeof(*cctx));
1985 for (i = 0; i < ARRAY_SIZE(convert_operations); i++) {
1986 int ret = convert_operations[i]->open_fs(cctx, devname);
1989 cctx->convert_ops = convert_operations[i];
1994 fprintf(stderr, "No file system found to convert.\n");
1999 * Helper to expand and merge an extent_cache entry, used by wipe_one_reserved_range()
2000 * to handle wiping a range that exists in the cache.
2002 static int _expand_extent_cache(struct cache_tree *tree,
2003 struct cache_extent *entry,
2004 u64 min_stripe_size, int backward)
2006 struct cache_extent *ce;
2009 if (entry->size >= min_stripe_size)
2011 diff = min_stripe_size - entry->size;
2014 ce = prev_cache_extent(entry);
2017 if (ce->start + ce->size >= entry->start - diff) {
2018 /* Directly merge with previous extent */
2019 ce->size = entry->start + entry->size - ce->start;
2020 remove_cache_extent(tree, entry);
2025 /* No overlap, normal extent */
2026 if (entry->start < diff) {
2027 error("cannot find space for data chunk layout");
2030 entry->start -= diff;
2031 entry->size += diff;
2034 ce = next_cache_extent(entry);
2037 if (entry->start + entry->size + diff >= ce->start) {
2038 /* Directly merge with next extent */
2039 entry->size = ce->start + ce->size - entry->start;
2040 remove_cache_extent(tree, ce);
2045 entry->size += diff;
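/*
 * Example: with min_stripe_size == 64K, a 16K entry at [128K, 144K) being
 * expanded backward either has its start moved to 80K so it reaches 64K,
 * or is merged with the previous extent when that expansion would overlap it.
 */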
2050 * Remove one reserve range from given cache tree
2051 * If min_stripe_size is non-zero, it will ensure that in the split case
2052 * each resulting cache extent is no smaller than @min_stripe_size / 2.
2054 static int wipe_one_reserved_range(struct cache_tree *tree,
2055 u64 start, u64 len, u64 min_stripe_size,
2058 struct cache_extent *cache;
2061 BUG_ON(ensure_size && min_stripe_size == 0);
2063 * The logic here is simplified to handle special cases only,
2064 * so we don't need to consider the merge case for ensure_size.
2066 BUG_ON(min_stripe_size && (min_stripe_size < len * 2 ||
2067 min_stripe_size / 2 < BTRFS_STRIPE_LEN));
2069 /* Also, wipe range should already be aligned */
2070 BUG_ON(start != round_down(start, BTRFS_STRIPE_LEN) ||
2071 start + len != round_up(start + len, BTRFS_STRIPE_LEN));
2073 min_stripe_size /= 2;
2075 cache = lookup_cache_extent(tree, start, len);
2079 if (start <= cache->start) {
2081 * |--------cache---------|
2084 BUG_ON(start + len <= cache->start);
2087 * The wipe size is smaller than min_stripe_size / 2,
2088 * so the resulting length should still meet min_stripe_size,
2089 * and there is no need to do alignment.
2091 cache->size -= (start + len - cache->start);
2092 if (cache->size == 0) {
2093 remove_cache_extent(tree, cache);
2098 BUG_ON(ensure_size && cache->size < min_stripe_size);
2100 cache->start = start + len;
2102 } else if (start > cache->start && start + len < cache->start +
2105 * |-------cache-----|
2108 u64 old_start = cache->start;
2109 u64 old_len = cache->size;
2110 u64 insert_start = start + len;
2113 cache->size = start - cache->start;
2114 /* Expand the leading half part if needed */
2115 if (ensure_size && cache->size < min_stripe_size) {
2116 ret = _expand_extent_cache(tree, cache,
2117 min_stripe_size, 1);
2122 /* And insert the new one */
2123 insert_len = old_start + old_len - start - len;
2124 ret = add_merge_cache_extent(tree, insert_start, insert_len);
2128 /* Expand the last half part if needed */
2129 if (ensure_size && insert_len < min_stripe_size) {
2130 cache = lookup_cache_extent(tree, insert_start,
2132 if (!cache || cache->start != insert_start ||
2133 cache->size != insert_len)
2135 ret = _expand_extent_cache(tree, cache,
2136 min_stripe_size, 0);
2144 * Wipe len should be small enough and no need to expand the
2147 cache->size = start - cache->start;
2148 BUG_ON(ensure_size && cache->size < min_stripe_size);
2153 * Remove reserved ranges from the given cache_tree.
2155 * It will remove the following ranges:
2157 * 2) 2nd superblock, +64K (make sure chunks are 64K aligned)
2158 * 3) 3rd superblock, +64K
2160 * @min_stripe_size must be given for the safety check,
2161 * and if @ensure_size is given, it will ensure affected cache_extents are not
2162 * smaller than min_stripe_size.
2164 static int wipe_reserved_ranges(struct cache_tree *tree, u64 min_stripe_size,
2169 ret = wipe_one_reserved_range(tree, 0, 1024 * 1024, min_stripe_size,
2173 ret = wipe_one_reserved_range(tree, btrfs_sb_offset(1),
2174 BTRFS_STRIPE_LEN, min_stripe_size, ensure_size);
2177 ret = wipe_one_reserved_range(tree, btrfs_sb_offset(2),
2178 BTRFS_STRIPE_LEN, min_stripe_size, ensure_size);
2182 static int calculate_available_space(struct btrfs_convert_context *cctx)
2184 struct cache_tree *used = &cctx->used;
2185 struct cache_tree *data_chunks = &cctx->data_chunks;
2186 struct cache_tree *free = &cctx->free;
2187 struct cache_extent *cache;
2190 * Twice the minimal chunk size, to allow the later wipe_reserved_ranges()
2191 * to work without needing to consider overlaps.
2193 u64 min_stripe_size = 2 * 16 * 1024 * 1024;
2196 /* Calculate data_chunks */
2197 for (cache = first_cache_extent(used); cache;
2198 cache = next_cache_extent(cache)) {
2201 if (cache->start + cache->size < cur_off)
2203 if (cache->start > cur_off + min_stripe_size)
2204 cur_off = cache->start;
2205 cur_len = max(cache->start + cache->size - cur_off,
2207 ret = add_merge_cache_extent(data_chunks, cur_off, cur_len);
2213 * Remove reserved ranges, so we won't ever bother relocating an old
2214 * filesystem extent to another place.
2216 ret = wipe_reserved_ranges(data_chunks, min_stripe_size, 1);
2222 * Calculate free space.
2223 * Always round up the start bytenr, to avoid a metadata extent crossing a
2224 * stripe boundary, as the later mkfs_convert() won't have all the extent
2227 for (cache = first_cache_extent(data_chunks); cache;
2228 cache = next_cache_extent(cache)) {
2229 if (cache->start < cur_off)
2231 if (cache->start > cur_off) {
2235 len = cache->start - round_up(cur_off,
2237 insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
2239 ret = add_merge_cache_extent(free, insert_start, len);
2243 cur_off = cache->start + cache->size;
2245 /* Don't forget the last range */
2246 if (cctx->total_bytes > cur_off) {
2247 u64 len = cctx->total_bytes - cur_off;
2250 insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
2252 ret = add_merge_cache_extent(free, insert_start, len);
2257 /* Remove reserved bytes */
2258 ret = wipe_reserved_ranges(free, min_stripe_size, 0);
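/*
 * To sum up: cctx->data_chunks now covers all used bytes of the old fs,
 * grown to at least min_stripe_size and with the reserved ranges punched
 * out, while cctx->free holds the remaining stripe-aligned space left for
 * the new btrfs' own metadata/system chunks.
 */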
2263 * Read the used space, and since we have the used space,
2264 * calculate data_chunks and free for the later mkfs.
2266 static int convert_read_used_space(struct btrfs_convert_context *cctx)
2270 ret = cctx->convert_ops->read_used_space(cctx);
2274 ret = calculate_available_space(cctx);
2278 static int do_convert(const char *devname, int datacsum, int packing,
2279 int noxattr, u32 nodesize, int copylabel, const char *fslabel,
2280 int progress, u64 features)
2287 struct btrfs_root *root;
2288 struct btrfs_root *image_root;
2289 struct btrfs_convert_context cctx;
2290 struct btrfs_key key;
2291 char *subvol_name = NULL;
2292 struct task_ctx ctx;
2293 char features_buf[64];
2294 struct btrfs_mkfs_config mkfs_cfg;
2296 init_convert_context(&cctx);
2297 ret = convert_open_fs(devname, &cctx);
2300 ret = convert_read_used_space(&cctx);
2304 blocksize = cctx.blocksize;
2305 total_bytes = (u64)blocksize * (u64)cctx.block_count;
2306 if (blocksize < 4096) {
2307 fprintf(stderr, "block size is too small\n");
2310 if (btrfs_check_nodesize(nodesize, blocksize, features))
2312 fd = open(devname, O_RDWR);
2314 fprintf(stderr, "unable to open %s\n", devname);
2317 btrfs_parse_features_to_string(features_buf, features);
2318 if (features == BTRFS_MKFS_DEFAULT_FEATURES)
2319 strcat(features_buf, " (default)");
2321 printf("create btrfs filesystem:\n");
2322 printf("\tblocksize: %u\n", blocksize);
2323 printf("\tnodesize: %u\n", nodesize);
2324 printf("\tfeatures: %s\n", features_buf);
2326 mkfs_cfg.label = cctx.volume_name;
2327 mkfs_cfg.num_bytes = total_bytes;
2328 mkfs_cfg.nodesize = nodesize;
2329 mkfs_cfg.sectorsize = blocksize;
2330 mkfs_cfg.stripesize = blocksize;
2331 mkfs_cfg.features = features;
2332 /* New convert needs this space */
2333 mkfs_cfg.fs_uuid = malloc(BTRFS_UUID_UNPARSED_SIZE);
2334 mkfs_cfg.chunk_uuid = malloc(BTRFS_UUID_UNPARSED_SIZE);
2335 *(mkfs_cfg.fs_uuid) = '\0';
2336 *(mkfs_cfg.chunk_uuid) = '\0';
2338 ret = make_btrfs(fd, &mkfs_cfg, &cctx);
2340 fprintf(stderr, "unable to create initial ctree: %s\n",
2345 root = open_ctree_fd(fd, devname, mkfs_cfg.super_bytenr,
2346 OPEN_CTREE_WRITES | OPEN_CTREE_FS_PARTIAL);
2348 fprintf(stderr, "unable to open ctree\n");
2351 ret = init_btrfs(&mkfs_cfg, root, &cctx, datacsum, packing, noxattr);
2353 fprintf(stderr, "unable to setup the root tree\n");
2357 printf("creating %s image file.\n", cctx.convert_ops->name);
2358 ret = asprintf(&subvol_name, "%s_saved", cctx.convert_ops->name);
2360 fprintf(stderr, "error allocating subvolume name: %s_saved\n",
2361 cctx.convert_ops->name);
2364 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
2365 key.offset = (u64)-1;
2366 key.type = BTRFS_ROOT_ITEM_KEY;
2367 image_root = btrfs_read_fs_root(root->fs_info, &key);
2369 fprintf(stderr, "unable to create subvol\n");
2372 ret = create_image(image_root, &mkfs_cfg, &cctx, fd,
2373 mkfs_cfg.num_bytes, "image", datacsum);
2375 fprintf(stderr, "error during create_image %d\n", ret);
2379 printf("creating btrfs metadata.\n");
2380 ctx.max_copy_inodes = (cctx.inodes_count - cctx.free_inodes_count);
2381 ctx.cur_copy_inodes = 0;
2384 ctx.info = task_init(print_copied_inodes, after_copied_inodes,
2386 task_start(ctx.info);
2388 ret = copy_inodes(&cctx, root, datacsum, packing, noxattr, &ctx);
2390 fprintf(stderr, "error during copy_inodes %d\n", ret);
2394 task_stop(ctx.info);
2395 task_deinit(ctx.info);
2398 image_root = link_subvol(root, subvol_name, CONV_IMAGE_SUBVOL_OBJECTID);
2402 memset(root->fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE);
2403 if (copylabel == 1) {
2404 __strncpy_null(root->fs_info->super_copy->label,
2405 cctx.volume_name, BTRFS_LABEL_SIZE - 1);
2406 fprintf(stderr, "copy label '%s'\n",
2407 root->fs_info->super_copy->label);
2408 } else if (copylabel == -1) {
2409 strcpy(root->fs_info->super_copy->label, fslabel);
2410 fprintf(stderr, "set label to '%s'\n", fslabel);
2413 ret = close_ctree(root);
2415 fprintf(stderr, "error during close_ctree %d\n", ret);
2418 convert_close_fs(&cctx);
2419 clean_convert_context(&cctx);
2422 * If this step succeeds, we get a mountable btrfs. Otherwise
2423 * the source fs is left unchanged.
2425 ret = migrate_super_block(fd, mkfs_cfg.super_bytenr, blocksize);
2427 fprintf(stderr, "unable to migrate super block\n");
2432 root = open_ctree_fd(fd, devname, 0,
2433 OPEN_CTREE_WRITES | OPEN_CTREE_FS_PARTIAL);
2435 fprintf(stderr, "unable to open ctree\n");
2438 root->fs_info->finalize_on_close = 1;
2442 printf("conversion complete.\n");
2445 clean_convert_context(&cctx);
2450 "WARNING: an error occurred during chunk mapping fixup, filesystem mountable but not finalized\n");
2452 fprintf(stderr, "conversion aborted\n");
2457 * Check if a non-1:1 mapped chunk can be rolled back.
2458 * For new convert it's OK, while for old convert it's not.
2460 static int may_rollback_chunk(struct btrfs_fs_info *fs_info, u64 bytenr)
2462 struct btrfs_block_group_cache *bg;
2463 struct btrfs_key key;
2464 struct btrfs_path path;
2465 struct btrfs_root *extent_root = fs_info->extent_root;
2470 bg = btrfs_lookup_first_block_group(fs_info, bytenr);
2473 bg_start = bg->key.objectid;
2474 bg_end = bg->key.objectid + bg->key.offset;
2476 key.objectid = bg_end;
2477 key.type = BTRFS_METADATA_ITEM_KEY;
2479 btrfs_init_path(&path);
2481 ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
2486 struct btrfs_extent_item *ei;
2488 ret = btrfs_previous_extent_item(extent_root, &path, bg_start);
2496 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
2497 if (key.type == BTRFS_METADATA_ITEM_KEY)
2499 /* Now it's EXTENT_ITEM_KEY only */
2500 ei = btrfs_item_ptr(path.nodes[0], path.slots[0],
2501 struct btrfs_extent_item);
2503 * Found a data extent, which means this is an old convert chunk and must follow 1:1
2506 if (btrfs_extent_flags(path.nodes[0], ei)
2507 & BTRFS_EXTENT_FLAG_DATA) {
2512 btrfs_release_path(&path);
2516 static int may_rollback(struct btrfs_root *root)
2518 struct btrfs_fs_info *info = root->fs_info;
2519 struct btrfs_multi_bio *multi = NULL;
2527 if (btrfs_super_num_devices(info->super_copy) != 1)
2530 bytenr = BTRFS_SUPER_INFO_OFFSET;
2531 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
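/* Walk the chunk mapping across the whole device: every mapped range must
 * either be 1:1 mapped or, for the new convert, pass may_rollback_chunk(). */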
2534 ret = btrfs_map_block(&info->mapping_tree, WRITE, bytenr,
2535 &length, &multi, 0, NULL);
2537 if (ret == -ENOENT) {
2538 /* removed block group at the tail */
2539 if (length == (u64)-1)
2542 /* removed block group in the middle */
2548 num_stripes = multi->num_stripes;
2549 physical = multi->stripes[0].physical;
2552 if (num_stripes != 1) {
2553 error("num stripes for bytenr %llu is not 1", bytenr);
2558 * Extra check for the new convert: metadata chunks created by the new
2559 * convert are laid out much more freely than with the old convert, so
2560 * they do not need 1:1 mapping.
2562 if (physical != bytenr) {
2564 * Check if it's a metadata chunk that contains only metadata extents.
2567 ret = may_rollback_chunk(info, bytenr);
2573 if (bytenr >= total_bytes)
2581 static int do_rollback(const char *devname)
2586 struct btrfs_root *root;
2587 struct btrfs_root *image_root;
2588 struct btrfs_root *chunk_root;
2589 struct btrfs_dir_item *dir;
2590 struct btrfs_inode_item *inode;
2591 struct btrfs_file_extent_item *fi;
2592 struct btrfs_trans_handle *trans;
2593 struct extent_buffer *leaf;
2594 struct btrfs_block_group_cache *cache1;
2595 struct btrfs_block_group_cache *cache2;
2596 struct btrfs_key key;
2597 struct btrfs_path path;
2598 struct extent_io_tree io_tree;
2613 extent_io_tree_init(&io_tree);
2615 fd = open(devname, O_RDWR);
2617 fprintf(stderr, "unable to open %s\n", devname);
2620 root = open_ctree_fd(fd, devname, 0, OPEN_CTREE_WRITES);
2622 fprintf(stderr, "unable to open ctree\n");
2625 ret = may_rollback(root);
2627 fprintf(stderr, "unable to do rollback\n");
2631 sectorsize = root->sectorsize;
2632 buf = malloc(sectorsize);
2634 fprintf(stderr, "unable to allocate memory\n");
2638 btrfs_init_path(&path);
2640 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
2641 key.type = BTRFS_ROOT_BACKREF_KEY;
2642 key.offset = BTRFS_FS_TREE_OBJECTID;
2643 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path, 0,
2645 btrfs_release_path(&path);
2648 "ERROR: unable to convert ext2 image subvolume, is it deleted?\n");
2650 } else if (ret < 0) {
2652 "ERROR: unable to open ext2_saved, id=%llu: %s\n",
2653 (unsigned long long)key.objectid, strerror(-ret));
2657 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
2658 key.type = BTRFS_ROOT_ITEM_KEY;
2659 key.offset = (u64)-1;
2660 image_root = btrfs_read_fs_root(root->fs_info, &key);
2661 if (!image_root || IS_ERR(image_root)) {
2662 fprintf(stderr, "unable to open subvol %llu\n",
2663 (unsigned long long)key.objectid);
2668 root_dir = btrfs_root_dirid(&root->root_item);
2669 dir = btrfs_lookup_dir_item(NULL, image_root, &path,
2670 root_dir, name, strlen(name), 0);
2671 if (!dir || IS_ERR(dir)) {
2672 fprintf(stderr, "unable to find file %s\n", name);
2675 leaf = path.nodes[0];
2676 btrfs_dir_item_key_to_cpu(leaf, dir, &key);
2677 btrfs_release_path(&path);
2679 objectid = key.objectid;
2681 ret = btrfs_lookup_inode(NULL, image_root, &path, &key, 0);
2683 fprintf(stderr, "unable to find inode item\n");
2686 leaf = path.nodes[0];
2687 inode = btrfs_item_ptr(leaf, path.slots[0], struct btrfs_inode_item);
2688 total_bytes = btrfs_inode_size(leaf, inode);
2689 btrfs_release_path(&path);
2691 key.objectid = objectid;
2693 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
2694 ret = btrfs_search_slot(NULL, image_root, &key, &path, 0, 0);
2696 fprintf(stderr, "unable to find first file extent\n");
2697 btrfs_release_path(&path);
2701 /* build mapping tree for the relocated blocks */
2702 for (offset = 0; offset < total_bytes; ) {
2703 leaf = path.nodes[0];
2704 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
2705 ret = btrfs_next_leaf(root, &path);
2711 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
2712 if (key.objectid != objectid || key.offset != offset ||
2713 btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2716 fi = btrfs_item_ptr(leaf, path.slots[0],
2717 struct btrfs_file_extent_item);
2718 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2720 if (btrfs_file_extent_compression(leaf, fi) ||
2721 btrfs_file_extent_encryption(leaf, fi) ||
2722 btrfs_file_extent_other_encoding(leaf, fi))
2725 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2726 /* skip holes and direct mapped extents */
2727 if (bytenr == 0 || bytenr == offset)
2730 bytenr += btrfs_file_extent_offset(leaf, fi);
2731 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
2733 cache1 = btrfs_lookup_block_group(root->fs_info, offset);
2734 cache2 = btrfs_lookup_block_group(root->fs_info,
2735 offset + num_bytes - 1);
2737 * Here we must take both the old and the new convert behavior into account.
2739 * For the old convert there is no consistent chunk type that covers the
2740 * extent; META/DATA/SYS are all possible, the relocated one is only known
2741 * to be in a SYS chunk.
2742 * For the new convert the relocated extents are all covered by DATA chunks.
2744 * So no chunk type check is valid for both cases, and none is done here.
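 * set_state_private() below records, for each relocated range, the disk
 * location (bytenr) that currently holds the data belonging at the original
 * offset, so it can be copied back further below.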
2746 if (cache1 != cache2)
2749 set_extent_bits(&io_tree, offset, offset + num_bytes - 1,
2750 EXTENT_LOCKED, GFP_NOFS);
2751 set_state_private(&io_tree, offset, bytenr);
2753 offset += btrfs_file_extent_num_bytes(leaf, fi);
2756 btrfs_release_path(&path);
2758 if (offset < total_bytes) {
2759 fprintf(stderr, "unable to build extent mapping\n");
2760 fprintf(stderr, "converted filesystem after balance is unable to rollback\n");
2764 first_free = BTRFS_SUPER_INFO_OFFSET + 2 * sectorsize - 1;
2765 first_free &= ~((u64)sectorsize - 1);
2766 /* backup for extent #0 should exist */
2767 if(!test_range_bit(&io_tree, 0, first_free - 1, EXTENT_LOCKED, 1)) {
2768 fprintf(stderr, "no backup for the first extent\n");
2771 /* force no allocation from system block group */
2772 root->fs_info->system_allocs = -1;
2773 trans = btrfs_start_transaction(root, 1);
2776 * Re-COW the whole chunk tree. With system chunk allocations disabled
2777 * above, this moves all chunk tree blocks out of the system block group.
2779 chunk_root = root->fs_info->chunk_root;
2780 memset(&key, 0, sizeof(key));
2782 ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 1);
2786 ret = btrfs_next_leaf(chunk_root, &path);
2790 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
2791 btrfs_release_path(&path);
2793 btrfs_release_path(&path);
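/* Sum up the used space of all SYSTEM block groups; after the chunk tree
 * re-COW above, only the reserved range below first_free may still be in
 * use there. */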
2798 cache1 = btrfs_lookup_block_group(root->fs_info, offset);
2802 if (cache1->flags & BTRFS_BLOCK_GROUP_SYSTEM)
2803 num_bytes += btrfs_block_group_used(&cache1->item);
2805 offset = cache1->key.objectid + cache1->key.offset;
2807 /* only extent #0 left in system block group? */
2808 if (num_bytes > first_free) {
2809 fprintf(stderr, "unable to empty system block group\n");
2812 /* create a system chunk that maps the whole device */
2813 ret = prepare_system_chunk_sb(root->fs_info->super_copy);
2815 fprintf(stderr, "unable to update system chunk\n");
2819 ret = btrfs_commit_transaction(trans, root);
2822 ret = close_ctree(root);
2824 fprintf(stderr, "error during close_ctree %d\n", ret);
2828 /* zero btrfs super block mirrors */
2829 memset(buf, 0, sectorsize);
2830 for (i = 1 ; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2831 bytenr = btrfs_sb_offset(i);
2832 if (bytenr >= total_bytes)
2834 ret = pwrite(fd, buf, sectorsize, bytenr);
2835 if (ret != sectorsize) {
2837 "error during zeroing superblock %d: %d\n",
2843 sb_bytenr = (u64)-1;
2844 /* copy all relocated blocks back */
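/* The sector that belongs at BTRFS_SUPER_INFO_OFFSET is skipped in the loop
 * and restored last (via sb_bytenr), so the btrfs superblock is only
 * overwritten once everything else has been written back and fsynced. */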
2846 ret = find_first_extent_bit(&io_tree, 0, &start, &end,
2851 ret = get_state_private(&io_tree, start, &bytenr);
2854 clear_extent_bits(&io_tree, start, end, EXTENT_LOCKED,
2857 while (start <= end) {
2858 if (start == BTRFS_SUPER_INFO_OFFSET) {
2862 ret = pread(fd, buf, sectorsize, bytenr);
2864 fprintf(stderr, "error during pread %d\n", ret);
2867 BUG_ON(ret != sectorsize);
2868 ret = pwrite(fd, buf, sectorsize, start);
2870 fprintf(stderr, "error during pwrite %d\n", ret);
2873 BUG_ON(ret != sectorsize);
2875 start += sectorsize;
2876 bytenr += sectorsize;
2882 fprintf(stderr, "error during fsync %d\n", ret);
2886 * finally, overwrite btrfs super block.
2888 ret = pread(fd, buf, sectorsize, sb_bytenr);
2890 fprintf(stderr, "error during pread %d\n", ret);
2893 BUG_ON(ret != sectorsize);
2894 ret = pwrite(fd, buf, sectorsize, BTRFS_SUPER_INFO_OFFSET);
2896 fprintf(stderr, "error during pwrite %d\n", ret);
2899 BUG_ON(ret != sectorsize);
2902 fprintf(stderr, "error during fsync %d\n", ret);
2908 extent_io_tree_cleanup(&io_tree);
2909 printf("rollback complete.\n");
2916 fprintf(stderr, "rollback aborted.\n");
2920 static void print_usage(void)
2922 printf("usage: btrfs-convert [options] device\n");
2923 printf("options:\n");
2924 printf("\t-d|--no-datasum disable data checksum, sets NODATASUM\n");
2925 printf("\t-i|--no-xattr ignore xattrs and ACLs\n");
2926 printf("\t-n|--no-inline disable inlining of small files to metadata\n");
2927 printf("\t-N|--nodesize SIZE set filesystem metadata nodesize\n");
2928 printf("\t-r|--rollback roll back to the original filesystem\n");
2929 printf("\t-l|--label LABEL set filesystem label\n");
2930 printf("\t-L|--copy-label use label from converted filesystem\n");
2931 printf("\t-p|--progress show converting progress (default)\n");
2932 printf("\t-O|--features LIST comma separated list of filesystem features\n");
2933 printf("\t--no-progress show only overview, not the detailed progress\n");
2936 int main(int argc, char *argv[])
2942 u32 nodesize = max_t(u32, sysconf(_SC_PAGESIZE),
2943 BTRFS_MKFS_DEFAULT_NODE_SIZE);
2946 int usage_error = 0;
2949 char fslabel[BTRFS_LABEL_SIZE];
2950 u64 features = BTRFS_MKFS_DEFAULT_FEATURES;
2953 enum { GETOPT_VAL_NO_PROGRESS = 256 };
2954 static const struct option long_options[] = {
2955 { "no-progress", no_argument, NULL,
2956 GETOPT_VAL_NO_PROGRESS },
2957 { "no-datasum", no_argument, NULL, 'd' },
2958 { "no-inline", no_argument, NULL, 'n' },
2959 { "no-xattr", no_argument, NULL, 'i' },
2960 { "rollback", no_argument, NULL, 'r' },
2961 { "features", required_argument, NULL, 'O' },
2962 { "progress", no_argument, NULL, 'p' },
2963 { "label", required_argument, NULL, 'l' },
2964 { "copy-label", no_argument, NULL, 'L' },
2965 { "nodesize", required_argument, NULL, 'N' },
2966 { "help", no_argument, NULL, GETOPT_VAL_HELP},
2967 { NULL, 0, NULL, 0 }
2969 int c = getopt_long(argc, argv, "dinN:rl:LpO:", long_options, NULL);
2984 nodesize = parse_size(optarg);
2991 if (strlen(optarg) >= BTRFS_LABEL_SIZE) {
2993 "WARNING: label too long, trimmed to %d bytes\n",
2994 BTRFS_LABEL_SIZE - 1);
2996 __strncpy_null(fslabel, optarg, BTRFS_LABEL_SIZE - 1);
3005 char *orig = strdup(optarg);
3008 tmp = btrfs_parse_fs_features(tmp, &features);
3011 "Unrecognized filesystem feature '%s'\n",
3017 if (features & BTRFS_FEATURE_LIST_ALL) {
3018 btrfs_list_all_fs_features(
3019 ~BTRFS_CONVERT_ALLOWED_FEATURES);
3022 if (features & ~BTRFS_CONVERT_ALLOWED_FEATURES) {
3025 btrfs_parse_features_to_string(buf,
3026 features & ~BTRFS_CONVERT_ALLOWED_FEATURES);
3028 "ERROR: features not allowed for convert: %s\n",
3035 case GETOPT_VAL_NO_PROGRESS:
3038 case GETOPT_VAL_HELP:
3041 return c != GETOPT_VAL_HELP;
3045 if (check_argc_exact(argc - optind, 1)) {
3050 if (rollback && (!datacsum || noxattr || !packing)) {
3052 "Usage error: -d, -i, -n options do not apply to rollback\n");
3061 file = argv[optind];
3062 ret = check_mounted(file);
3064 fprintf(stderr, "Could not check mount status: %s\n",
3068 fprintf(stderr, "%s is mounted\n", file);
3073 ret = do_rollback(file);
3075 ret = do_convert(file, datacsum, packing, noxattr, nodesize,
3076 copylabel, fslabel, progress, features);