2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
19 #include "kerncompat.h"
21 #include <sys/ioctl.h>
22 #include <sys/mount.h>
25 #include <sys/types.h>
29 #include <uuid/uuid.h>
30 #include <linux/limits.h>
36 #include "transaction.h"
39 #include "task-utils.h"
42 #include <ext2fs/ext2_fs.h>
43 #include <ext2fs/ext2fs.h>
44 #include <ext2fs/ext2_ext_attr.h>
/* Gap between the first free btrfs objectid and the ext2 root inode number. */
46 #define INO_OFFSET (BTRFS_FIRST_FREE_OBJECTID - EXT2_ROOT_INO)
49 * Compatibility code for e2fsprogs 1.41 which doesn't support RO compat flag
51 * Unlike normal RO compat flag, BIGALLOC affects how e2fsprogs check used
52 * space, and btrfs-convert heavily relies on it.
/* Old e2fsprogs has no bigalloc/cluster support: treat 1 cluster == 1 block. */
54 #ifdef HAVE_OLD_E2FSPROGS
55 #define EXT2FS_CLUSTER_RATIO(fs) (1)
56 #define EXT2_CLUSTERS_PER_GROUP(s) (EXT2_BLOCKS_PER_GROUP(s))
57 #define EXT2FS_B2C(fs, blk) (blk)
/* Objectid of the subvolume that will hold the converted fs image file. */
62 #define CONV_IMAGE_SUBVOL_OBJECTID BTRFS_FIRST_FREE_OBJECTID
/* Progress-reporting context shared with the inode-copy progress thread. */
65 uint32_t max_copy_inodes;	/* total inodes to copy */
66 uint32_t cur_copy_inodes;	/* inodes copied so far */
67 struct task_info *info;	/* task-utils handle driving the periodic update */
/*
 * Progress thread entry: periodically print a spinner plus the
 * copied/total inode counters from the shared task_ctx.
 */
70 static void *print_copied_inodes(void *p)
72 struct task_ctx *priv = p;
73 const char work_indicator[] = { '.', 'o', 'O', 'o' };
76 task_period_start(priv->info, 1000 /* 1s */);
/* \r keeps rewriting the same console line instead of scrolling */
79 printf("copy inodes [%c] [%10d/%10d]\r",
80 work_indicator[count % 4], priv->cur_copy_inodes,
81 priv->max_copy_inodes);
83 task_period_wait(priv->info);
/* Called when inode copying finishes; presumably finalizes the progress line. */
89 static int after_copied_inodes(void *p)
97 struct btrfs_convert_context;
/*
 * Per-source-filesystem operation table: each supported source fs
 * (e.g. ext2) provides its own open/scan/copy/close implementations.
 */
98 struct btrfs_convert_operations {
100 int (*open_fs)(struct btrfs_convert_context *cctx, const char *devname);
101 int (*read_used_space)(struct btrfs_convert_context *cctx);
102 int (*copy_inodes)(struct btrfs_convert_context *cctx,
103 struct btrfs_root *root, int datacsum,
104 int packing, int noxattr, struct task_ctx *p);
105 void (*close_fs)(struct btrfs_convert_context *cctx);
/* Initialize the three extent cache trees tracked during conversion. */
108 static void init_convert_context(struct btrfs_convert_context *cctx)
110 cache_tree_init(&cctx->used);
111 cache_tree_init(&cctx->data_chunks);
112 cache_tree_init(&cctx->free);
/* Release all extent cache trees; counterpart of init_convert_context(). */
115 static void clean_convert_context(struct btrfs_convert_context *cctx)
117 free_extent_cache_tree(&cctx->used);
118 free_extent_cache_tree(&cctx->data_chunks);
119 free_extent_cache_tree(&cctx->free);
/* Thin dispatcher to the source filesystem's copy_inodes operation. */
122 static inline int copy_inodes(struct btrfs_convert_context *cctx,
123 struct btrfs_root *root, int datacsum,
124 int packing, int noxattr, struct task_ctx *p)
126 return cctx->convert_ops->copy_inodes(cctx, root, datacsum, packing,
/* Thin dispatcher to the source filesystem's close_fs operation. */
130 static inline void convert_close_fs(struct btrfs_convert_context *cctx)
132 cctx->convert_ops->close_fs(cctx);
/*
 * Return whether [bytenr, bytenr + num_bytes) overlaps any btrfs
 * superblock mirror, with each mirror offset rounded down to a
 * BTRFS_STRIPE_LEN boundary.
 */
135 static int intersect_with_sb(u64 bytenr, u64 num_bytes)
140 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
141 offset = btrfs_sb_offset(i);
142 offset &= ~((u64)BTRFS_STRIPE_LEN - 1);
144 if (bytenr < offset + BTRFS_STRIPE_LEN &&
145 bytenr + num_bytes > offset)
/*
 * Insert a directory entry plus the matching inode backref for @objectid
 * under directory @dir, and bump the parent inode size by name_len * 2
 * (dir item + inode ref), matching how btrfs accounts directory size.
 */
151 static int convert_insert_dirent(struct btrfs_trans_handle *trans,
152 struct btrfs_root *root,
153 const char *name, size_t name_len,
154 u64 dir, u64 objectid,
155 u8 file_type, u64 index_cnt,
156 struct btrfs_inode_item *inode)
160 struct btrfs_key location = {
161 .objectid = objectid,
163 .type = BTRFS_INODE_ITEM_KEY,
166 ret = btrfs_insert_dir_item(trans, root, name, name_len,
167 dir, &location, file_type, index_cnt);
170 ret = btrfs_insert_inode_ref(trans, root, name, name_len,
171 objectid, dir, index_cnt);
174 inode_size = btrfs_stack_inode_size(inode) + name_len * 2;
175 btrfs_set_stack_inode_size(inode, inode_size);
/*
 * Read @num_bytes at physical offset @bytenr from the latest device
 * into @buffer. A short read is treated as failure.
 */
180 static int read_disk_extent(struct btrfs_root *root, u64 bytenr,
181 u32 num_bytes, char *buffer)
184 struct btrfs_fs_devices *fs_devs = root->fs_info->fs_devices;
186 ret = pread(fs_devs->latest_bdev, buffer, num_bytes, bytenr);
187 if (ret != num_bytes)
/*
 * Compute and insert checksum items for the on-disk extent
 * [disk_bytenr, disk_bytenr + num_bytes), one sector at a time.
 */
196 static int csum_disk_extent(struct btrfs_trans_handle *trans,
197 struct btrfs_root *root,
198 u64 disk_bytenr, u64 num_bytes)
200 u32 blocksize = root->sectorsize;
205 buffer = malloc(blocksize);
208 for (offset = 0; offset < num_bytes; offset += blocksize) {
209 ret = read_disk_extent(root, disk_bytenr + offset,
/* disk_bytenr + num_bytes acts as the extent end boundary here */
213 ret = btrfs_csum_file_block(trans,
214 root->fs_info->csum_root,
215 disk_bytenr + num_bytes,
216 disk_bytenr + offset,
/*
 * State carried across block_iterate_proc() calls while walking a source
 * file's blocks: accumulates contiguous runs before recording extents.
 */
225 struct blk_iterate_data {
226 struct btrfs_trans_handle *trans;
227 struct btrfs_root *root;
228 struct btrfs_root *convert_root;
229 struct btrfs_inode_item *inode;
/*
 * Initialize a blk_iterate_data for one source inode, and resolve the
 * convert-image subvolume root, which is later used to translate old
 * disk block numbers into real btrfs bytenrs.
 */
240 static void init_blk_iterate_data(struct blk_iterate_data *data,
241 struct btrfs_trans_handle *trans,
242 struct btrfs_root *root,
243 struct btrfs_inode_item *inode,
244 u64 objectid, int checksum)
246 struct btrfs_key key;
251 data->objectid = objectid;
252 data->first_block = 0;
253 data->disk_block = 0;
254 data->num_blocks = 0;
255 data->boundary = (u64)-1;
256 data->checksum = checksum;
259 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
260 key.type = BTRFS_ROOT_ITEM_KEY;
261 key.offset = (u64)-1;
262 data->convert_root = btrfs_read_fs_root(root->fs_info, &key);
263 /* Impossible as we just opened it before */
264 BUG_ON(!data->convert_root || IS_ERR(data->convert_root));
/* The image file is the first inode inside the convert subvolume */
265 data->convert_ino = BTRFS_FIRST_FREE_OBJECTID + 1;
269 * Record a file extent in original filesystem into btrfs one.
270 * The special point is, old disk_block can point to a reserved range.
271 * So here, we don't use disk_block directly but search convert_root
272 * to get the real disk_bytenr.
274 static int record_file_blocks(struct blk_iterate_data *data,
275 u64 file_block, u64 disk_block, u64 num_blocks)
278 struct btrfs_root *root = data->root;
279 struct btrfs_root *convert_root = data->convert_root;
280 struct btrfs_path *path;
281 u64 file_pos = file_block * root->sectorsize;
282 u64 old_disk_bytenr = disk_block * root->sectorsize;
283 u64 num_bytes = num_blocks * root->sectorsize;
284 u64 cur_off = old_disk_bytenr;
286 /* Hole, pass it to record_file_extent directly */
287 if (old_disk_bytenr == 0)
288 return btrfs_record_file_extent(data->trans, root,
289 data->objectid, data->inode, file_pos, 0,
292 path = btrfs_alloc_path();
297 * Search real disk bytenr from convert root
299 while (cur_off < old_disk_bytenr + num_bytes) {
300 struct btrfs_key key;
301 struct btrfs_file_extent_item *fi;
302 struct extent_buffer *node;
304 u64 extent_disk_bytenr;
305 u64 extent_num_bytes;
306 u64 real_disk_bytenr;
/* Find the image-file extent covering cur_off in the convert root */
309 key.objectid = data->convert_ino;
310 key.type = BTRFS_EXTENT_DATA_KEY;
311 key.offset = cur_off;
313 ret = btrfs_search_slot(NULL, convert_root, &key, path, 0, 0);
317 ret = btrfs_previous_item(convert_root, path,
319 BTRFS_EXTENT_DATA_KEY);
327 node = path->nodes[0];
328 slot = path->slots[0];
329 btrfs_item_key_to_cpu(node, &key, slot);
/* The located extent must belong to the image inode and cover cur_off */
330 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY ||
331 key.objectid != data->convert_ino ||
332 key.offset > cur_off);
333 fi = btrfs_item_ptr(node, slot, struct btrfs_file_extent_item);
334 extent_disk_bytenr = btrfs_file_extent_disk_bytenr(node, fi);
335 extent_num_bytes = btrfs_file_extent_disk_num_bytes(node, fi);
336 BUG_ON(cur_off - key.offset >= extent_num_bytes);
337 btrfs_release_path(path);
/* disk_bytenr == 0 in the image extent means the range is a hole */
339 if (extent_disk_bytenr)
340 real_disk_bytenr = cur_off - key.offset +
343 real_disk_bytenr = 0;
344 cur_len = min(key.offset + extent_num_bytes,
345 old_disk_bytenr + num_bytes) - cur_off;
346 ret = btrfs_record_file_extent(data->trans, data->root,
347 data->objectid, data->inode, file_pos,
348 real_disk_bytenr, cur_len);
355 * No need to care about csum
356 * As every byte of old fs image is calculated for csum, no
357 * need to waste CPU cycles now.
360 btrfs_free_path(path);
/*
 * Per-block callback for walking a source file: coalesces contiguous
 * (file_block, disk_block) runs and flushes them via record_file_blocks()
 * whenever contiguity breaks, a hole appears, or the run would cross a
 * superblock region or block group boundary.
 */
364 static int block_iterate_proc(u64 disk_block, u64 file_block,
365 struct blk_iterate_data *idata)
370 struct btrfs_root *root = idata->root;
371 struct btrfs_block_group_cache *cache;
372 u64 bytenr = disk_block * root->sectorsize;
374 sb_region = intersect_with_sb(bytenr, root->sectorsize);
375 do_barrier = sb_region || disk_block >= idata->boundary;
376 if ((idata->num_blocks > 0 && do_barrier) ||
377 (file_block > idata->first_block + idata->num_blocks) ||
378 (disk_block != idata->disk_block + idata->num_blocks)) {
/* Flush the accumulated contiguous run */
379 if (idata->num_blocks > 0) {
380 ret = record_file_blocks(idata, idata->first_block,
385 idata->first_block += idata->num_blocks;
386 idata->num_blocks = 0;
/* Record any hole between the last run and this block */
388 if (file_block > idata->first_block) {
389 ret = record_file_blocks(idata, idata->first_block,
390 0, file_block - idata->first_block);
/* Round past the superblock region so the next run starts after it */
396 bytenr += BTRFS_STRIPE_LEN - 1;
397 bytenr &= ~((u64)BTRFS_STRIPE_LEN - 1);
399 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
401 bytenr = cache->key.objectid + cache->key.offset;
404 idata->first_block = file_block;
405 idata->disk_block = disk_block;
406 idata->boundary = bytenr / root->sectorsize;
/*
 * Create one file extent (or hole) of the fs image file starting at
 * @bytenr. On return *ret_len holds the length actually handled, clamped
 * by reserved superblock ranges, the used-space cache and block group
 * boundaries, so the caller loops until the whole image is covered.
 */
413 static int create_image_file_range(struct btrfs_trans_handle *trans,
414 struct btrfs_root *root,
415 struct cache_tree *used,
416 struct btrfs_inode_item *inode,
417 u64 ino, u64 bytenr, u64 *ret_len,
420 struct cache_extent *cache;
421 struct btrfs_block_group_cache *bg_cache;
427 BUG_ON(bytenr != round_down(bytenr, root->sectorsize));
428 BUG_ON(len != round_down(len, root->sectorsize));
429 len = min_t(u64, len, BTRFS_MAX_EXTENT_SIZE);
432 * Skip sb ranges first
433 * [0, 1M), [sb_offset(1), +64K), [sb_offset(2), +64K].
435 * Or we will insert a hole into current image file, and later
436 * migrate block will fail as there is already a file extent.
438 if (bytenr < 1024 * 1024) {
439 *ret_len = 1024 * 1024 - bytenr;
442 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
443 u64 cur = btrfs_sb_offset(i);
445 if (bytenr >= cur && bytenr < cur + BTRFS_STRIPE_LEN) {
446 *ret_len = cur + BTRFS_STRIPE_LEN - bytenr;
/* Second pass: clamp len so the range never crosses into a sb mirror */
450 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
451 u64 cur = btrfs_sb_offset(i);
456 * May still need to go through file extent inserts
458 if (bytenr < cur && bytenr + len >= cur) {
459 len = min_t(u64, len, cur - bytenr);
465 * Drop out, no need to insert anything
467 if (bytenr >= cur && bytenr < cur + BTRFS_STRIPE_LEN) {
468 *ret_len = cur + BTRFS_STRIPE_LEN - bytenr;
473 cache = search_cache_extent(used, bytenr);
475 if (cache->start <= bytenr) {
477 * |///////Used///////|
/* Range starts inside a used extent: record real data, in-place */
481 len = min_t(u64, len, cache->start + cache->size -
483 disk_bytenr = bytenr;
/* Range is free until the next used extent: record a hole */
490 len = min(len, cache->start - bytenr);
505 /* Check if the range is in a data block group */
506 bg_cache = btrfs_lookup_block_group(root->fs_info, bytenr);
509 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_DATA))
512 /* The extent should never cross block group boundary */
513 len = min_t(u64, len, bg_cache->key.objectid +
514 bg_cache->key.offset - bytenr);
517 BUG_ON(len != round_down(len, root->sectorsize));
518 ret = btrfs_record_file_extent(trans, root, ino, inode, bytenr,
524 ret = csum_disk_extent(trans, root, bytenr, len);
530 * Relocate old fs data in one reserved ranges
532 * Since all old fs data in reserved range is not covered by any chunk nor
533 * data extent, we don't need to handle any reference but add new
534 * extent/reference, which makes codes more clear
536 static int migrate_one_reserved_range(struct btrfs_trans_handle *trans,
537 struct btrfs_root *root,
538 struct cache_tree *used,
539 struct btrfs_inode_item *inode, int fd,
540 u64 ino, u64 start, u64 len, int datacsum)
544 u64 hole_start = start;
546 struct cache_extent *cache;
547 struct btrfs_key key;
548 struct extent_buffer *eb;
/* Walk every used extent inside [start, start + len) */
551 while (cur_off < start + len) {
552 cache = lookup_cache_extent(used, cur_off, cur_len);
555 cur_off = max(cache->start, cur_off);
556 cur_len = min(cache->start + cache->size, start + len) -
558 BUG_ON(cur_len < root->sectorsize);
560 /* reserve extent for the data */
561 ret = btrfs_reserve_extent(trans, root, cur_len, 0, 0, (u64)-1,
/* Temporary extent buffer used to copy old data to its new location */
566 eb = malloc(sizeof(*eb) + cur_len);
572 ret = pread(fd, eb->data, cur_len, cur_off);
574 ret = (ret < 0 ? ret : -EIO);
578 eb->start = key.objectid;
579 eb->len = key.offset;
/* Write the copied data to the newly reserved extent */
582 ret = write_and_map_eb(trans, root, eb);
587 /* Now handle extent item and file extent things */
588 ret = btrfs_record_file_extent(trans, root, ino, inode, cur_off,
589 key.objectid, key.offset);
592 /* Finally, insert csum items */
594 ret = csum_disk_extent(trans, root, key.objectid,
597 /* Don't forget to insert hole */
598 hole_len = cur_off - hole_start;
600 ret = btrfs_record_file_extent(trans, root, ino, inode,
601 hole_start, 0, hole_len);
606 cur_off += key.offset;
607 hole_start = cur_off;
608 cur_len = start + len - cur_off;
/* Trailing hole after the last used extent, if any */
611 if (start + len - hole_start > 0)
612 ret = btrfs_record_file_extent(trans, root, ino, inode,
613 hole_start, 0, start + len - hole_start);
618 * Relocate the used ext2 data in reserved ranges
620 * [btrfs_sb_offset(1), +BTRFS_STRIPE_LEN)
621 * [btrfs_sb_offset(2), +BTRFS_STRIPE_LEN)
623 static int migrate_reserved_ranges(struct btrfs_trans_handle *trans,
624 struct btrfs_root *root,
625 struct cache_tree *used,
626 struct btrfs_inode_item *inode, int fd,
627 u64 ino, u64 total_bytes, int datacsum)
/* Range [0, 1M) which contains the primary superblock */
635 cur_len = 1024 * 1024;
636 ret = migrate_one_reserved_range(trans, root, used, inode, fd, ino,
637 cur_off, cur_len, datacsum);
641 /* second sb(first sb is included in 0~1M) */
642 cur_off = btrfs_sb_offset(1);
643 cur_len = min(total_bytes, cur_off + BTRFS_STRIPE_LEN) - cur_off;
/* Skip mirrors that lie beyond the device */
644 if (cur_off > total_bytes)
646 ret = migrate_one_reserved_range(trans, root, used, inode, fd, ino,
647 cur_off, cur_len, datacsum);
/* Third superblock mirror */
652 cur_off = btrfs_sb_offset(2);
653 cur_len = min(total_bytes, cur_off + BTRFS_STRIPE_LEN) - cur_off;
654 if (cur_off > total_bytes)
656 ret = migrate_one_reserved_range(trans, root, used, inode, fd, ino,
657 cur_off, cur_len, datacsum);
662 * Helper for expand and merge extent_cache for wipe_one_reserved_range() to
663 * handle wiping a range that exists in cache.
665 static int _expand_extent_cache(struct cache_tree *tree,
666 struct cache_extent *entry,
667 u64 min_stripe_size, int backward)
669 struct cache_extent *ce;
/* Already large enough, nothing to do */
672 if (entry->size >= min_stripe_size)
674 diff = min_stripe_size - entry->size;
/* Grow towards lower addresses */
677 ce = prev_cache_extent(entry);
680 if (ce->start + ce->size >= entry->start - diff) {
681 /* Directly merge with previous extent */
682 ce->size = entry->start + entry->size - ce->start;
683 remove_cache_extent(tree, entry);
688 /* No overlap, normal extent */
689 if (entry->start < diff) {
690 error("cannot find space for data chunk layout");
693 entry->start -= diff;
/* Grow towards higher addresses */
697 ce = next_cache_extent(entry);
700 if (entry->start + entry->size + diff >= ce->start) {
701 /* Directly merge with next extent */
702 entry->size = ce->start + ce->size - entry->start;
703 remove_cache_extent(tree, ce);
713 * Remove one reserve range from given cache tree
714 * if min_stripe_size is non-zero, it will ensure for split case,
715 * all its split cache extent is no smaller than @min_strip_size / 2.
717 static int wipe_one_reserved_range(struct cache_tree *tree,
718 u64 start, u64 len, u64 min_stripe_size,
721 struct cache_extent *cache;
724 BUG_ON(ensure_size && min_stripe_size == 0);
726 * The logical here is simplified to handle special cases only
727 * So we don't need to consider merge case for ensure_size
729 BUG_ON(min_stripe_size && (min_stripe_size < len * 2 ||
730 min_stripe_size / 2 < BTRFS_STRIPE_LEN));
732 /* Also, wipe range should already be aligned */
733 BUG_ON(start != round_down(start, BTRFS_STRIPE_LEN) ||
734 start + len != round_up(start + len, BTRFS_STRIPE_LEN));
/* From here on, each split half only needs min_stripe_size / 2 */
736 min_stripe_size /= 2;
738 cache = lookup_cache_extent(tree, start, len);
/* Case 1: wipe range covers the head of the cached extent */
742 if (start <= cache->start) {
744 * |--------cache---------|
747 BUG_ON(start + len <= cache->start);
750 * The wipe size is smaller than min_stripe_size / 2,
751 * so the result length should still meet min_stripe_size
752 * And no need to do alignment
754 cache->size -= (start + len - cache->start);
755 if (cache->size == 0) {
756 remove_cache_extent(tree, cache);
761 BUG_ON(ensure_size && cache->size < min_stripe_size);
763 cache->start = start + len;
/* Case 2: wipe range is strictly inside the cached extent -> split */
765 } else if (start > cache->start && start + len < cache->start +
768 * |-------cache-----|
771 u64 old_start = cache->start;
772 u64 old_len = cache->size;
773 u64 insert_start = start + len;
776 cache->size = start - cache->start;
777 /* Expand the leading half part if needed */
778 if (ensure_size && cache->size < min_stripe_size) {
779 ret = _expand_extent_cache(tree, cache,
785 /* And insert the new one */
786 insert_len = old_start + old_len - start - len;
787 ret = add_merge_cache_extent(tree, insert_start, insert_len);
791 /* Expand the last half part if needed */
792 if (ensure_size && insert_len < min_stripe_size) {
793 cache = lookup_cache_extent(tree, insert_start,
795 if (!cache || cache->start != insert_start ||
796 cache->size != insert_len)
798 ret = _expand_extent_cache(tree, cache,
/* Case 3: wipe range covers the tail of the cached extent */
807 * Wipe len should be small enough and no need to expand the
810 cache->size = start - cache->start;
811 BUG_ON(ensure_size && cache->size < min_stripe_size);
816 * Remove reserved ranges from given cache_tree
818 * It will remove the following ranges
820 * 2) 2nd superblock, +64K (make sure chunks are 64K aligned)
821 * 3) 3rd superblock, +64K
823 * @min_stripe must be given for safety check
824 * and if @ensure_size is given, it will ensure affected cache_extent will be
825 * larger than min_stripe_size
827 static int wipe_reserved_ranges(struct cache_tree *tree, u64 min_stripe_size,
/* [0, 1M) reserved area containing the primary superblock */
832 ret = wipe_one_reserved_range(tree, 0, 1024 * 1024, min_stripe_size,
836 ret = wipe_one_reserved_range(tree, btrfs_sb_offset(1),
837 BTRFS_STRIPE_LEN, min_stripe_size, ensure_size);
840 ret = wipe_one_reserved_range(tree, btrfs_sb_offset(2),
841 BTRFS_STRIPE_LEN, min_stripe_size, ensure_size);
/*
 * From cctx->used, derive the ranges to cover with data chunks
 * (cctx->data_chunks) and the remaining free space (cctx->free),
 * then strip superblock-reserved ranges from both trees.
 */
845 static int calculate_available_space(struct btrfs_convert_context *cctx)
847 struct cache_tree *used = &cctx->used;
848 struct cache_tree *data_chunks = &cctx->data_chunks;
849 struct cache_tree *free = &cctx->free;
850 struct cache_extent *cache;
853 * Twice the minimal chunk size, to allow later wipe_reserved_ranges()
854 * works without need to consider overlap
856 u64 min_stripe_size = 2 * 16 * 1024 * 1024;
859 /* Calculate data_chunks */
860 for (cache = first_cache_extent(used); cache;
861 cache = next_cache_extent(cache)) {
/* Merge extents closer than min_stripe_size into one chunk range */
864 if (cache->start + cache->size < cur_off)
866 if (cache->start > cur_off + min_stripe_size)
867 cur_off = cache->start;
868 cur_len = max(cache->start + cache->size - cur_off,
870 ret = add_merge_cache_extent(data_chunks, cur_off, cur_len);
876 * remove reserved ranges, so we won't ever bother relocating an old
877 * filesystem extent to other place.
879 ret = wipe_reserved_ranges(data_chunks, min_stripe_size, 1);
885 * Calculate free space
886 * Always round up the start bytenr, to avoid metadata extent cross
887 * stripe boundary, as later mkfs_convert() won't have all the extent
890 for (cache = first_cache_extent(data_chunks); cache;
891 cache = next_cache_extent(cache)) {
892 if (cache->start < cur_off)
894 if (cache->start > cur_off) {
/* Gap between data chunk ranges becomes free space */
898 len = cache->start - round_up(cur_off,
900 insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
902 ret = add_merge_cache_extent(free, insert_start, len);
906 cur_off = cache->start + cache->size;
908 /* Don't forget the last range */
909 if (cctx->total_bytes > cur_off) {
910 u64 len = cctx->total_bytes - cur_off;
913 insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
915 ret = add_merge_cache_extent(free, insert_start, len);
920 /* Remove reserved bytes */
921 ret = wipe_reserved_ranges(free, min_stripe_size, 0);
927 * Read used space, and since we have the used space,
928 * calculate data_chunks and free for later mkfs
930 static int convert_read_used_space(struct btrfs_convert_context *cctx)
934 ret = cctx->convert_ops->read_used_space(cctx);
938 ret = calculate_available_space(cctx);
943 * Create the fs image file of old filesystem.
945 * This is completely fs independent as we have cctx->used, only
946 * need to create file extents pointing to all the positions.
948 static int create_image(struct btrfs_root *root,
949 struct btrfs_mkfs_config *cfg,
950 struct btrfs_convert_context *cctx, int fd,
951 u64 size, char *name, int datacsum)
953 struct btrfs_inode_item buf;
954 struct btrfs_trans_handle *trans;
955 struct btrfs_path *path = NULL;
956 struct btrfs_key key;
957 struct cache_extent *cache;
958 struct cache_tree used_tmp;
/* Image file is read-only; skip data csums if not requested */
961 u64 flags = BTRFS_INODE_READONLY;
965 flags |= BTRFS_INODE_NODATASUM;
967 trans = btrfs_start_transaction(root, 1);
971 cache_tree_init(&used_tmp);
/* Create the image inode and link it under the subvolume root dir */
973 ret = btrfs_find_free_objectid(trans, root, BTRFS_FIRST_FREE_OBJECTID,
977 ret = btrfs_new_inode(trans, root, ino, 0400 | S_IFREG);
980 ret = btrfs_change_inode_flags(trans, root, ino, flags);
983 ret = btrfs_add_link(trans, root, ino, BTRFS_FIRST_FREE_OBJECTID, name,
984 strlen(name), BTRFS_FT_REG_FILE, NULL, 1);
988 path = btrfs_alloc_path();
/* Snapshot the on-disk inode item into a stack copy for later updates */
994 key.type = BTRFS_INODE_ITEM_KEY;
997 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
999 ret = (ret > 0 ? -ENOENT : ret);
1002 read_extent_buffer(path->nodes[0], &buf,
1003 btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
1005 btrfs_release_path(path);
1008 * Create a new used space cache, which doesn't contain the reserved
1011 for (cache = first_cache_extent(&cctx->used); cache;
1012 cache = next_cache_extent(cache)) {
1013 ret = add_cache_extent(&used_tmp, cache->start, cache->size);
1017 ret = wipe_reserved_ranges(&used_tmp, 0, 0);
1022 * Start from 1M, as 0~1M is reserved, and create_image_file_range()
1023 * can't handle bytenr 0(will consider it as a hole)
1026 while (cur < size) {
1027 u64 len = size - cur;
1029 ret = create_image_file_range(trans, root, &used_tmp,
1030 &buf, ino, cur, &len, datacsum);
1035 /* Handle the reserved ranges */
1036 ret = migrate_reserved_ranges(trans, root, &cctx->used, &buf, fd, ino,
1037 cfg->num_bytes, datacsum);
/* Write the accumulated inode item (with final size) back to disk */
1041 key.type = BTRFS_INODE_ITEM_KEY;
1043 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1045 ret = (ret > 0 ? -ENOENT : ret);
1048 btrfs_set_stack_inode_size(&buf, cfg->num_bytes);
1049 write_extent_buffer(path->nodes[0], &buf,
1050 btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
1053 free_extent_cache_tree(&used_tmp);
1054 btrfs_free_path(path);
1055 btrfs_commit_transaction(trans, root);
/*
 * Link subvolume @root_objectid into @root under directory name @base.
 * If the name collides, "<base>N" (N = 0..1023) is tried instead.
 * Returns the newly read subvolume root, or NULL on failure.
 */
1059 static struct btrfs_root* link_subvol(struct btrfs_root *root,
1060 const char *base, u64 root_objectid)
1062 struct btrfs_trans_handle *trans;
1063 struct btrfs_fs_info *fs_info = root->fs_info;
1064 struct btrfs_root *tree_root = fs_info->tree_root;
1065 struct btrfs_root *new_root = NULL;
1066 struct btrfs_path *path;
1067 struct btrfs_inode_item *inode_item;
1068 struct extent_buffer *leaf;
1069 struct btrfs_key key;
1070 u64 dirid = btrfs_root_dirid(&root->root_item);
1072 char buf[BTRFS_NAME_LEN + 1]; /* for snprintf null */
1078 if (len == 0 || len > BTRFS_NAME_LEN)
1081 path = btrfs_alloc_path();
/* Find the highest existing dir index to pick the next free one */
1084 key.objectid = dirid;
1085 key.type = BTRFS_DIR_INDEX_KEY;
1086 key.offset = (u64)-1;
1088 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1091 if (path->slots[0] > 0) {
1093 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1094 if (key.objectid == dirid && key.type == BTRFS_DIR_INDEX_KEY)
1095 index = key.offset + 1;
1097 btrfs_release_path(path);
1099 trans = btrfs_start_transaction(root, 1);
/* Look up the parent directory inode item for the size update below */
1102 key.objectid = dirid;
1104 key.type = BTRFS_INODE_ITEM_KEY;
1106 ret = btrfs_lookup_inode(trans, root, path, &key, 1);
1108 leaf = path->nodes[0];
1109 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1110 struct btrfs_inode_item);
1112 key.objectid = root_objectid;
1113 key.offset = (u64)-1;
1114 key.type = BTRFS_ROOT_ITEM_KEY;
/* Try @base first, then "<base>0".."<base>1023" on name collision */
1116 memcpy(buf, base, len);
1117 for (i = 0; i < 1024; i++) {
1118 ret = btrfs_insert_dir_item(trans, root, buf, len,
1119 dirid, &key, BTRFS_FT_DIR, index);
1122 len = snprintf(buf, ARRAY_SIZE(buf), "%s%d", base, i);
1123 if (len < 1 || len > BTRFS_NAME_LEN) {
/* Directory size accounting: name is counted twice (item + index) */
1131 btrfs_set_inode_size(leaf, inode_item, len * 2 +
1132 btrfs_inode_size(leaf, inode_item));
1133 btrfs_mark_buffer_dirty(leaf);
1134 btrfs_release_path(path);
1136 /* add the backref first */
1137 ret = btrfs_add_root_ref(trans, tree_root, root_objectid,
1138 BTRFS_ROOT_BACKREF_KEY,
1139 root->root_key.objectid,
1140 dirid, index, buf, len);
1143 /* now add the forward ref */
1144 ret = btrfs_add_root_ref(trans, tree_root, root->root_key.objectid,
1145 BTRFS_ROOT_REF_KEY, root_objectid,
1146 dirid, index, buf, len);
1148 ret = btrfs_commit_transaction(trans, root);
1151 new_root = btrfs_read_fs_root(fs_info, &key);
1152 if (IS_ERR(new_root))
1155 btrfs_free_path(path);
/*
 * Create a new subvolume with id @root_objectid by copying the fs root
 * node, inserting the root item, and creating its empty root directory.
 */
1159 static int create_subvol(struct btrfs_trans_handle *trans,
1160 struct btrfs_root *root, u64 root_objectid)
1162 struct extent_buffer *tmp;
1163 struct btrfs_root *new_root;
1164 struct btrfs_key key;
1165 struct btrfs_root_item root_item;
1168 ret = btrfs_copy_root(trans, root, root->node, &tmp,
1172 memcpy(&root_item, &root->root_item, sizeof(root_item));
1173 btrfs_set_root_bytenr(&root_item, tmp->start);
1174 btrfs_set_root_level(&root_item, btrfs_header_level(tmp));
1175 btrfs_set_root_generation(&root_item, trans->transid);
1176 free_extent_buffer(tmp);
1178 key.objectid = root_objectid;
1179 key.type = BTRFS_ROOT_ITEM_KEY;
1180 key.offset = trans->transid;
1181 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
/* offset (u64)-1 finds the latest root item for this objectid */
1184 key.offset = (u64)-1;
1185 new_root = btrfs_read_fs_root(root->fs_info, &key);
1186 BUG_ON(!new_root || IS_ERR(new_root));
1188 ret = btrfs_make_root_dir(trans, new_root, BTRFS_FIRST_FREE_OBJECTID);
1195 * New make_btrfs() has handle system and meta chunks quite well.
1196 * So only need to add remaining data chunks.
1198 static int make_convert_data_block_groups(struct btrfs_trans_handle *trans,
1199 struct btrfs_fs_info *fs_info,
1200 struct btrfs_mkfs_config *cfg,
1201 struct btrfs_convert_context *cctx)
1203 struct btrfs_root *extent_root = fs_info->extent_root;
1204 struct cache_tree *data_chunks = &cctx->data_chunks;
1205 struct cache_extent *cache;
1210 * Don't create data chunk over 10% of the convert device
1211 * And for single chunk, don't create chunk larger than 1G.
1213 max_chunk_size = cfg->num_bytes / 10;
1214 max_chunk_size = min((u64)(1024 * 1024 * 1024), max_chunk_size);
1215 max_chunk_size = round_down(max_chunk_size, extent_root->sectorsize);
/* Cover each required range, splitting it into <= max_chunk_size chunks */
1217 for (cache = first_cache_extent(data_chunks); cache;
1218 cache = next_cache_extent(cache)) {
1219 u64 cur = cache->start;
1221 while (cur < cache->start + cache->size) {
1223 u64 cur_backup = cur;
1225 len = min(max_chunk_size,
1226 cache->start + cache->size - cur);
1227 ret = btrfs_alloc_data_chunk(trans, extent_root,
1229 BTRFS_BLOCK_GROUP_DATA, 1);
1232 ret = btrfs_make_block_group(trans, extent_root, 0,
1233 BTRFS_BLOCK_GROUP_DATA,
1234 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
1245 * Init the temp btrfs to a operational status.
1247 * It will fix the extent usage accounting(XXX: Do we really need?) and
1248 * insert needed data chunks, to ensure all old fs data extents are covered
1249 * by DATA chunks, preventing wrong chunks are allocated.
1251 * And also create convert image subvolume and relocation tree.
1252 * (XXX: Not need again?)
1253 * But the convert image subvolume is *NOT* linked to fs tree yet.
1255 static int init_btrfs(struct btrfs_mkfs_config *cfg, struct btrfs_root *root,
1256 struct btrfs_convert_context *cctx, int datacsum,
1257 int packing, int noxattr)
1259 struct btrfs_key location;
1260 struct btrfs_trans_handle *trans;
1261 struct btrfs_fs_info *fs_info = root->fs_info;
1265 * Don't alloc any metadata/system chunk, as we don't want
1266 * any meta/sys chunk allocated before all data chunks are inserted.
1267 * Or we screw up the chunk layout just like the old implement.
1269 fs_info->avoid_sys_chunk_alloc = 1;
1270 fs_info->avoid_meta_chunk_alloc = 1;
1271 trans = btrfs_start_transaction(root, 1);
1273 ret = btrfs_fix_block_accounting(trans, root);
1276 ret = make_convert_data_block_groups(trans, fs_info, cfg, cctx);
1279 ret = btrfs_make_root_dir(trans, fs_info->tree_root,
1280 BTRFS_ROOT_TREE_DIR_OBJECTID);
/* Make the fs root the "default" entry of the root tree directory */
1283 memcpy(&location, &root->root_key, sizeof(location));
1284 location.offset = (u64)-1;
1285 ret = btrfs_insert_dir_item(trans, fs_info->tree_root, "default", 7,
1286 btrfs_super_root_dir(fs_info->super_copy),
1287 &location, BTRFS_FT_DIR, 0);
1290 ret = btrfs_insert_inode_ref(trans, fs_info->tree_root, "default", 7,
1292 btrfs_super_root_dir(fs_info->super_copy), 0);
1295 btrfs_set_root_dirid(&fs_info->fs_root->root_item,
1296 BTRFS_FIRST_FREE_OBJECTID);
1298 /* subvol for fs image file */
1299 ret = create_subvol(trans, root, CONV_IMAGE_SUBVOL_OBJECTID);
1302 /* subvol for data relocation tree */
1303 ret = create_subvol(trans, root, BTRFS_DATA_RELOC_TREE_OBJECTID);
1307 ret = btrfs_commit_transaction(trans, root);
/* Re-enable normal chunk allocation once the layout is settled */
1308 fs_info->avoid_sys_chunk_alloc = 0;
1309 fs_info->avoid_meta_chunk_alloc = 0;
1315 * Migrate super block to its default position and zero 0 ~ 16k
1317 static int migrate_super_block(int fd, u64 old_bytenr, u32 sectorsize)
1320 struct extent_buffer *buf;
1321 struct btrfs_super_block *super;
1325 BUG_ON(sectorsize < sizeof(*super));
1326 buf = malloc(sizeof(*buf) + sectorsize);
1330 buf->len = sectorsize;
/* Read the temporary superblock from its old location */
1331 ret = pread(fd, buf->data, sectorsize, old_bytenr);
1332 if (ret != sectorsize)
1335 super = (struct btrfs_super_block *)buf->data;
1336 BUG_ON(btrfs_super_bytenr(super) != old_bytenr);
/* Re-stamp bytenr, re-checksum, and write to the canonical offset */
1337 btrfs_set_super_bytenr(super, BTRFS_SUPER_INFO_OFFSET);
1339 csum_tree_block_size(buf, BTRFS_CRC32_SIZE, 0);
1340 ret = pwrite(fd, buf->data, sectorsize, BTRFS_SUPER_INFO_OFFSET);
1341 if (ret != sectorsize)
/* Zero everything below the superblock so no stale data remains */
1348 memset(buf->data, 0, sectorsize);
1349 for (bytenr = 0; bytenr < BTRFS_SUPER_INFO_OFFSET; ) {
1350 len = BTRFS_SUPER_INFO_OFFSET - bytenr;
1351 if (len > sectorsize)
1353 ret = pwrite(fd, buf->data, len, bytenr);
1355 fprintf(stderr, "unable to zero fill device\n");
/*
 * Build a single-stripe SYSTEM chunk entry spanning the whole device
 * directly inside the superblock's sys_chunk_array.
 */
1369 static int prepare_system_chunk_sb(struct btrfs_super_block *super)
1371 struct btrfs_chunk *chunk;
1372 struct btrfs_disk_key *key;
1373 u32 sectorsize = btrfs_super_sectorsize(super);
/* The array layout is: disk key immediately followed by the chunk */
1375 key = (struct btrfs_disk_key *)(super->sys_chunk_array);
1376 chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
1377 sizeof(struct btrfs_disk_key));
1379 btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1380 btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
1381 btrfs_set_disk_key_offset(key, 0);
1383 btrfs_set_stack_chunk_length(chunk, btrfs_super_total_bytes(super));
1384 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
1385 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
1386 btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
1387 btrfs_set_stack_chunk_io_align(chunk, sectorsize);
1388 btrfs_set_stack_chunk_io_width(chunk, sectorsize);
1389 btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
1390 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1391 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
/* devid copied as-is: both sides are little-endian on-disk format */
1392 chunk->stripe.devid = super->dev_item.devid;
1393 btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
1394 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
1395 btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
1399 #if BTRFSCONVERT_EXT2
1402 * Open Ext2fs in readonly mode, read block allocation bitmap and
1403 * inode bitmap into memory.
1405 static int ext2_open_fs(struct btrfs_convert_context *cctx, const char *name)
1408 ext2_filsys ext2_fs;
1412 ret = ext2fs_open(name, 0, 0, 0, unix_io_manager, &ext2_fs);
1414 fprintf(stderr, "ext2fs_open: %s\n", error_message(ret));
1418 * We need to know exactly the used space, some RO compat flags like
1419 * BIGALLOC will affect how used space is present.
1420 * So we need manually check any unsupported RO compat flags
1422 ro_feature = ext2_fs->super->s_feature_ro_compat;
1423 if (ro_feature & ~EXT2_LIB_FEATURE_RO_COMPAT_SUPP) {
1425 "unsupported RO features detected: %x, abort convert to avoid possible corruption",
/*
 * Report the same mask that was tested above; using the plain
 * COMPAT mask here would print the wrong offending flag set.
 */
1426 ro_feature & ~EXT2_LIB_FEATURE_RO_COMPAT_SUPP);
1429 ret = ext2fs_read_inode_bitmap(ext2_fs);
1431 fprintf(stderr, "ext2fs_read_inode_bitmap: %s\n",
1432 error_message(ret));
1435 ret = ext2fs_read_block_bitmap(ext2_fs);
1437 fprintf(stderr, "ext2fs_read_block_bitmap: %s\n",
1438 error_message(ret));
1442 * search each block group for a free inode. this set up
1443 * uninit block/inode bitmaps appropriately.
1446 while (ino <= ext2_fs->super->s_inodes_count) {
1448 ext2fs_new_inode(ext2_fs, ino, 0, NULL, &foo);
1449 ino += EXT2_INODES_PER_GROUP(ext2_fs->super);
/* btrfs dir items need the ext2 filetype feature to map file types */
1452 if (!(ext2_fs->super->s_feature_incompat &
1453 EXT2_FEATURE_INCOMPAT_FILETYPE)) {
1454 fprintf(stderr, "filetype feature is missing\n");
/* Publish source fs geometry into the convert context */
1458 cctx->fs_data = ext2_fs;
1459 cctx->blocksize = ext2_fs->blocksize;
1460 cctx->block_count = ext2_fs->super->s_blocks_count;
1461 cctx->total_bytes = ext2_fs->blocksize * ext2_fs->super->s_blocks_count;
1462 cctx->volume_name = strndup(ext2_fs->super->s_volume_name, 16);
1463 cctx->first_data_block = ext2_fs->super->s_first_data_block;
1464 cctx->inodes_count = ext2_fs->super->s_inodes_count;
1465 cctx->free_inodes_count = ext2_fs->super->s_free_inodes_count;
1468 ext2fs_close(ext2_fs);
/*
 * Translate one block group's in-memory cluster bitmap into byte ranges
 * and merge them into the `used` cache tree.  `bitmap` is group-local;
 * `offset` converts a group-relative cluster index to a filesystem-wide
 * cluster number before scaling to bytes.
 * NOTE(review): listing elides some lines (declarations, error check,
 * return).
 */
1472 static int __ext2_add_one_block(ext2_filsys fs, char *bitmap,
1473 unsigned long group_nr, struct cache_tree *used)
1475 unsigned long offset;
1479 offset = fs->super->s_first_data_block;
/* work in cluster units so BIGALLOC-style ratios are handled */
1480 offset /= EXT2FS_CLUSTER_RATIO(fs);
1481 offset += group_nr * EXT2_CLUSTERS_PER_GROUP(fs->super);
1482 for (i = 0; i < EXT2_CLUSTERS_PER_GROUP(fs->super); i++) {
1483 if (ext2fs_test_bit(i, bitmap)) {
/* cluster index -> block number -> byte offset */
1486 start = (i + offset) * EXT2FS_CLUSTER_RATIO(fs);
1487 start *= fs->blocksize;
/* adjacent used ranges are coalesced by the cache tree */
1488 ret = add_merge_cache_extent(used, start,
1498 * Read all used ext2 space into cctx->used cache tree
 *
 * Walks every block group, copies its allocation bitmap out of the
 * already-loaded fs->block_map, and feeds it to __ext2_add_one_block().
 * NOTE(review): listing elides some lines (declarations, error gotos,
 * cleanup/return).
1500 static int ext2_read_used_space(struct btrfs_convert_context *cctx)
1502 ext2_filsys fs = (ext2_filsys)cctx->fs_data;
1503 blk64_t blk_itr = EXT2FS_B2C(fs, fs->super->s_first_data_block);
1504 struct cache_tree *used_tree = &cctx->used;
1505 char *block_bitmap = NULL;
/* one group's bitmap, in bytes */
1510 block_nbytes = EXT2_CLUSTERS_PER_GROUP(fs->super) / 8;
1511 /* Shouldn't happen */
1512 BUG_ON(!fs->block_map);
1514 block_bitmap = malloc(block_nbytes);
1518 for (i = 0; i < fs->group_desc_count; i++) {
1519 ret = ext2fs_get_block_bitmap_range(fs->block_map, blk_itr,
1520 block_nbytes * 8, block_bitmap);
1522 error("fail to get bitmap from ext2, %s",
1526 ret = __ext2_add_one_block(fs, block_bitmap, i, used_tree);
1528 error("fail to build used space tree, %s",
/* advance iterator one group's worth of clusters */
1532 blk_itr += EXT2_CLUSTERS_PER_GROUP(fs->super);
/*
 * Release per-convert-context ext2 resources: the duplicated volume
 * name and the libext2fs filesystem handle.
 */
1539 static void ext2_close_fs(struct btrfs_convert_context *cctx)
1541 if (cctx->volume_name) {
1542 free(cctx->volume_name);
/* clear so a later clean_convert_context() won't double free */
1543 cctx->volume_name = NULL;
1545 ext2fs_close(cctx->fs_data);
1548 struct dir_iterate_data {
1549 struct btrfs_trans_handle *trans;
1550 struct btrfs_root *root;
1551 struct btrfs_inode_item *inode;
/*
 * Map ext2 directory-entry filetype codes to their btrfs equivalents;
 * indexed by the EXT2_FT_* value stored in the dirent.
 */
1558 static u8 ext2_filetype_conversion_table[EXT2_FT_MAX] = {
1559 [EXT2_FT_UNKNOWN] = BTRFS_FT_UNKNOWN,
1560 [EXT2_FT_REG_FILE] = BTRFS_FT_REG_FILE,
1561 [EXT2_FT_DIR] = BTRFS_FT_DIR,
1562 [EXT2_FT_CHRDEV] = BTRFS_FT_CHRDEV,
1563 [EXT2_FT_BLKDEV] = BTRFS_FT_BLKDEV,
1564 [EXT2_FT_FIFO] = BTRFS_FT_FIFO,
1565 [EXT2_FT_SOCK] = BTRFS_FT_SOCK,
1566 [EXT2_FT_SYMLINK] = BTRFS_FT_SYMLINK,
/*
 * Callback for ext2fs_dir_iterate2(): convert one ext2 directory entry
 * into a btrfs dir item/index.  ".." is not inserted; it only records
 * the parent objectid in idata.  Errors are stashed in idata->errcode
 * (lines elided in this listing presumably abort iteration — confirm).
 */
1569 static int ext2_dir_iterate_proc(ext2_ino_t dir, int entry,
1570 struct ext2_dir_entry *dirent,
1571 int offset, int blocksize,
1572 char *buf,void *priv_data)
1577 char dotdot[] = "..";
1578 struct dir_iterate_data *idata = (struct dir_iterate_data *)priv_data;
/* low byte is the name length; high byte holds the filetype */
1581 name_len = dirent->name_len & 0xFF;
/* shift ext2 inode numbers into the btrfs objectid space */
1583 objectid = dirent->inode + INO_OFFSET;
1584 if (!strncmp(dirent->name, dotdot, name_len)) {
1585 if (name_len == 2) {
/* ".." must be seen only once per directory */
1586 BUG_ON(idata->parent != 0);
1587 idata->parent = objectid;
/* skip ext2 reserved/special inodes */
1591 if (dirent->inode < EXT2_GOOD_OLD_FIRST_INO)
1594 file_type = dirent->name_len >> 8;
1595 BUG_ON(file_type > EXT2_FT_SYMLINK);
1597 ret = convert_insert_dirent(idata->trans, idata->root, dirent->name,
1598 name_len, idata->objectid, objectid,
1599 ext2_filetype_conversion_table[file_type],
1600 idata->index_cnt, idata->inode);
1602 idata->errcode = ret;
/*
 * Create btrfs directory items for every entry of an ext2 directory by
 * iterating it with ext2_dir_iterate_proc().  If the directory turned
 * out to be its own parent (the root), insert the ".." inode ref
 * pointing at itself.
 * NOTE(review): listing elides some lines (struct init fields, error
 * handling, return).
 */
1610 static int ext2_create_dir_entries(struct btrfs_trans_handle *trans,
1611 struct btrfs_root *root, u64 objectid,
1612 struct btrfs_inode_item *btrfs_inode,
1613 ext2_filsys ext2_fs, ext2_ino_t ext2_ino)
1617 struct dir_iterate_data data = {
1620 .inode = btrfs_inode,
1621 .objectid = objectid,
1627 err = ext2fs_dir_iterate2(ext2_fs, ext2_ino, 0, NULL,
1628 ext2_dir_iterate_proc, &data);
/* self-parented directory == filesystem root: add ".." -> itself */
1632 if (ret == 0 && data.parent == objectid) {
1633 ret = btrfs_insert_inode_ref(trans, root, "..", 2,
1634 objectid, objectid, 0);
1638 fprintf(stderr, "ext2fs_dir_iterate2: %s\n", error_message(err));
/*
 * Callback for ext2fs_block_iterate2(): forward each data block number
 * to the generic block_iterate_proc() accumulator; on failure record
 * the error in idata->errcode (abort path elided in this listing).
 */
1642 static int ext2_block_iterate_proc(ext2_filsys fs, blk_t *blocknr,
1643 e2_blkcnt_t blockcnt, blk_t ref_block,
1644 int ref_offset, void *priv_data)
1647 struct blk_iterate_data *idata;
1648 idata = (struct blk_iterate_data *)priv_data;
1649 ret = block_iterate_proc(*blocknr, blockcnt, idata);
1651 idata->errcode = ret;
1658  * traverse file's data blocks, record these data blocks as file extents.
 *
 * Small files (<= max inline size, starting at block 0) are packed as an
 * inline extent when `packing` is set; everything else becomes regular
 * file extents via record_file_blocks(), including a trailing hole up to
 * the rounded-up inode size.
 * NOTE(review): listing elides some lines (declarations, error gotos,
 * return/cleanup).
1660 static int ext2_create_file_extents(struct btrfs_trans_handle *trans,
1661 struct btrfs_root *root, u64 objectid,
1662 struct btrfs_inode_item *btrfs_inode,
1663 ext2_filsys ext2_fs, ext2_ino_t ext2_ino,
1664 int datacsum, int packing)
1667 char *buffer = NULL;
1670 u32 sectorsize = root->sectorsize;
1671 u64 inode_size = btrfs_stack_inode_size(btrfs_inode);
1672 struct blk_iterate_data data;
1674 init_blk_iterate_data(&data, trans, root, btrfs_inode, objectid,
1677 err = ext2fs_block_iterate2(ext2_fs, ext2_ino, BLOCK_FLAG_DATA_ONLY,
1678 NULL, ext2_block_iterate_proc, &data);
/* inline-pack path: whole file fits in a leaf's inline data area */
1684 if (packing && data.first_block == 0 && data.num_blocks > 0 &&
1685 inode_size <= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
1686 u64 num_bytes = data.num_blocks * sectorsize;
1687 u64 disk_bytenr = data.disk_block * sectorsize;
1690 buffer = malloc(num_bytes);
1693 ret = read_disk_extent(root, disk_bytenr, num_bytes, buffer);
/* trim sector padding back to the real file size */
1696 if (num_bytes > inode_size)
1697 num_bytes = inode_size;
1698 ret = btrfs_insert_inline_extent(trans, root, objectid,
1699 0, buffer, num_bytes);
1702 nbytes = btrfs_stack_inode_nbytes(btrfs_inode) + num_bytes;
1703 btrfs_set_stack_inode_nbytes(btrfs_inode, nbytes);
/* regular path: flush the last accumulated contiguous run */
1704 } else if (data.num_blocks > 0) {
1705 ret = record_file_blocks(&data, data.first_block,
1706 data.disk_block, data.num_blocks);
1710 data.first_block += data.num_blocks;
/* pad with a hole extent up to the sector-rounded file size */
1711 last_block = (inode_size + sectorsize - 1) / sectorsize;
1712 if (last_block > data.first_block) {
1713 ret = record_file_blocks(&data, data.first_block, 0,
1714 last_block - data.first_block);
1720 fprintf(stderr, "ext2fs_block_iterate2: %s\n", error_message(err));
/*
 * Convert a symlink.  Slow symlinks (target stored in a data block) are
 * converted by temporarily bumping i_size and reusing the file-extent
 * path; fast symlinks keep the target inside i_block[] and are inserted
 * directly as an inline extent including the terminating NUL.
 * NOTE(review): listing elides some lines (declarations, returns).
 */
1724 static int ext2_create_symbol_link(struct btrfs_trans_handle *trans,
1725 struct btrfs_root *root, u64 objectid,
1726 struct btrfs_inode_item *btrfs_inode,
1727 ext2_filsys ext2_fs, ext2_ino_t ext2_ino,
1728 struct ext2_inode *ext2_inode)
1732 u64 inode_size = btrfs_stack_inode_size(btrfs_inode);
/* nonzero data blocks -> slow symlink, stored like file data */
1733 if (ext2fs_inode_data_blocks(ext2_fs, ext2_inode)) {
/* +1 so the NUL terminator is carried through the extent path */
1734 btrfs_set_stack_inode_size(btrfs_inode, inode_size + 1);
1735 ret = ext2_create_file_extents(trans, root, objectid,
1736 btrfs_inode, ext2_fs, ext2_ino, 1, 1);
1737 btrfs_set_stack_inode_size(btrfs_inode, inode_size);
/* fast symlink: target string lives in the inode's i_block area */
1741 pathname = (char *)&(ext2_inode->i_block[0]);
1742 BUG_ON(pathname[inode_size] != 0);
1743 ret = btrfs_insert_inline_extent(trans, root, objectid, 0,
1744 pathname, inode_size + 1);
1745 btrfs_set_stack_inode_nbytes(btrfs_inode, inode_size + 1);
1750  * Following xattr/acl related codes are based on codes in
1751  * fs/ext3/xattr.c and fs/ext3/acl.c
 *
 * BHDR/BFIRST address the header/first entry of an external xattr
 * block; IHDR/IFIRST do the same for the in-inode xattr area that
 * follows the fixed 128-byte inode plus i_extra_isize.
1753 #define EXT2_XATTR_BHDR(ptr) ((struct ext2_ext_attr_header *)(ptr))
1754 #define EXT2_XATTR_BFIRST(ptr) \
1755 ((struct ext2_ext_attr_entry *)(EXT2_XATTR_BHDR(ptr) + 1))
1756 #define EXT2_XATTR_IHDR(inode) \
1757 ((struct ext2_ext_attr_header *) ((void *)(inode) + \
1758 EXT2_GOOD_OLD_INODE_SIZE + (inode)->i_extra_isize))
/* in-inode area has only the h_magic word, not a full block header */
1759 #define EXT2_XATTR_IFIRST(inode) \
1760 ((struct ext2_ext_attr_entry *) ((void *)EXT2_XATTR_IHDR(inode) + \
1761 sizeof(EXT2_XATTR_IHDR(inode)->h_magic)))
/*
 * Walk the xattr entry list and verify every entry header lies inside
 * [entry, end); guards against corrupted on-disk entry chains before
 * they are dereferenced.  (Failure return elided in this listing.)
 */
1763 static int ext2_xattr_check_names(struct ext2_ext_attr_entry *entry,
1766 struct ext2_ext_attr_entry *next;
1768 while (!EXT2_EXT_IS_LAST_ENTRY(entry)) {
1769 next = EXT2_EXT_ATTR_NEXT(entry);
1770 if ((void *)next >= end)
/*
 * Sanity-check an external xattr block: magic and block count must
 * match, then the entry-name chain is validated against the block end.
 */
1777 static int ext2_xattr_check_block(const char *buf, size_t size)
1780 struct ext2_ext_attr_header *header = EXT2_XATTR_BHDR(buf);
1782 if (header->h_magic != EXT2_EXT_ATTR_MAGIC ||
1783 header->h_blocks != 1)
1785 error = ext2_xattr_check_names(EXT2_XATTR_BFIRST(buf), buf + size);
/*
 * Validate a single xattr entry's value: it must be inline (no value
 * block) and its offset+size must fit inside the containing region.
 */
1789 static int ext2_xattr_check_entry(struct ext2_ext_attr_entry *entry,
1792 size_t value_size = entry->e_value_size;
1794 if (entry->e_value_block != 0 || value_size > size ||
1795 entry->e_value_offs + value_size > size)
/* On-disk ext2 POSIX ACL version and tag/qualifier constants, mirrored
 * from the kernel's POSIX ACL definitions. */
1800 #define EXT2_ACL_VERSION 0x0001
1802 /* 23.2.5 acl_tag_t values */
1804 #define ACL_UNDEFINED_TAG (0x00)
1805 #define ACL_USER_OBJ (0x01)
1806 #define ACL_USER (0x02)
1807 #define ACL_GROUP_OBJ (0x04)
1808 #define ACL_GROUP (0x08)
1809 #define ACL_MASK (0x10)
1810 #define ACL_OTHER (0x20)
1812 /* 23.2.7 ACL qualifier constants */
1814 #define ACL_UNDEFINED_ID ((id_t)-1)
1825 } ext2_acl_entry_short;
/*
 * Derive the number of ACL entries from the on-disk blob size.  The
 * first four entries (owner/group/mask/other) use the short form; any
 * remainder must be an exact multiple of the long (id-carrying) form.
 * NOTE(review): error returns for size mismatches are elided in this
 * listing — presumably -1, confirm against full source.
 */
1831 static inline int ext2_acl_count(size_t size)
1834 size -= sizeof(ext2_acl_header);
1835 s = size - 4 * sizeof(ext2_acl_entry_short);
/* s < 0 case (fewer than 4 entries): all entries are short form */
1837 if (size % sizeof(ext2_acl_entry_short))
1839 return size / sizeof(ext2_acl_entry_short);
1841 if (s % sizeof(ext2_acl_entry))
/* 4 short entries plus the long-form remainder */
1843 return s / sizeof(ext2_acl_entry) + 4;
1847 #define ACL_EA_VERSION 0x0002
1857 acl_ea_entry a_entries[0];
/* Size of the xattr-format ACL blob holding `count` entries. */
1860 static inline size_t acl_ea_size(int count)
1862 return sizeof(acl_ea_header) + count * sizeof(acl_ea_entry);
/*
 * Convert an on-disk ext2 ACL blob (version 1) into the generic
 * system.posix_acl_* xattr format (version 2) expected by btrfs.
 * Short-form tags (USER_OBJ/GROUP_OBJ/MASK/OTHER) carry no qualifier
 * id; USER/GROUP tags copy the id through.
 * NOTE(review): listing elides some lines (error returns, final return).
 */
1865 static int ext2_acl_to_xattr(void *dst, const void *src,
1866 size_t dst_size, size_t src_size)
1869 const void *end = src + src_size;
1870 acl_ea_header *ext_acl = (acl_ea_header *)dst;
1871 acl_ea_entry *dst_entry = ext_acl->a_entries;
1872 ext2_acl_entry *src_entry;
1874 if (src_size < sizeof(ext2_acl_header))
1876 if (((ext2_acl_header *)src)->a_version !=
1877 cpu_to_le32(EXT2_ACL_VERSION))
1879 src += sizeof(ext2_acl_header);
1880 count = ext2_acl_count(src_size);
/* caller must have sized dst via acl_ea_size(count) */
1884 BUG_ON(dst_size < acl_ea_size(count));
1885 ext_acl->a_version = cpu_to_le32(ACL_EA_VERSION);
1886 for (i = 0; i < count; i++, dst_entry++) {
1887 src_entry = (ext2_acl_entry *)src;
1888 if (src + sizeof(ext2_acl_entry_short) > end)
1890 dst_entry->e_tag = src_entry->e_tag;
1891 dst_entry->e_perm = src_entry->e_perm;
1892 switch (le16_to_cpu(src_entry->e_tag)) {
/* short-form tags: no qualifier stored on disk */
1897 src += sizeof(ext2_acl_entry_short);
1898 dst_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
/* long-form (ACL_USER/ACL_GROUP): qualifier id follows */
1902 src += sizeof(ext2_acl_entry);
1905 dst_entry->e_id = src_entry->e_id;
/* Map ext2 xattr name-index values to full attribute-name prefixes;
 * indices 2/3 are the POSIX ACL attributes, handled specially below.
 * NOTE(review): other index entries are elided in this listing. */
1918 static char *xattr_prefix_table[] = {
1920 [2] = "system.posix_acl_access",
1921 [3] = "system.posix_acl_default",
/*
 * Copy one ext2 xattr entry into the btrfs tree for `objectid`.
 * Builds the full attribute name from the prefix table plus the entry
 * name; ACL entries (index 2/3) are first converted to the generic
 * xattr ACL format.  Oversized xattrs that would not fit in a leaf are
 * skipped with a warning rather than failing the convert.
 * NOTE(review): listing elides some lines — presumably including the
 * `data = databuf; datalen = bufsize;` swap after ACL conversion and
 * the cleanup path; confirm against full source.
 */
1926 static int ext2_copy_single_xattr(struct btrfs_trans_handle *trans,
1927 struct btrfs_root *root, u64 objectid,
1928 struct ext2_ext_attr_entry *entry,
1929 const void *data, u32 datalen)
1934 void *databuf = NULL;
1935 char namebuf[XATTR_NAME_MAX + 1];
1937 name_index = entry->e_name_index;
/* unknown namespace index: nothing to map the name to, skip */
1938 if (name_index >= ARRAY_SIZE(xattr_prefix_table) ||
1939 xattr_prefix_table[name_index] == NULL)
1941 name_len = strlen(xattr_prefix_table[name_index]) +
1943 if (name_len >= sizeof(namebuf))
/* POSIX ACLs need format conversion before insertion */
1946 if (name_index == 2 || name_index == 3) {
1947 size_t bufsize = acl_ea_size(ext2_acl_count(datalen));
1948 databuf = malloc(bufsize);
1951 ret = ext2_acl_to_xattr(databuf, data, bufsize, datalen);
/* full name = "<prefix><entry name>" */
1957 strncpy(namebuf, xattr_prefix_table[name_index], XATTR_NAME_MAX);
1958 strncat(namebuf, EXT2_EXT_ATTR_NAME(entry), entry->e_name_len);
1959 if (name_len + datalen > BTRFS_LEAF_DATA_SIZE(root) -
1960 sizeof(struct btrfs_item) - sizeof(struct btrfs_dir_item)) {
1961 fprintf(stderr, "skip large xattr on inode %Lu name %.*s\n",
1962 objectid - INO_OFFSET, name_len, namebuf);
1965 ret = btrfs_insert_xattr_item(trans, root, namebuf, name_len,
1966 data, datalen, objectid);
/*
 * Copy all extended attributes of an ext2 inode: first the in-inode
 * xattr area (inodes larger than 128 bytes), then the external xattr
 * block referenced by i_file_acl, validating each region before use.
 * NOTE(review): listing elides some lines (declarations, error gotos,
 * cleanup/return).
 */
1972 static int ext2_copy_extended_attrs(struct btrfs_trans_handle *trans,
1973 struct btrfs_root *root, u64 objectid,
1974 struct btrfs_inode_item *btrfs_inode,
1975 ext2_filsys ext2_fs, ext2_ino_t ext2_ino)
1981 u32 block_size = ext2_fs->blocksize;
1982 u32 inode_size = EXT2_INODE_SIZE(ext2_fs->super);
1983 struct ext2_inode_large *ext2_inode;
1984 struct ext2_ext_attr_entry *entry;
1986 char *buffer = NULL;
1987 char inode_buf[EXT2_GOOD_OLD_INODE_SIZE];
/* old-format inodes fit on the stack; large ones need a heap copy */
1989 if (inode_size <= EXT2_GOOD_OLD_INODE_SIZE) {
1990 ext2_inode = (struct ext2_inode_large *)inode_buf;
1992 ext2_inode = (struct ext2_inode_large *)malloc(inode_size);
1996 err = ext2fs_read_inode_full(ext2_fs, ext2_ino, (void *)ext2_inode,
1999 fprintf(stderr, "ext2fs_read_inode_full: %s\n",
2000 error_message(err));
/* in-inode xattrs only exist past the classic 128-byte inode */
2005 if (ext2_ino > ext2_fs->super->s_first_ino &&
2006 inode_size > EXT2_GOOD_OLD_INODE_SIZE) {
2007 if (EXT2_GOOD_OLD_INODE_SIZE +
2008 ext2_inode->i_extra_isize > inode_size) {
2012 if (ext2_inode->i_extra_isize != 0 &&
2013 EXT2_XATTR_IHDR(ext2_inode)->h_magic ==
2014 EXT2_EXT_ATTR_MAGIC) {
2020 void *end = (void *)ext2_inode + inode_size;
2021 entry = EXT2_XATTR_IFIRST(ext2_inode);
2022 total = end - (void *)entry;
2023 ret = ext2_xattr_check_names(entry, end);
2026 while (!EXT2_EXT_IS_LAST_ENTRY(entry)) {
2027 ret = ext2_xattr_check_entry(entry, total);
/* values are addressed relative to the first entry */
2030 data = (void *)EXT2_XATTR_IFIRST(ext2_inode) +
2031 entry->e_value_offs;
2032 datalen = entry->e_value_size;
2033 ret = ext2_copy_single_xattr(trans, root, objectid,
2034 entry, data, datalen);
2037 entry = EXT2_EXT_ATTR_NEXT(entry);
/* no external xattr block -> done */
2041 if (ext2_inode->i_file_acl == 0)
2044 buffer = malloc(block_size);
2049 err = ext2fs_read_ext_attr(ext2_fs, ext2_inode->i_file_acl, buffer);
2051 fprintf(stderr, "ext2fs_read_ext_attr: %s\n",
2052 error_message(err));
2056 ret = ext2_xattr_check_block(buffer, block_size);
2060 entry = EXT2_XATTR_BFIRST(buffer);
2061 while (!EXT2_EXT_IS_LAST_ENTRY(entry)) {
2062 ret = ext2_xattr_check_entry(entry, block_size);
/* block xattr values are addressed from the block start */
2065 data = buffer + entry->e_value_offs;
2066 datalen = entry->e_value_size;
2067 ret = ext2_copy_single_xattr(trans, root, objectid,
2068 entry, data, datalen);
2071 entry = EXT2_EXT_ATTR_NEXT(entry);
/* free the heap copy only when one was actually allocated */
2075 if ((void *)ext2_inode != inode_buf)
/* Kernel-style device number packing: 20 minor bits below the major. */
2079 #define MINORBITS 20
2080 #define MKDEV(ma, mi) (((ma) << MINORBITS) | (mi))
/* Decode the legacy 16-bit dev_t: major in the high byte, minor low. */
2082 static inline dev_t old_decode_dev(u16 val)
2084 return MKDEV((val >> 8) & 255, val & 255);
/* Decode the 32-bit "huge" dev_t encoding: 12-bit major in bits 8-19,
 * minor split across bits 0-7 and 20-31 (matches the kernel layout). */
2087 static inline dev_t new_decode_dev(u32 dev)
2089 unsigned major = (dev & 0xfff00) >> 8;
2090 unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
2091 return MKDEV(major, minor);
/*
 * Fill a btrfs inode item from an ext2 inode: sizes, link count, 16+16
 * bit uid/gid, mode, timestamps (second granularity only; ext2 stores
 * no nanoseconds), and device numbers for special files.
 */
2094 static void ext2_copy_inode_item(struct btrfs_inode_item *dst,
2095 struct ext2_inode *src, u32 blocksize)
2097 btrfs_set_stack_inode_generation(dst, 1);
2098 btrfs_set_stack_inode_sequence(dst, 0);
2099 btrfs_set_stack_inode_transid(dst, 1);
2100 btrfs_set_stack_inode_size(dst, src->i_size);
/* nbytes accumulates later as extents are created */
2101 btrfs_set_stack_inode_nbytes(dst, 0);
2102 btrfs_set_stack_inode_block_group(dst, 0);
2103 btrfs_set_stack_inode_nlink(dst, src->i_links_count);
/* ext2 splits 32-bit ids across two 16-bit fields */
2104 btrfs_set_stack_inode_uid(dst, src->i_uid | (src->i_uid_high << 16));
2105 btrfs_set_stack_inode_gid(dst, src->i_gid | (src->i_gid_high << 16));
2106 btrfs_set_stack_inode_mode(dst, src->i_mode);
2107 btrfs_set_stack_inode_rdev(dst, 0);
2108 btrfs_set_stack_inode_flags(dst, 0);
2109 btrfs_set_stack_timespec_sec(&dst->atime, src->i_atime);
2110 btrfs_set_stack_timespec_nsec(&dst->atime, 0);
2111 btrfs_set_stack_timespec_sec(&dst->ctime, src->i_ctime);
2112 btrfs_set_stack_timespec_nsec(&dst->ctime, 0);
2113 btrfs_set_stack_timespec_sec(&dst->mtime, src->i_mtime);
2114 btrfs_set_stack_timespec_nsec(&dst->mtime, 0);
2115 btrfs_set_stack_timespec_sec(&dst->otime, 0);
2116 btrfs_set_stack_timespec_nsec(&dst->otime, 0);
/* btrfs directories track size differently; start fresh */
2118 if (S_ISDIR(src->i_mode)) {
2119 btrfs_set_stack_inode_size(dst, 0);
2120 btrfs_set_stack_inode_nlink(dst, 1);
/* regular files may use i_size_high for the upper 32 bits */
2122 if (S_ISREG(src->i_mode)) {
2123 btrfs_set_stack_inode_size(dst, (u64)src->i_size_high << 32 |
/* special files: device number encoded in i_block[0] (old 16-bit
 * format) or i_block[1] (new 32-bit format) */
2126 if (!S_ISREG(src->i_mode) && !S_ISDIR(src->i_mode) &&
2127 !S_ISLNK(src->i_mode)) {
2128 if (src->i_block[0]) {
2129 btrfs_set_stack_inode_rdev(dst,
2130 old_decode_dev(src->i_block[0]));
2132 btrfs_set_stack_inode_rdev(dst,
2133 new_decode_dev(src->i_block[1]));
2136 memset(&dst->reserved, 0, sizeof(dst->reserved));
2140  * copy a single inode. do all the required works, such as cloning
2141  * inode item, creating file extents and creating directory entries.
 *
 * Dispatches on the file type: regular files get extents, directories
 * get dir entries, symlinks get their special handling; other types
 * need only the inode item.  Unlinked inodes are skipped entirely.
 * NOTE(review): listing elides some lines (error checks, noxattr
 * gating, case labels/default).
2143 static int ext2_copy_single_inode(struct btrfs_trans_handle *trans,
2144 struct btrfs_root *root, u64 objectid,
2145 ext2_filsys ext2_fs, ext2_ino_t ext2_ino,
2146 struct ext2_inode *ext2_inode,
2147 int datacsum, int packing, int noxattr)
2150 struct btrfs_inode_item btrfs_inode;
/* deleted inode: nothing to copy */
2152 if (ext2_inode->i_links_count == 0)
2155 ext2_copy_inode_item(&btrfs_inode, ext2_inode, ext2_fs->blocksize);
/* without data checksums, mark the file NODATASUM up front */
2156 if (!datacsum && S_ISREG(ext2_inode->i_mode)) {
2157 u32 flags = btrfs_stack_inode_flags(&btrfs_inode) |
2158 BTRFS_INODE_NODATASUM;
2159 btrfs_set_stack_inode_flags(&btrfs_inode, flags);
2162 switch (ext2_inode->i_mode & S_IFMT) {
2164 ret = ext2_create_file_extents(trans, root, objectid,
2165 &btrfs_inode, ext2_fs, ext2_ino, datacsum, packing);
2168 ret = ext2_create_dir_entries(trans, root, objectid,
2169 &btrfs_inode, ext2_fs, ext2_ino);
2172 ret = ext2_create_symbol_link(trans, root, objectid,
2173 &btrfs_inode, ext2_fs, ext2_ino, ext2_inode);
2183 ret = ext2_copy_extended_attrs(trans, root, objectid,
2184 &btrfs_inode, ext2_fs, ext2_ino);
2188 return btrfs_insert_inode(trans, root, objectid, &btrfs_inode);
2192  * scan ext2's inode bitmap and copy all used inodes.
 *
 * Iterates every in-use inode via libext2fs' inode scan, copies each
 * one inside a transaction, and commits every ~4096 dirty blocks to
 * bound memory use; progress is reported through the task context.
 * NOTE(review): listing elides some lines (error checks, loop exits,
 * final return).
2194 static int ext2_copy_inodes(struct btrfs_convert_context *cctx,
2195 struct btrfs_root *root,
2196 int datacsum, int packing, int noxattr, struct task_ctx *p)
2198 ext2_filsys ext2_fs = cctx->fs_data;
2201 ext2_inode_scan ext2_scan;
2202 struct ext2_inode ext2_inode;
2203 ext2_ino_t ext2_ino;
2205 struct btrfs_trans_handle *trans;
2207 trans = btrfs_start_transaction(root, 1);
2210 err = ext2fs_open_inode_scan(ext2_fs, 0, &ext2_scan);
2212 fprintf(stderr, "ext2fs_open_inode_scan: %s\n", error_message(err));
2215 while (!(err = ext2fs_get_next_inode(ext2_scan, &ext2_ino,
2217 /* no more inodes */
2220 /* skip special inode in ext2fs */
2221 if (ext2_ino < EXT2_GOOD_OLD_FIRST_INO &&
2222 ext2_ino != EXT2_ROOT_INO)
2224 objectid = ext2_ino + INO_OFFSET;
2225 ret = ext2_copy_single_inode(trans, root,
2226 objectid, ext2_fs, ext2_ino,
2227 &ext2_inode, datacsum, packing,
/* progress counter consumed by print_copied_inodes() */
2229 p->cur_copy_inodes++;
/* periodic commit keeps the transaction size bounded */
2232 if (trans->blocks_used >= 4096) {
2233 ret = btrfs_commit_transaction(trans, root);
2235 trans = btrfs_start_transaction(root, 1);
2240 fprintf(stderr, "ext2fs_get_next_inode: %s\n", error_message(err));
2243 ret = btrfs_commit_transaction(trans, root);
2245 ext2fs_close_inode_scan(ext2_scan);
/* Source-filesystem operations vtable registering ext2 as a convert
 * source.  NOTE(review): the .name initializer is elided in this
 * listing. */
2250 static const struct btrfs_convert_operations ext2_convert_ops = {
2252 .open_fs = ext2_open_fs,
2253 .read_used_space = ext2_read_used_space,
2254 .copy_inodes = ext2_copy_inodes,
2255 .close_fs = ext2_close_fs,
2260 static const struct btrfs_convert_operations *convert_operations[] = {
2261 #if BTRFSCONVERT_EXT2
/*
 * Probe the device against every registered source filesystem; the
 * first open_fs() that succeeds claims the device and sets
 * cctx->convert_ops.  Fails when no source filesystem recognizes it.
 * NOTE(review): listing elides some lines (success/failure returns).
 */
2266 static int convert_open_fs(const char *devname,
2267 struct btrfs_convert_context *cctx)
2271 memset(cctx, 0, sizeof(*cctx));
2273 for (i = 0; i < ARRAY_SIZE(convert_operations); i++) {
2274 int ret = convert_operations[i]->open_fs(cctx, devname);
2277 cctx->convert_ops = convert_operations[i];
2282 fprintf(stderr, "No file system found to convert.\n");
/*
 * Top-level conversion driver.  Sequence: open the source fs and read
 * its used space -> mkfs a btrfs in the free space (make_btrfs) ->
 * build the root tree (init_btrfs) -> create the <fs>_saved image
 * subvolume containing a 1:1 image of the original fs -> copy every
 * inode into btrfs -> set/copy the label -> migrate the superblock
 * into its final location, at which point the result is mountable.
 * NOTE(review): this listing elides many lines (declarations, error
 * labels, cleanup paths); comments below cover visible steps only.
 */
2286 static int do_convert(const char *devname, int datacsum, int packing,
2287 int noxattr, u32 nodesize, int copylabel, const char *fslabel,
2288 int progress, u64 features)
2295 struct btrfs_root *root;
2296 struct btrfs_root *image_root;
2297 struct btrfs_convert_context cctx;
2298 struct btrfs_key key;
2299 char *subvol_name = NULL;
2300 struct task_ctx ctx;
2301 char features_buf[64];
2302 struct btrfs_mkfs_config mkfs_cfg;
2304 init_convert_context(&cctx);
2305 ret = convert_open_fs(devname, &cctx);
2308 ret = convert_read_used_space(&cctx);
2312 blocksize = cctx.blocksize;
2313 total_bytes = (u64)blocksize * (u64)cctx.block_count;
/* btrfs sectorsize must be at least 4K */
2314 if (blocksize < 4096) {
2315 fprintf(stderr, "block size is too small\n");
2318 if (btrfs_check_nodesize(nodesize, blocksize, features))
2320 fd = open(devname, O_RDWR);
2322 fprintf(stderr, "unable to open %s\n", devname);
2325 btrfs_parse_features_to_string(features_buf, features);
2326 if (features == BTRFS_MKFS_DEFAULT_FEATURES)
2327 strcat(features_buf, " (default)");
2329 printf("create btrfs filesystem:\n");
2330 printf("\tblocksize: %u\n", blocksize);
2331 printf("\tnodesize: %u\n", nodesize);
2332 printf("\tfeatures: %s\n", features_buf);
2334 mkfs_cfg.label = cctx.volume_name;
2335 mkfs_cfg.num_bytes = total_bytes;
2336 mkfs_cfg.nodesize = nodesize;
2337 mkfs_cfg.sectorsize = blocksize;
2338 mkfs_cfg.stripesize = blocksize;
2339 mkfs_cfg.features = features;
2340 /* New convert need these space */
2341 mkfs_cfg.fs_uuid = malloc(BTRFS_UUID_UNPARSED_SIZE)
2342 mkfs_cfg.chunk_uuid = malloc(BTRFS_UUID_UNPARSED_SIZE);
2343 *(mkfs_cfg.fs_uuid) = '\0';
2344 *(mkfs_cfg.chunk_uuid) = '\0';
/* lays down the initial trees inside the source fs' free space */
2346 ret = make_btrfs(fd, &mkfs_cfg, &cctx);
2348 fprintf(stderr, "unable to create initial ctree: %s\n",
2353 root = open_ctree_fd(fd, devname, mkfs_cfg.super_bytenr,
2354 OPEN_CTREE_WRITES | OPEN_CTREE_FS_PARTIAL);
2356 fprintf(stderr, "unable to open ctree\n");
2359 ret = init_btrfs(&mkfs_cfg, root, &cctx, datacsum, packing, noxattr);
2361 fprintf(stderr, "unable to setup the root tree\n");
2365 printf("creating %s image file.\n", cctx.convert_ops->name);
2366 ret = asprintf(&subvol_name, "%s_saved", cctx.convert_ops->name);
2368 fprintf(stderr, "error allocating subvolume name: %s_saved\n",
2369 cctx.convert_ops->name);
2372 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
2373 key.offset = (u64)-1;
2374 key.type = BTRFS_ROOT_ITEM_KEY;
2375 image_root = btrfs_read_fs_root(root->fs_info, &key);
2377 fprintf(stderr, "unable to create subvol\n");
/* the "image" file preserves the original fs for rollback */
2380 ret = create_image(image_root, &mkfs_cfg, &cctx, fd,
2381 mkfs_cfg.num_bytes, "image", datacsum);
2383 fprintf(stderr, "error during create_image %d\n", ret);
2387 printf("creating btrfs metadata.\n");
2388 ctx.max_copy_inodes = (cctx.inodes_count - cctx.free_inodes_count);
2389 ctx.cur_copy_inodes = 0;
2392 ctx.info = task_init(print_copied_inodes, after_copied_inodes,
2394 task_start(ctx.info);
2396 ret = copy_inodes(&cctx, root, datacsum, packing, noxattr, &ctx);
2398 fprintf(stderr, "error during copy_inodes %d\n", ret);
2402 task_stop(ctx.info);
2403 task_deinit(ctx.info);
/* make the image subvolume reachable as <fs>_saved */
2406 image_root = link_subvol(root, subvol_name, CONV_IMAGE_SUBVOL_OBJECTID);
2410 memset(root->fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE);
2411 if (copylabel == 1) {
2412 __strncpy_null(root->fs_info->super_copy->label,
2413 cctx.volume_name, BTRFS_LABEL_SIZE - 1);
2414 fprintf(stderr, "copy label '%s'\n",
2415 root->fs_info->super_copy->label);
2416 } else if (copylabel == -1) {
2417 strcpy(root->fs_info->super_copy->label, fslabel);
2418 fprintf(stderr, "set label to '%s'\n", fslabel);
2421 ret = close_ctree(root);
2423 fprintf(stderr, "error during close_ctree %d\n", ret);
2426 convert_close_fs(&cctx);
2427 clean_convert_context(&cctx);
2430 * If this step succeed, we get a mountable btrfs. Otherwise
2431 * the source fs is left unchanged.
2433 ret = migrate_super_block(fd, mkfs_cfg.super_bytenr, blocksize);
2435 fprintf(stderr, "unable to migrate super block\n");
/* reopen from the final superblock location to finalize */
2440 root = open_ctree_fd(fd, devname, 0,
2441 OPEN_CTREE_WRITES | OPEN_CTREE_FS_PARTIAL);
2443 fprintf(stderr, "unable to open ctree\n");
2446 root->fs_info->finalize_on_close = 1;
2450 printf("conversion complete.\n");
2453 clean_convert_context(&cctx);
2458 "WARNING: an error occurred during chunk mapping fixup, filesystem mountable but not finalized\n");
2460 fprintf(stderr, "conversion aborted\n");
2465  * Check if a non 1:1 mapped chunk can be rolled back.
2466  * For new convert, it's OK while for old convert it's not.
 *
 * Scans the extent tree range of the block group containing `bytenr`;
 * if any DATA extent lives in that (non-1:1) chunk it must be an
 * old-convert layout, which cannot be rolled back.
 * NOTE(review): listing elides some lines (declarations, loop exits,
 * return values).
2468 static int may_rollback_chunk(struct btrfs_fs_info *fs_info, u64 bytenr)
2470 struct btrfs_block_group_cache *bg;
2471 struct btrfs_key key;
2472 struct btrfs_path path;
2473 struct btrfs_root *extent_root = fs_info->extent_root;
2478 bg = btrfs_lookup_first_block_group(fs_info, bytenr);
2481 bg_start = bg->key.objectid;
2482 bg_end = bg->key.objectid + bg->key.offset;
/* start past the end and walk extent items backwards */
2484 key.objectid = bg_end;
2485 key.type = BTRFS_METADATA_ITEM_KEY;
2487 btrfs_init_path(&path);
2489 ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
2494 struct btrfs_extent_item *ei;
2496 ret = btrfs_previous_extent_item(extent_root, &path, bg_start);
2504 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
/* metadata items are fine for rollback; keep scanning */
2505 if (key.type == BTRFS_METADATA_ITEM_KEY)
2507 /* Now it's EXTENT_ITEM_KEY only */
2508 ei = btrfs_item_ptr(path.nodes[0], path.slots[0],
2509 struct btrfs_extent_item);
2511 * Found data extent, means this is old convert must follow 1:1
2514 if (btrfs_extent_flags(path.nodes[0], ei)
2515 & BTRFS_EXTENT_FLAG_DATA) {
2520 btrfs_release_path(&path);
/*
 * Decide whether the filesystem can be rolled back to its source:
 * single device only, and every chunk from the superblock onward must
 * either map 1:1 to physical, or (new-convert case) pass the
 * metadata-only check in may_rollback_chunk().
 * NOTE(review): listing elides some lines (declarations, loop
 * structure, returns).
 */
2524 static int may_rollback(struct btrfs_root *root)
2526 struct btrfs_fs_info *info = root->fs_info;
2527 struct btrfs_multi_bio *multi = NULL;
/* multi-device filesystems cannot be rolled back */
2535 if (btrfs_super_num_devices(info->super_copy) != 1)
2538 bytenr = BTRFS_SUPER_INFO_OFFSET;
2539 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2542 ret = btrfs_map_block(&info->mapping_tree, WRITE, bytenr,
2543 &length, &multi, 0, NULL);
2545 if (ret == -ENOENT) {
2546 /* removed block group at the tail */
2547 if (length == (u64)-1)
2550 /* removed block group in the middle */
2556 num_stripes = multi->num_stripes;
2557 physical = multi->stripes[0].physical;
2560 if (num_stripes != 1) {
2561 error("num stripes for bytenr %llu is not 1", bytenr);
2566 * Extra check for new convert, as metadata chunk from new
2567 * convert is much more free than old convert, it doesn't need
2568 * to do 1:1 mapping.
2570 if (physical != bytenr) {
2572 * Check if it's a metadata chunk and has only metadata
2575 ret = may_rollback_chunk(info, bytenr);
/* stop once the whole device range has been verified */
2581 if (bytenr >= total_bytes)
2589 static int do_rollback(const char *devname)
2594 struct btrfs_root *root;
2595 struct btrfs_root *image_root;
2596 struct btrfs_root *chunk_root;
2597 struct btrfs_dir_item *dir;
2598 struct btrfs_inode_item *inode;
2599 struct btrfs_file_extent_item *fi;
2600 struct btrfs_trans_handle *trans;
2601 struct extent_buffer *leaf;
2602 struct btrfs_block_group_cache *cache1;
2603 struct btrfs_block_group_cache *cache2;
2604 struct btrfs_key key;
2605 struct btrfs_path path;
2606 struct extent_io_tree io_tree;
2621 extent_io_tree_init(&io_tree);
2623 fd = open(devname, O_RDWR);
2625 fprintf(stderr, "unable to open %s\n", devname);
2628 root = open_ctree_fd(fd, devname, 0, OPEN_CTREE_WRITES);
2630 fprintf(stderr, "unable to open ctree\n");
2633 ret = may_rollback(root);
2635 fprintf(stderr, "unable to do rollback\n");
2639 sectorsize = root->sectorsize;
2640 buf = malloc(sectorsize);
2642 fprintf(stderr, "unable to allocate memory\n");
2646 btrfs_init_path(&path);
2648 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
2649 key.type = BTRFS_ROOT_BACKREF_KEY;
2650 key.offset = BTRFS_FS_TREE_OBJECTID;
2651 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path, 0,
2653 btrfs_release_path(&path);
2656 "ERROR: unable to convert ext2 image subvolume, is it deleted?\n");
2658 } else if (ret < 0) {
2660 "ERROR: unable to open ext2_saved, id=%llu: %s\n",
2661 (unsigned long long)key.objectid, strerror(-ret));
2665 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
2666 key.type = BTRFS_ROOT_ITEM_KEY;
2667 key.offset = (u64)-1;
2668 image_root = btrfs_read_fs_root(root->fs_info, &key);
2669 if (!image_root || IS_ERR(image_root)) {
2670 fprintf(stderr, "unable to open subvol %llu\n",
2671 (unsigned long long)key.objectid);
2676 root_dir = btrfs_root_dirid(&root->root_item);
2677 dir = btrfs_lookup_dir_item(NULL, image_root, &path,
2678 root_dir, name, strlen(name), 0);
2679 if (!dir || IS_ERR(dir)) {
2680 fprintf(stderr, "unable to find file %s\n", name);
2683 leaf = path.nodes[0];
2684 btrfs_dir_item_key_to_cpu(leaf, dir, &key);
2685 btrfs_release_path(&path);
2687 objectid = key.objectid;
2689 ret = btrfs_lookup_inode(NULL, image_root, &path, &key, 0);
2691 fprintf(stderr, "unable to find inode item\n");
2694 leaf = path.nodes[0];
2695 inode = btrfs_item_ptr(leaf, path.slots[0], struct btrfs_inode_item);
2696 total_bytes = btrfs_inode_size(leaf, inode);
2697 btrfs_release_path(&path);
2699 key.objectid = objectid;
2701 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
2702 ret = btrfs_search_slot(NULL, image_root, &key, &path, 0, 0);
2704 fprintf(stderr, "unable to find first file extent\n");
2705 btrfs_release_path(&path);
2709 /* build mapping tree for the relocated blocks */
2710 for (offset = 0; offset < total_bytes; ) {
2711 leaf = path.nodes[0];
2712 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
2713 ret = btrfs_next_leaf(root, &path);
2719 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
2720 if (key.objectid != objectid || key.offset != offset ||
2721 btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2724 fi = btrfs_item_ptr(leaf, path.slots[0],
2725 struct btrfs_file_extent_item);
2726 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2728 if (btrfs_file_extent_compression(leaf, fi) ||
2729 btrfs_file_extent_encryption(leaf, fi) ||
2730 btrfs_file_extent_other_encoding(leaf, fi))
2733 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2734 /* skip holes and direct mapped extents */
2735 if (bytenr == 0 || bytenr == offset)
2738 bytenr += btrfs_file_extent_offset(leaf, fi);
2739 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
2741 cache1 = btrfs_lookup_block_group(root->fs_info, offset);
2742 cache2 = btrfs_lookup_block_group(root->fs_info,
2743 offset + num_bytes - 1);
2745 * Here we must take consideration of old and new convert
2747 * For old convert case, sign, there is no consist chunk type
2748 * that will cover the extent. META/DATA/SYS are all possible.
2749 * Just ensure relocate one is in SYS chunk.
2750 * For new convert case, they are all covered by DATA chunk.
2752 * So, there is not valid chunk type check for it now.
2754 if (cache1 != cache2)
2757 set_extent_bits(&io_tree, offset, offset + num_bytes - 1,
2758 EXTENT_LOCKED, GFP_NOFS);
2759 set_state_private(&io_tree, offset, bytenr);
2761 offset += btrfs_file_extent_num_bytes(leaf, fi);
2764 btrfs_release_path(&path);
2766 if (offset < total_bytes) {
2767 fprintf(stderr, "unable to build extent mapping\n");
2768 fprintf(stderr, "converted filesystem after balance is unable to rollback\n");
2772 first_free = BTRFS_SUPER_INFO_OFFSET + 2 * sectorsize - 1;
2773 first_free &= ~((u64)sectorsize - 1);
2774 /* backup for extent #0 should exist */
2775 if(!test_range_bit(&io_tree, 0, first_free - 1, EXTENT_LOCKED, 1)) {
2776 fprintf(stderr, "no backup for the first extent\n");
2779 /* force no allocation from system block group */
2780 root->fs_info->system_allocs = -1;
2781 trans = btrfs_start_transaction(root, 1);
2784 * recow the whole chunk tree, this will remove all chunk tree blocks
2785 * from system block group
2787 chunk_root = root->fs_info->chunk_root;
2788 memset(&key, 0, sizeof(key));
2790 ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 1);
2794 ret = btrfs_next_leaf(chunk_root, &path);
2798 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
2799 btrfs_release_path(&path);
2801 btrfs_release_path(&path);
2806 cache1 = btrfs_lookup_block_group(root->fs_info, offset);
2810 if (cache1->flags & BTRFS_BLOCK_GROUP_SYSTEM)
2811 num_bytes += btrfs_block_group_used(&cache1->item);
2813 offset = cache1->key.objectid + cache1->key.offset;
2815 /* only extent #0 left in system block group? */
2816 if (num_bytes > first_free) {
2817 fprintf(stderr, "unable to empty system block group\n");
2820 /* create a system chunk that maps the whole device */
2821 ret = prepare_system_chunk_sb(root->fs_info->super_copy);
2823 fprintf(stderr, "unable to update system chunk\n");
2827 ret = btrfs_commit_transaction(trans, root);
2830 ret = close_ctree(root);
2832 fprintf(stderr, "error during close_ctree %d\n", ret);
2836 /* zero btrfs super block mirrors */
2837 memset(buf, 0, sectorsize);
2838 for (i = 1 ; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2839 bytenr = btrfs_sb_offset(i);
2840 if (bytenr >= total_bytes)
2842 ret = pwrite(fd, buf, sectorsize, bytenr);
2843 if (ret != sectorsize) {
2845 "error during zeroing superblock %d: %d\n",
2851 sb_bytenr = (u64)-1;
2852 /* copy all relocated blocks back */
2854 ret = find_first_extent_bit(&io_tree, 0, &start, &end,
2859 ret = get_state_private(&io_tree, start, &bytenr);
2862 clear_extent_bits(&io_tree, start, end, EXTENT_LOCKED,
2865 while (start <= end) {
2866 if (start == BTRFS_SUPER_INFO_OFFSET) {
2870 ret = pread(fd, buf, sectorsize, bytenr);
2872 fprintf(stderr, "error during pread %d\n", ret);
2875 BUG_ON(ret != sectorsize);
2876 ret = pwrite(fd, buf, sectorsize, start);
2878 fprintf(stderr, "error during pwrite %d\n", ret);
2881 BUG_ON(ret != sectorsize);
2883 start += sectorsize;
2884 bytenr += sectorsize;
2890 fprintf(stderr, "error during fsync %d\n", ret);
2894 * finally, overwrite btrfs super block.
2896 ret = pread(fd, buf, sectorsize, sb_bytenr);
2898 fprintf(stderr, "error during pread %d\n", ret);
2901 BUG_ON(ret != sectorsize);
2902 ret = pwrite(fd, buf, sectorsize, BTRFS_SUPER_INFO_OFFSET);
2904 fprintf(stderr, "error during pwrite %d\n", ret);
2907 BUG_ON(ret != sectorsize);
2910 fprintf(stderr, "error during fsync %d\n", ret);
2916 extent_io_tree_cleanup(&io_tree);
2917 printf("rollback complete.\n");
2924 fprintf(stderr, "rollback aborted.\n");
2928 static void print_usage(void)
2930 printf("usage: btrfs-convert [options] device\n");
2931 printf("options:\n");
2932 printf("\t-d|--no-datasum disable data checksum, sets NODATASUM\n");
2933 printf("\t-i|--no-xattr ignore xattrs and ACLs\n");
2934 printf("\t-n|--no-inline disable inlining of small files to metadata\n");
2935 printf("\t-N|--nodesize SIZE set filesystem metadata nodesize\n");
2936 printf("\t-r|--rollback roll back to the original filesystem\n");
2937 printf("\t-l|--label LABEL set filesystem label\n");
2938 printf("\t-L|--copy-label use label from converted filesystem\n");
2939 printf("\t-p|--progress show converting progress (default)\n");
2940 printf("\t-O|--features LIST comma separated list of filesystem features\n");
2941 printf("\t--no-progress show only overview, not the detailed progress\n");
2943 printf("Suported filesystems:\n");
2944 printf("\text2/3/4: %s\n", BTRFSCONVERT_EXT2 ? "yes" : "no");
2947 int main(int argc, char *argv[])
2953 u32 nodesize = max_t(u32, sysconf(_SC_PAGESIZE),
2954 BTRFS_MKFS_DEFAULT_NODE_SIZE);
2957 int usage_error = 0;
2960 char fslabel[BTRFS_LABEL_SIZE];
2961 u64 features = BTRFS_MKFS_DEFAULT_FEATURES;
2964 enum { GETOPT_VAL_NO_PROGRESS = 256 };
2965 static const struct option long_options[] = {
2966 { "no-progress", no_argument, NULL,
2967 GETOPT_VAL_NO_PROGRESS },
2968 { "no-datasum", no_argument, NULL, 'd' },
2969 { "no-inline", no_argument, NULL, 'n' },
2970 { "no-xattr", no_argument, NULL, 'i' },
2971 { "rollback", no_argument, NULL, 'r' },
2972 { "features", required_argument, NULL, 'O' },
2973 { "progress", no_argument, NULL, 'p' },
2974 { "label", required_argument, NULL, 'l' },
2975 { "copy-label", no_argument, NULL, 'L' },
2976 { "nodesize", required_argument, NULL, 'N' },
2977 { "help", no_argument, NULL, GETOPT_VAL_HELP},
2978 { NULL, 0, NULL, 0 }
2980 int c = getopt_long(argc, argv, "dinN:rl:LpO:", long_options, NULL);
2995 nodesize = parse_size(optarg);
3002 if (strlen(optarg) >= BTRFS_LABEL_SIZE) {
3004 "WARNING: label too long, trimmed to %d bytes\n",
3005 BTRFS_LABEL_SIZE - 1);
3007 __strncpy_null(fslabel, optarg, BTRFS_LABEL_SIZE - 1);
3016 char *orig = strdup(optarg);
3019 tmp = btrfs_parse_fs_features(tmp, &features);
3022 "Unrecognized filesystem feature '%s'\n",
3028 if (features & BTRFS_FEATURE_LIST_ALL) {
3029 btrfs_list_all_fs_features(
3030 ~BTRFS_CONVERT_ALLOWED_FEATURES);
3033 if (features & ~BTRFS_CONVERT_ALLOWED_FEATURES) {
3036 btrfs_parse_features_to_string(buf,
3037 features & ~BTRFS_CONVERT_ALLOWED_FEATURES);
3039 "ERROR: features not allowed for convert: %s\n",
3046 case GETOPT_VAL_NO_PROGRESS:
3049 case GETOPT_VAL_HELP:
3052 return c != GETOPT_VAL_HELP;
3056 if (check_argc_exact(argc - optind, 1)) {
3061 if (rollback && (!datacsum || noxattr || !packing)) {
3063 "Usage error: -d, -i, -n options do not apply to rollback\n");
3072 file = argv[optind];
3073 ret = check_mounted(file);
3075 fprintf(stderr, "Could not check mount status: %s\n",
3079 fprintf(stderr, "%s is mounted\n", file);
3084 ret = do_rollback(file);
3086 ret = do_convert(file, datacsum, packing, noxattr, nodesize,
3087 copylabel, fslabel, progress, features);