2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
20 * Btrfs convert design:
22 * The overall design of btrfs convert is like the following:
24 * |<------------------Old fs----------------------------->|
25 * |<- used ->| |<- used ->| |<- used ->|
28 * |<---------------Btrfs fs------------------------------>|
29 * |<- Old data chunk ->|< new chunk (D/M/S)>|<- ODC ->|
30 * |<-Old-FE->| |<-Old-FE->|<- Btrfs extents ->|<-Old-FE->|
32 * ODC = Old data chunk, btrfs chunks containing old fs data
33 * Mapped 1:1 (logical address == device offset)
34 * Old-FE = file extents pointing to old fs.
36 * So old fs used space is (mostly) kept as is, while btrfs will insert
37 * its chunk (Data/Meta/Sys) into large enough free space.
38 * In this way, we can create different profiles for metadata/data for
41 * We must reserve and relocate 3 ranges for btrfs:
42 * * [0, 1M) - area never used for any data except the first
44 * * [btrfs_sb_offset(1), +64K) - 1st superblock backup copy
45 * * [btrfs_sb_offset(2), +64K) - 2nd, dtto
47 * Most work is spent handling corner cases around these reserved ranges.
49 * Detailed workflow is:
50 * 1) Scan old fs used space and calculate data chunk layout
52 * We build a map of the used space of the old fs
54 * 1.2) Calculate data chunk layout - this is the hard part
55 * New data chunks must meet 3 conditions using result from 1.1
56 * a. Large enough to be a chunk
57 * b. Doesn't intersect reserved ranges
58 * c. Covers all the remaining old fs used space
60 * NOTE: This can be simplified if we don't need to handle backup supers
62 * 1.3) Calculate usable space for new btrfs chunks
63 * Btrfs chunk usable space must meet 3 conditions using result from 1.2
64 * a. Large enough to be a chunk
65 * b. Doesn't intersect reserved ranges
66 * c. Doesn't cover any data chunks in 1.1
68 * 2) Create basic btrfs filesystem structure
69 * Initial metadata and sys chunks are inserted in the first available
70 * space found in step 1.3
71 * Then insert all data chunks into the basic btrfs
73 * 3) Create convert image
74 * We need to relocate reserved ranges here.
75 * After this step, the convert image is done, and we can use the image
76 * as reflink source to create old files
78 * 4) Iterate old fs to create files
79 * We just reflink file extents from old fs to newly created files on
83 #include "kerncompat.h"
87 #include <sys/types.h>
96 #include "transaction.h"
98 #include "task-utils.h"
100 #include "mkfs/common.h"
101 #include "convert/common.h"
102 #include "convert/source-fs.h"
103 #include "fsfeatures.h"
105 const struct btrfs_convert_operations ext2_convert_ops;
107 static const struct btrfs_convert_operations *convert_operations[] = {
108 #if BTRFSCONVERT_EXT2
/*
 * Progress thread callback: periodically prints a spinner character and the
 * copied/total inode counters from the shared struct task_ctx.
 * NOTE(review): this listing is elided (the surrounding loop and the
 * counter variable declaration are not visible); comments cover only the
 * visible statements.
 */
113 static void *print_copied_inodes(void *p)
115 struct task_ctx *priv = p;
116 const char work_indicator[] = { '.', 'o', 'O', 'o' };
/* Report once per second. */
119 task_period_start(priv->info, 1000 /* 1s */);
122 printf("copy inodes [%c] [%10llu/%10llu]\r",
123 work_indicator[count % 4],
124 (unsigned long long)priv->cur_copy_inodes,
125 (unsigned long long)priv->max_copy_inodes);
127 task_period_wait(priv->info);
133 static int after_copied_inodes(void *p)
/*
 * Thin dispatch wrapper: forward to the source-filesystem specific
 * copy_inodes implementation selected at open_fs time.
 */
141 static inline int copy_inodes(struct btrfs_convert_context *cctx,
142 struct btrfs_root *root, u32 convert_flags,
145 return cctx->convert_ops->copy_inodes(cctx, root, convert_flags, p);
/* Thin dispatch wrapper: close the source filesystem via its convert_ops. */
148 static inline void convert_close_fs(struct btrfs_convert_context *cctx)
150 cctx->convert_ops->close_fs(cctx);
/*
 * Thin dispatch wrapper: ask the source filesystem whether its state is
 * clean (the caller warns and recommends an fsck when it is not).
 */
153 static inline int convert_check_state(struct btrfs_convert_context *cctx)
155 return cctx->convert_ops->check_state(cctx);
/*
 * Read the on-disk range [disk_bytenr, disk_bytenr + num_bytes) one
 * sectorsize block at a time and insert checksum items for it into the
 * csum tree.
 * NOTE(review): listing is elided — error handling, the final return and
 * the free() of the buffer are not visible here.
 */
158 static int csum_disk_extent(struct btrfs_trans_handle *trans,
159 struct btrfs_root *root,
160 u64 disk_bytenr, u64 num_bytes)
162 u32 blocksize = root->sectorsize;
167 buffer = malloc(blocksize);
/* Checksum the range block by block. */
170 for (offset = 0; offset < num_bytes; offset += blocksize) {
171 ret = read_disk_extent(root, disk_bytenr + offset,
175 ret = btrfs_csum_file_block(trans,
176 root->fs_info->csum_root,
177 disk_bytenr + num_bytes,
178 disk_bytenr + offset,
/*
 * Create one file extent (or hole) for the convert image file at @bytenr.
 * On return *ret_len is clamped so the created extent never crosses:
 *   - a btrfs reserved range (superblock locations),
 *   - a used/free boundary in the @used cache tree,
 *   - a block group boundary.
 * A real extent (disk_bytenr == bytenr, 1:1 mapped) is recorded when
 * @bytenr falls inside old-fs used space, otherwise a hole is recorded.
 * Checksums are added only when CONVERT_FLAG_DATACSUM is set.
 * NOTE(review): listing is elided — several returns, braces and hole
 * handling lines are not visible here.
 */
187 static int create_image_file_range(struct btrfs_trans_handle *trans,
188 struct btrfs_root *root,
189 struct cache_tree *used,
190 struct btrfs_inode_item *inode,
191 u64 ino, u64 bytenr, u64 *ret_len,
194 struct cache_extent *cache;
195 struct btrfs_block_group_cache *bg_cache;
200 u32 datacsum = convert_flags & CONVERT_FLAG_DATACSUM;
/* Both start and length must be sectorsize aligned. */
202 if (bytenr != round_down(bytenr, root->sectorsize)) {
203 error("bytenr not sectorsize aligned: %llu",
204 (unsigned long long)bytenr);
207 if (len != round_down(len, root->sectorsize)) {
208 error("length not sectorsize aligned: %llu",
209 (unsigned long long)len);
212 len = min_t(u64, len, BTRFS_MAX_EXTENT_SIZE);
215 * Skip reserved ranges first
217 * Or we will insert a hole into current image file, and later
218 * migrate block will fail as there is already a file extent.
220 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
221 struct simple_range *reserved = &btrfs_reserved_ranges[i];
227 * |---- reserved ----|
229 * Skip to reserved range end
231 if (bytenr >= reserved->start && bytenr < range_end(reserved)) {
232 *ret_len = range_end(reserved) - bytenr;
239 * Leading part may still create a file extent
241 if (bytenr < reserved->start &&
242 bytenr + len >= range_end(reserved)) {
243 len = min_t(u64, len, reserved->start - bytenr);
248 /* Check if we are going to insert regular file extent, or hole */
249 cache = search_cache_extent(used, bytenr);
251 if (cache->start <= bytenr) {
253 * |///////Used///////|
256 * Insert one real file extent
258 len = min_t(u64, len, cache->start + cache->size -
/* 1:1 mapping: file offset equals device offset for old fs data. */
260 disk_bytenr = bytenr;
268 len = min(len, cache->start - bytenr);
284 /* Check if the range is in a data block group */
285 bg_cache = btrfs_lookup_block_group(root->fs_info, bytenr);
288 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_DATA))
291 /* The extent should never cross block group boundary */
292 len = min_t(u64, len, bg_cache->key.objectid +
293 bg_cache->key.offset - bytenr);
296 if (len != round_down(len, root->sectorsize)) {
297 error("remaining length not sectorsize aligned: %llu",
298 (unsigned long long)len);
301 ret = btrfs_record_file_extent(trans, root, ino, inode, bytenr,
307 ret = csum_disk_extent(trans, root, bytenr, len);
313 * Relocate old fs data in one reserved range
315 * Since all old fs data in reserved range is not covered by any chunk nor
316 * data extent, we don't need to handle any reference but add new
317 * extent/reference, which makes the code clearer.
 *
 * For each piece of old fs data inside @range: reserve a new extent, copy
 * the data there via @fd, record the file extent, optionally add csums,
 * and record holes for the gaps in between.
 * NOTE(review): listing is elided — error handling, free(eb) and the
 * final return are not visible here.
319 static int migrate_one_reserved_range(struct btrfs_trans_handle *trans,
320 struct btrfs_root *root,
321 struct cache_tree *used,
322 struct btrfs_inode_item *inode, int fd,
323 u64 ino, struct simple_range *range,
326 u64 cur_off = range->start;
327 u64 cur_len = range->len;
328 u64 hole_start = range->start;
330 struct cache_extent *cache;
331 struct btrfs_key key;
332 struct extent_buffer *eb;
336 * It's possible that there are holes in reserved range:
337 * |<---------------- Reserved range ---------------------->|
338 * |<- Old fs data ->| |<- Old fs data ->|
339 * So here we need to iterate through old fs used space and only
340 * migrate ranges that covered by old fs data.
342 while (cur_off < range_end(range)) {
343 cache = lookup_cache_extent(used, cur_off, cur_len);
/* Clamp the copy window to this used extent and the range end. */
346 cur_off = max(cache->start, cur_off);
347 cur_len = min(cache->start + cache->size, range_end(range)) -
349 BUG_ON(cur_len < root->sectorsize);
351 /* reserve extent for the data */
352 ret = btrfs_reserve_extent(trans, root, cur_len, 0, 0, (u64)-1,
357 eb = malloc(sizeof(*eb) + cur_len);
/* Copy the old fs data out of the reserved range. */
363 ret = pread(fd, eb->data, cur_len, cur_off);
365 ret = (ret < 0 ? ret : -EIO);
/* key.objectid/key.offset: bytenr/len of the newly reserved extent. */
369 eb->start = key.objectid;
370 eb->len = key.offset;
373 ret = write_and_map_eb(root, eb);
378 /* Now handle extent item and file extent things */
379 ret = btrfs_record_file_extent(trans, root, ino, inode, cur_off,
380 key.objectid, key.offset);
383 /* Finally, insert csum items */
384 if (convert_flags & CONVERT_FLAG_DATACSUM)
385 ret = csum_disk_extent(trans, root, key.objectid,
388 /* Don't forget to insert hole */
389 hole_len = cur_off - hole_start;
391 ret = btrfs_record_file_extent(trans, root, ino, inode,
392 hole_start, 0, hole_len);
397 cur_off += key.offset;
398 hole_start = cur_off;
399 cur_len = range_end(range) - cur_off;
/* Trailing hole after the last piece of old fs data: */
403 * |<---- reserved -------->|
404 * |<- Old fs data ->| |
407 if (range_end(range) - hole_start > 0)
408 ret = btrfs_record_file_extent(trans, root, ino, inode,
409 hole_start, 0, range_end(range) - hole_start);
414 * Relocate the used source-fs data in all btrfs reserved ranges,
 * one btrfs_reserved_ranges[] entry at a time; ranges starting beyond
 * @total_bytes are skipped.
416 static int migrate_reserved_ranges(struct btrfs_trans_handle *trans,
417 struct btrfs_root *root,
418 struct cache_tree *used,
419 struct btrfs_inode_item *inode, int fd,
420 u64 ino, u64 total_bytes, u32 convert_flags)
425 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
426 struct simple_range *range = &btrfs_reserved_ranges[i];
428 if (range->start > total_bytes)
430 ret = migrate_one_reserved_range(trans, root, used, inode, fd,
431 ino, range, convert_flags);
440 * Helper for expand and merge extent_cache for wipe_one_reserved_range() to
441 * handle wiping a range that exists in cache.
 *
 * Grows @entry until it reaches @min_stripe_size, either backward (toward
 * lower offsets) or forward, merging with the neighbouring cache extent
 * when the expansion would overlap it.
 * NOTE(review): listing is elided — returns and some braces are missing.
443 static int _expand_extent_cache(struct cache_tree *tree,
444 struct cache_extent *entry,
445 u64 min_stripe_size, int backward)
447 struct cache_extent *ce;
/* Already large enough, nothing to do. */
450 if (entry->size >= min_stripe_size)
452 diff = min_stripe_size - entry->size;
455 ce = prev_cache_extent(entry);
458 if (ce->start + ce->size >= entry->start - diff) {
459 /* Directly merge with previous extent */
460 ce->size = entry->start + entry->size - ce->start;
461 remove_cache_extent(tree, entry);
466 /* No overlap, normal extent */
467 if (entry->start < diff) {
468 error("cannot find space for data chunk layout");
471 entry->start -= diff;
475 ce = next_cache_extent(entry);
478 if (entry->start + entry->size + diff >= ce->start) {
479 /* Directly merge with next extent */
480 entry->size = ce->start + ce->size - entry->start;
481 remove_cache_extent(tree, ce);
491 * Remove one reserved range from the given cache tree.
492 * If min_stripe_size is non-zero, it will ensure for the split case that
493 * all resulting split cache extents are no smaller than @min_stripe_size / 2.
 *
 * Three cases are handled: wipe overlapping the head of a cache extent,
 * wipe fully inside one (split into two), and wipe overlapping the tail.
 * NOTE(review): listing is elided — returns and some braces are missing.
495 static int wipe_one_reserved_range(struct cache_tree *tree,
496 u64 start, u64 len, u64 min_stripe_size,
499 struct cache_extent *cache;
502 BUG_ON(ensure_size && min_stripe_size == 0);
504 * The logic here is simplified to handle special cases only,
505 * so we don't need to consider the merge case for ensure_size.
507 BUG_ON(min_stripe_size && (min_stripe_size < len * 2 ||
508 min_stripe_size / 2 < BTRFS_STRIPE_LEN));
510 /* Also, wipe range should already be aligned */
511 BUG_ON(start != round_down(start, BTRFS_STRIPE_LEN) ||
512 start + len != round_up(start + len, BTRFS_STRIPE_LEN));
/* From here on, min_stripe_size is the per-half minimum. */
514 min_stripe_size /= 2;
516 cache = lookup_cache_extent(tree, start, len);
/* Case 1: wipe range covers the head of the cache extent. */
520 if (start <= cache->start) {
522 * |--------cache---------|
525 BUG_ON(start + len <= cache->start);
528 * The wipe size is smaller than min_stripe_size / 2,
529 * so the result length should still meet min_stripe_size
530 * And no need to do alignment
532 cache->size -= (start + len - cache->start);
533 if (cache->size == 0) {
534 remove_cache_extent(tree, cache);
539 BUG_ON(ensure_size && cache->size < min_stripe_size);
541 cache->start = start + len;
/* Case 2: wipe range is strictly inside the cache extent — split it. */
543 } else if (start > cache->start && start + len < cache->start +
546 * |-------cache-----|
549 u64 old_start = cache->start;
550 u64 old_len = cache->size;
551 u64 insert_start = start + len;
554 cache->size = start - cache->start;
555 /* Expand the leading half part if needed */
556 if (ensure_size && cache->size < min_stripe_size) {
557 ret = _expand_extent_cache(tree, cache,
563 /* And insert the new one */
564 insert_len = old_start + old_len - start - len;
565 ret = add_merge_cache_extent(tree, insert_start, insert_len);
569 /* Expand the last half part if needed */
570 if (ensure_size && insert_len < min_stripe_size) {
571 cache = lookup_cache_extent(tree, insert_start,
573 if (!cache || cache->start != insert_start ||
574 cache->size != insert_len)
576 ret = _expand_extent_cache(tree, cache,
/* Case 3: wipe range covers the tail of the cache extent. */
585 * Wipe len should be small enough and no need to expand the
588 cache->size = start - cache->start;
589 BUG_ON(ensure_size && cache->size < min_stripe_size);
594 * Remove reserved ranges from given cache_tree
596 * It will remove the following ranges
598 * 2) 2nd superblock, +64K (make sure chunks are 64K aligned)
599 * 3) 3rd superblock, +64K
601 * @min_stripe_size must be given for safety check
602 * and if @ensure_size is given, it will ensure affected cache_extent will be
603 * larger than min_stripe_size
605 static int wipe_reserved_ranges(struct cache_tree *tree, u64 min_stripe_size,
/* Wipe every entry of btrfs_reserved_ranges[] from @tree. */
611 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
612 struct simple_range *range = &btrfs_reserved_ranges[i];
614 ret = wipe_one_reserved_range(tree, range->start, range->len,
615 min_stripe_size, ensure_size);
/*
 * From cctx->used_space, compute:
 *  - cctx->data_chunks: ranges that must become 1:1 mapped data chunks
 *    (old fs used space, merged and padded to at least min_stripe_size),
 *  - cctx->free_space: the remaining space usable for new btrfs chunks.
 * Both trees then get the btrfs reserved ranges wiped out.
 * NOTE(review): listing is elided — cur_off/cur_len declarations and
 * error checks are not visible here.
 */
622 static int calculate_available_space(struct btrfs_convert_context *cctx)
624 struct cache_tree *used = &cctx->used_space;
625 struct cache_tree *data_chunks = &cctx->data_chunks;
626 struct cache_tree *free = &cctx->free_space;
627 struct cache_extent *cache;
630 * Twice the minimal chunk size, to allow later wipe_reserved_ranges()
631 * works without need to consider overlap
633 u64 min_stripe_size = 2 * 16 * 1024 * 1024;
636 /* Calculate data_chunks */
637 for (cache = first_cache_extent(used); cache;
638 cache = next_cache_extent(cache)) {
641 if (cache->start + cache->size < cur_off)
643 if (cache->start > cur_off + min_stripe_size)
644 cur_off = cache->start;
645 cur_len = max(cache->start + cache->size - cur_off,
647 ret = add_merge_cache_extent(data_chunks, cur_off, cur_len);
653 * remove reserved ranges, so we won't ever bother relocating an old
654 * filesystem extent to other place.
656 ret = wipe_reserved_ranges(data_chunks, min_stripe_size, 1);
662 * Calculate free space
663 * Always round up the start bytenr, to avoid metadata extents crossing a
664 * stripe boundary, as later mkfs_convert() won't have all the extent
/* Free space is whatever lies between the data chunk ranges. */
667 for (cache = first_cache_extent(data_chunks); cache;
668 cache = next_cache_extent(cache)) {
669 if (cache->start < cur_off)
671 if (cache->start > cur_off) {
675 len = cache->start - round_up(cur_off,
677 insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
679 ret = add_merge_cache_extent(free, insert_start, len);
683 cur_off = cache->start + cache->size;
685 /* Don't forget the last range */
686 if (cctx->total_bytes > cur_off) {
687 u64 len = cctx->total_bytes - cur_off;
690 insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
692 ret = add_merge_cache_extent(free, insert_start, len);
697 /* Remove reserved bytes */
698 ret = wipe_reserved_ranges(free, min_stripe_size, 0);
704 * Read used space, and since we have the used space,
705 * calculate data_chunks and free space for later mkfs.
707 static int convert_read_used_space(struct btrfs_convert_context *cctx)
/* Source-fs specific scan, then derive chunk layout from the result. */
711 ret = cctx->convert_ops->read_used_space(cctx);
715 ret = calculate_available_space(cctx);
720 * Create the fs image file of old filesystem.
722 * This is completely fs independent as we have cctx->used, only
723 * need to create file extents pointing to all the positions.
 *
 * Creates a read-only inode named @name in @root, fills it with file
 * extents mirroring the old fs layout (via create_image_file_range()),
 * relocates the reserved ranges (via migrate_reserved_ranges()), then
 * writes the final inode size back.
 * NOTE(review): listing is elided — error-path gotos, the main offset
 * loop construct and the final return are not visible here.
725 static int create_image(struct btrfs_root *root,
726 struct btrfs_mkfs_config *cfg,
727 struct btrfs_convert_context *cctx, int fd,
728 u64 size, char *name, u32 convert_flags)
730 struct btrfs_inode_item buf;
731 struct btrfs_trans_handle *trans;
732 struct btrfs_path path;
733 struct btrfs_key key;
734 struct cache_extent *cache;
735 struct cache_tree used_tmp;
738 u64 flags = BTRFS_INODE_READONLY;
/* Without datacsum, mark the image inode nodatasum. */
741 if (!(convert_flags & CONVERT_FLAG_DATACSUM))
742 flags |= BTRFS_INODE_NODATASUM;
744 trans = btrfs_start_transaction(root, 1);
748 cache_tree_init(&used_tmp);
749 btrfs_init_path(&path);
/* Allocate the inode for the image file and link it into the root dir. */
751 ret = btrfs_find_free_objectid(trans, root, BTRFS_FIRST_FREE_OBJECTID,
755 ret = btrfs_new_inode(trans, root, ino, 0400 | S_IFREG);
758 ret = btrfs_change_inode_flags(trans, root, ino, flags);
761 ret = btrfs_add_link(trans, root, ino, BTRFS_FIRST_FREE_OBJECTID, name,
762 strlen(name), BTRFS_FT_REG_FILE, NULL, 1);
/* Read the freshly created inode item into a stack copy. */
767 key.type = BTRFS_INODE_ITEM_KEY;
770 ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
772 ret = (ret > 0 ? -ENOENT : ret);
775 read_extent_buffer(path.nodes[0], &buf,
776 btrfs_item_ptr_offset(path.nodes[0], path.slots[0]),
778 btrfs_release_path(&path);
781 * Create a new used space cache, which doesn't contain the reserved
784 for (cache = first_cache_extent(&cctx->used_space); cache;
785 cache = next_cache_extent(cache)) {
786 ret = add_cache_extent(&used_tmp, cache->start, cache->size);
790 ret = wipe_reserved_ranges(&used_tmp, 0, 0);
795 * Start from 1M, as 0~1M is reserved, and create_image_file_range()
796 * can't handle bytenr 0(will consider it as a hole)
800 u64 len = size - cur;
802 ret = create_image_file_range(trans, root, &used_tmp,
803 &buf, ino, cur, &len,
809 /* Handle the reserved ranges */
810 ret = migrate_reserved_ranges(trans, root, &cctx->used_space, &buf, fd,
811 ino, cfg->num_bytes, convert_flags);
/* Write the updated inode item (with final size) back to disk. */
814 key.type = BTRFS_INODE_ITEM_KEY;
816 ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
818 ret = (ret > 0 ? -ENOENT : ret);
821 btrfs_set_stack_inode_size(&buf, cfg->num_bytes);
822 write_extent_buffer(path.nodes[0], &buf,
823 btrfs_item_ptr_offset(path.nodes[0], path.slots[0]),
826 free_extent_cache_tree(&used_tmp);
827 btrfs_release_path(&path);
828 btrfs_commit_transaction(trans, root);
/*
 * Link the subvolume @root_objectid into @root's top directory under the
 * name @base (falling back to "@base0".."@base1023" on name collision).
 * Returns the btrfs_root of the linked subvolume, or NULL on failure.
 * NOTE(review): listing is elided — ret/len/index/i declarations, error
 * gotos and the final return are not visible here.
 */
832 static struct btrfs_root* link_subvol(struct btrfs_root *root,
833 const char *base, u64 root_objectid)
835 struct btrfs_trans_handle *trans;
836 struct btrfs_fs_info *fs_info = root->fs_info;
837 struct btrfs_root *tree_root = fs_info->tree_root;
838 struct btrfs_root *new_root = NULL;
839 struct btrfs_path path;
840 struct btrfs_inode_item *inode_item;
841 struct extent_buffer *leaf;
842 struct btrfs_key key;
843 u64 dirid = btrfs_root_dirid(&root->root_item);
845 char buf[BTRFS_NAME_LEN + 1]; /* for snprintf null */
851 if (len == 0 || len > BTRFS_NAME_LEN)
/* Find the highest existing DIR_INDEX to pick the next free index. */
854 btrfs_init_path(&path);
855 key.objectid = dirid;
856 key.type = BTRFS_DIR_INDEX_KEY;
857 key.offset = (u64)-1;
859 ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
861 error("search for DIR_INDEX dirid %llu failed: %d",
862 (unsigned long long)dirid, ret);
866 if (path.slots[0] > 0) {
868 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
869 if (key.objectid == dirid && key.type == BTRFS_DIR_INDEX_KEY)
870 index = key.offset + 1;
872 btrfs_release_path(&path);
874 trans = btrfs_start_transaction(root, 1);
876 error("unable to start transaction");
/* Look up the top directory inode to update its size later. */
880 key.objectid = dirid;
882 key.type = BTRFS_INODE_ITEM_KEY;
884 ret = btrfs_lookup_inode(trans, root, &path, &key, 1);
886 error("search for INODE_ITEM %llu failed: %d",
887 (unsigned long long)dirid, ret);
890 leaf = path.nodes[0];
891 inode_item = btrfs_item_ptr(leaf, path.slots[0],
892 struct btrfs_inode_item);
894 key.objectid = root_objectid;
895 key.offset = (u64)-1;
896 key.type = BTRFS_ROOT_ITEM_KEY;
/* Insert the dir item, retrying with a numeric suffix on -EEXIST. */
898 memcpy(buf, base, len);
899 for (i = 0; i < 1024; i++) {
900 ret = btrfs_insert_dir_item(trans, root, buf, len,
901 dirid, &key, BTRFS_FT_DIR, index);
904 len = snprintf(buf, ARRAY_SIZE(buf), "%s%d", base, i);
905 if (len < 1 || len > BTRFS_NAME_LEN) {
/* Directory size grows by twice the name length, as btrfs does. */
913 btrfs_set_inode_size(leaf, inode_item, len * 2 +
914 btrfs_inode_size(leaf, inode_item));
915 btrfs_mark_buffer_dirty(leaf);
916 btrfs_release_path(&path);
918 /* add the backref first */
919 ret = btrfs_add_root_ref(trans, tree_root, root_objectid,
920 BTRFS_ROOT_BACKREF_KEY,
921 root->root_key.objectid,
922 dirid, index, buf, len);
924 error("unable to add root backref for %llu: %d",
925 root->root_key.objectid, ret);
929 /* now add the forward ref */
930 ret = btrfs_add_root_ref(trans, tree_root, root->root_key.objectid,
931 BTRFS_ROOT_REF_KEY, root_objectid,
932 dirid, index, buf, len);
934 error("unable to add root ref for %llu: %d",
935 root->root_key.objectid, ret);
939 ret = btrfs_commit_transaction(trans, root);
941 error("transaction commit failed: %d", ret);
945 new_root = btrfs_read_fs_root(fs_info, &key);
946 if (IS_ERR(new_root)) {
947 error("unable to fs read root: %lu", PTR_ERR(new_root));
951 btrfs_init_path(&path);
/*
 * Create a new subvolume with id @root_objectid by copying @root's tree
 * root, inserting the matching ROOT_ITEM and creating its top directory.
 * NOTE(review): listing is elided — error checks after several calls and
 * the final return are not visible here.
 */
955 static int create_subvol(struct btrfs_trans_handle *trans,
956 struct btrfs_root *root, u64 root_objectid)
958 struct extent_buffer *tmp;
959 struct btrfs_root *new_root;
960 struct btrfs_key key;
961 struct btrfs_root_item root_item;
/* Clone the source tree root node as the new subvolume's root. */
964 ret = btrfs_copy_root(trans, root, root->node, &tmp,
969 memcpy(&root_item, &root->root_item, sizeof(root_item));
970 btrfs_set_root_bytenr(&root_item, tmp->start);
971 btrfs_set_root_level(&root_item, btrfs_header_level(tmp));
972 btrfs_set_root_generation(&root_item, trans->transid);
973 free_extent_buffer(tmp);
975 key.objectid = root_objectid;
976 key.type = BTRFS_ROOT_ITEM_KEY;
977 key.offset = trans->transid;
978 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
981 key.offset = (u64)-1;
982 new_root = btrfs_read_fs_root(root->fs_info, &key);
/*
 * NOTE(review): if btrfs_read_fs_root() returns NULL (not an ERR_PTR),
 * PTR_ERR(NULL) is 0, so this would return success with no root — verify
 * whether btrfs_read_fs_root() can actually return NULL here.
 */
983 if (!new_root || IS_ERR(new_root)) {
984 error("unable to fs read root: %lu", PTR_ERR(new_root));
985 return PTR_ERR(new_root);
988 ret = btrfs_make_root_dir(trans, new_root, BTRFS_FIRST_FREE_OBJECTID);
994 * The new make_btrfs() handles system and meta chunks quite well,
995 * so we only need to add the remaining data chunks.
 *
 * Walks cctx->data_chunks and allocates 1:1 mapped DATA chunks (and their
 * block groups), each capped at min(1G, 10% of the device), rounded down
 * to sectorsize.
 * NOTE(review): listing is elided — error checks, the cur advance and the
 * final return are not visible here.
997 static int make_convert_data_block_groups(struct btrfs_trans_handle *trans,
998 struct btrfs_fs_info *fs_info,
999 struct btrfs_mkfs_config *cfg,
1000 struct btrfs_convert_context *cctx)
1002 struct btrfs_root *extent_root = fs_info->extent_root;
1003 struct cache_tree *data_chunks = &cctx->data_chunks;
1004 struct cache_extent *cache;
1009 * Don't create data chunk over 10% of the convert device
1010 * And for single chunk, don't create chunk larger than 1G.
1012 max_chunk_size = cfg->num_bytes / 10;
1013 max_chunk_size = min((u64)(1024 * 1024 * 1024), max_chunk_size);
1014 max_chunk_size = round_down(max_chunk_size, extent_root->sectorsize);
1016 for (cache = first_cache_extent(data_chunks); cache;
1017 cache = next_cache_extent(cache)) {
1018 u64 cur = cache->start;
/* Split each data-chunk range into chunks of at most max_chunk_size. */
1020 while (cur < cache->start + cache->size) {
1022 u64 cur_backup = cur;
1024 len = min(max_chunk_size,
1025 cache->start + cache->size - cur);
1026 ret = btrfs_alloc_data_chunk(trans, extent_root,
1028 BTRFS_BLOCK_GROUP_DATA, 1);
1031 ret = btrfs_make_block_group(trans, extent_root, 0,
1032 BTRFS_BLOCK_GROUP_DATA,
1033 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
1044 * Init the temp btrfs to an operational status.
1046 * It will fix the extent usage accounting(XXX: Do we really need?) and
1047 * insert needed data chunks, to ensure all old fs data extents are covered
1048 * by DATA chunks, preventing wrong chunks are allocated.
1050 * And also create convert image subvolume and relocation tree.
1051 * (XXX: Not need again?)
1052 * But the convert image subvolume is *NOT* linked to fs tree yet.
 *
 * NOTE(review): listing is elided — error checks/gotos after several
 * calls and the final return are not visible here.
1054 static int init_btrfs(struct btrfs_mkfs_config *cfg, struct btrfs_root *root,
1055 struct btrfs_convert_context *cctx, u32 convert_flags)
1057 struct btrfs_key location;
1058 struct btrfs_trans_handle *trans;
1059 struct btrfs_fs_info *fs_info = root->fs_info;
1063 * Don't alloc any metadata/system chunk, as we don't want
1064 * any meta/sys chunk allocated before all data chunks are inserted.
1065 * Or we screw up the chunk layout just like the old implementation.
1067 fs_info->avoid_sys_chunk_alloc = 1;
1068 fs_info->avoid_meta_chunk_alloc = 1;
1069 trans = btrfs_start_transaction(root, 1);
1071 error("unable to start transaction");
1075 ret = btrfs_fix_block_accounting(trans, root);
1078 ret = make_convert_data_block_groups(trans, fs_info, cfg, cctx);
1081 ret = btrfs_make_root_dir(trans, fs_info->tree_root,
1082 BTRFS_ROOT_TREE_DIR_OBJECTID);
/* Wire the "default" entry in the root tree dir to the fs tree. */
1085 memcpy(&location, &root->root_key, sizeof(location));
1086 location.offset = (u64)-1;
1087 ret = btrfs_insert_dir_item(trans, fs_info->tree_root, "default", 7,
1088 btrfs_super_root_dir(fs_info->super_copy),
1089 &location, BTRFS_FT_DIR, 0);
1092 ret = btrfs_insert_inode_ref(trans, fs_info->tree_root, "default", 7,
1094 btrfs_super_root_dir(fs_info->super_copy), 0);
1097 btrfs_set_root_dirid(&fs_info->fs_root->root_item,
1098 BTRFS_FIRST_FREE_OBJECTID);
1100 /* subvol for fs image file */
1101 ret = create_subvol(trans, root, CONV_IMAGE_SUBVOL_OBJECTID);
1103 error("failed to create subvolume image root: %d", ret);
1106 /* subvol for data relocation tree */
1107 ret = create_subvol(trans, root, BTRFS_DATA_RELOC_TREE_OBJECTID);
1109 error("failed to create DATA_RELOC root: %d", ret);
1113 ret = btrfs_commit_transaction(trans, root);
/* Re-enable normal chunk allocation for the rest of the conversion. */
1114 fs_info->avoid_sys_chunk_alloc = 0;
1115 fs_info->avoid_meta_chunk_alloc = 0;
1121 * Migrate the super block from @old_bytenr to its default position
 * (BTRFS_SUPER_INFO_OFFSET), re-checksum it, and zero out the device
 * range below that offset.
 * NOTE(review): listing is elided — error gotos, bytenr advance in the
 * zeroing loop, free(buf) and the final return are not visible here.
1123 static int migrate_super_block(int fd, u64 old_bytenr)
1126 struct extent_buffer *buf;
1127 struct btrfs_super_block *super;
1131 buf = malloc(sizeof(*buf) + BTRFS_SUPER_INFO_SIZE);
1135 buf->len = BTRFS_SUPER_INFO_SIZE;
1136 ret = pread(fd, buf->data, BTRFS_SUPER_INFO_SIZE, old_bytenr);
1137 if (ret != BTRFS_SUPER_INFO_SIZE)
1140 super = (struct btrfs_super_block *)buf->data;
1141 BUG_ON(btrfs_super_bytenr(super) != old_bytenr);
/* Update the recorded bytenr, re-checksum, write to the default spot. */
1142 btrfs_set_super_bytenr(super, BTRFS_SUPER_INFO_OFFSET);
1144 csum_tree_block_size(buf, BTRFS_CRC32_SIZE, 0);
1145 ret = pwrite(fd, buf->data, BTRFS_SUPER_INFO_SIZE,
1146 BTRFS_SUPER_INFO_OFFSET);
1147 if (ret != BTRFS_SUPER_INFO_SIZE)
/* Zero everything below the superblock in SUPER_INFO_SIZE chunks. */
1154 memset(buf->data, 0, BTRFS_SUPER_INFO_SIZE);
1155 for (bytenr = 0; bytenr < BTRFS_SUPER_INFO_OFFSET; ) {
1156 len = BTRFS_SUPER_INFO_OFFSET - bytenr;
1157 if (len > BTRFS_SUPER_INFO_SIZE)
1158 len = BTRFS_SUPER_INFO_SIZE;
1159 ret = pwrite(fd, buf->data, len, bytenr);
1161 fprintf(stderr, "unable to zero fill device\n");
/*
 * Build a single-stripe SYSTEM chunk entry directly inside the
 * superblock's sys_chunk_array: one disk key followed by one
 * btrfs_chunk covering the whole device, then set the array size.
 */
1175 static int prepare_system_chunk_sb(struct btrfs_super_block *super)
1177 struct btrfs_chunk *chunk;
1178 struct btrfs_disk_key *key;
1179 u32 sectorsize = btrfs_super_sectorsize(super);
/* The array layout is: disk_key immediately followed by the chunk. */
1181 key = (struct btrfs_disk_key *)(super->sys_chunk_array);
1182 chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
1183 sizeof(struct btrfs_disk_key));
1185 btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1186 btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
1187 btrfs_set_disk_key_offset(key, 0);
1189 btrfs_set_stack_chunk_length(chunk, btrfs_super_total_bytes(super));
1190 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
1191 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
1192 btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
1193 btrfs_set_stack_chunk_io_align(chunk, sectorsize);
1194 btrfs_set_stack_chunk_io_width(chunk, sectorsize);
1195 btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
1196 btrfs_set_stack_chunk_num_stripes(chunk, 1);
1197 btrfs_set_stack_chunk_sub_stripes(chunk, 0);
1198 chunk->stripe.devid = super->dev_item.devid;
1199 btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
1200 memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
1201 btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
/*
 * Probe @devname with each registered source filesystem in turn; on the
 * first successful open_fs, record its ops in cctx->convert_ops.
 * Errors out when no supported filesystem is detected.
 */
1205 static int convert_open_fs(const char *devname,
1206 struct btrfs_convert_context *cctx)
1210 for (i = 0; i < ARRAY_SIZE(convert_operations); i++) {
1211 int ret = convert_operations[i]->open_fs(cctx, devname);
1214 cctx->convert_ops = convert_operations[i];
1219 error("no file system found to convert");
/*
 * Top-level conversion driver: open and validate the source fs, create a
 * temporary btrfs (with its superblock at a non-default location so the
 * source fs stays intact), build the image file, copy all inodes, set the
 * label, then migrate the superblock to its final position and finalize.
 * NOTE(review): listing is elided — many declarations (ret, fd,
 * blocksize, total_bytes), error gotos and the returns are not visible.
 */
1223 static int do_convert(const char *devname, u32 convert_flags, u32 nodesize,
1224 const char *fslabel, int progress, u64 features)
1230 struct btrfs_root *root;
1231 struct btrfs_root *image_root;
1232 struct btrfs_convert_context cctx;
1233 struct btrfs_key key;
1234 char subvol_name[SOURCE_FS_NAME_LEN + 8];
1235 struct task_ctx ctx;
1236 char features_buf[64];
1237 struct btrfs_mkfs_config mkfs_cfg;
/* Step 0: open the source fs and scan its used space. */
1239 init_convert_context(&cctx);
1240 ret = convert_open_fs(devname, &cctx);
1243 ret = convert_check_state(&cctx);
1246 "source filesystem is not clean, running filesystem check is recommended");
1247 ret = convert_read_used_space(&cctx);
1251 blocksize = cctx.blocksize;
1252 total_bytes = (u64)blocksize * (u64)cctx.block_count;
1253 if (blocksize < 4096) {
1254 error("block size is too small: %u < 4096", blocksize);
1257 if (btrfs_check_nodesize(nodesize, blocksize, features))
1259 fd = open(devname, O_RDWR);
1261 error("unable to open %s: %s", devname, strerror(errno));
1264 btrfs_parse_features_to_string(features_buf, features);
1265 if (features == BTRFS_MKFS_DEFAULT_FEATURES)
1266 strcat(features_buf, " (default)");
1268 printf("create btrfs filesystem:\n");
1269 printf("\tblocksize: %u\n", blocksize);
1270 printf("\tnodesize: %u\n", nodesize);
1271 printf("\tfeatures: %s\n", features_buf);
/* Step 1: create the base btrfs (superblock at a temporary bytenr). */
1273 memset(&mkfs_cfg, 0, sizeof(mkfs_cfg));
1274 mkfs_cfg.label = cctx.volume_name;
1275 mkfs_cfg.num_bytes = total_bytes;
1276 mkfs_cfg.nodesize = nodesize;
1277 mkfs_cfg.sectorsize = blocksize;
1278 mkfs_cfg.stripesize = blocksize;
1279 mkfs_cfg.features = features;
1281 ret = make_convert_btrfs(fd, &mkfs_cfg, &cctx);
1283 error("unable to create initial ctree: %s", strerror(-ret));
1287 root = open_ctree_fd(fd, devname, mkfs_cfg.super_bytenr,
1288 OPEN_CTREE_WRITES | OPEN_CTREE_FS_PARTIAL);
1290 error("unable to open ctree");
1293 ret = init_btrfs(&mkfs_cfg, root, &cctx, convert_flags);
1295 error("unable to setup the root tree: %d", ret);
/* Step 2: build the image file inside the image subvolume. */
1299 printf("creating %s image file\n", cctx.convert_ops->name);
1300 snprintf(subvol_name, sizeof(subvol_name), "%s_saved",
1301 cctx.convert_ops->name);
1302 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
1303 key.offset = (u64)-1;
1304 key.type = BTRFS_ROOT_ITEM_KEY;
1305 image_root = btrfs_read_fs_root(root->fs_info, &key);
1307 error("unable to create image subvolume");
1310 ret = create_image(image_root, &mkfs_cfg, &cctx, fd,
1311 mkfs_cfg.num_bytes, "image",
1314 error("failed to create %s/image: %d", subvol_name, ret);
/* Step 3: copy all inodes from the source fs, with a progress task. */
1318 printf("creating btrfs metadata");
1319 ctx.max_copy_inodes = (cctx.inodes_count - cctx.free_inodes_count);
1320 ctx.cur_copy_inodes = 0;
1323 ctx.info = task_init(print_copied_inodes, after_copied_inodes,
1325 task_start(ctx.info);
1327 ret = copy_inodes(&cctx, root, convert_flags, &ctx);
1329 error("error during copy_inodes %d", ret);
1333 task_stop(ctx.info);
1334 task_deinit(ctx.info);
/* Step 4: make the image subvolume visible in the fs tree. */
1337 image_root = link_subvol(root, subvol_name, CONV_IMAGE_SUBVOL_OBJECTID);
1339 error("unable to link subvolume %s", subvol_name);
1343 memset(root->fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE);
1344 if (convert_flags & CONVERT_FLAG_COPY_LABEL) {
1345 __strncpy_null(root->fs_info->super_copy->label,
1346 cctx.volume_name, BTRFS_LABEL_SIZE - 1);
1347 printf("copy label '%s'\n", root->fs_info->super_copy->label);
1348 } else if (convert_flags & CONVERT_FLAG_SET_LABEL) {
1349 strcpy(root->fs_info->super_copy->label, fslabel);
1350 printf("set label to '%s'\n", fslabel);
1353 ret = close_ctree(root);
1355 error("close_ctree failed: %d", ret);
1358 convert_close_fs(&cctx);
1359 clean_convert_context(&cctx);
/* Step 5: point of no return — move the super to its final location. */
1362 * If this step succeed, we get a mountable btrfs. Otherwise
1363 * the source fs is left unchanged.
1365 ret = migrate_super_block(fd, mkfs_cfg.super_bytenr);
1367 error("unable to migrate super block: %d", ret);
/* Reopen at the default super location and finalize on close. */
1371 root = open_ctree_fd(fd, devname, 0,
1372 OPEN_CTREE_WRITES | OPEN_CTREE_FS_PARTIAL);
1374 error("unable to open ctree for finalization");
1377 root->fs_info->finalize_on_close = 1;
1381 printf("conversion complete");
1384 clean_convert_context(&cctx);
1388 "an error occurred during conversion, filesystem is partially created but not finalized and not mountable");
1393 * Check if a non 1:1 mapped chunk can be rolled back.
1394 * For new convert, it's OK while for old convert it's not.
1396 static int may_rollback_chunk(struct btrfs_fs_info *fs_info, u64 bytenr)
1398 struct btrfs_block_group_cache *bg;
1399 struct btrfs_key key;
1400 struct btrfs_path path;
1401 struct btrfs_root *extent_root = fs_info->extent_root;
/*
 * Locate the block group covering @bytenr; its [objectid, objectid+offset)
 * range bounds the extent-tree walk below.
 * NOTE(review): this excerpt is missing several lines (local declarations,
 * NULL/error checks, return paths) -- verify against the full source.
 */
1406 bg = btrfs_lookup_first_block_group(fs_info, bytenr);
1409 bg_start = bg->key.objectid;
1410 bg_end = bg->key.objectid + bg->key.offset;
/* Search from the end of the block group, then step backwards. */
1412 key.objectid = bg_end;
1413 key.type = BTRFS_METADATA_ITEM_KEY;
1415 btrfs_init_path(&path);
1417 ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
1422 struct btrfs_extent_item *ei;
/* Walk to the previous extent item, staying within [bg_start, bg_end). */
1424 ret = btrfs_previous_extent_item(extent_root, &path, bg_start);
1432 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
/* Metadata items never block rollback; only data extents matter here. */
1433 if (key.type == BTRFS_METADATA_ITEM_KEY)
1435 /* Now it's EXTENT_ITEM_KEY only */
1436 ei = btrfs_item_ptr(path.nodes[0], path.slots[0],
1437 struct btrfs_extent_item);
1439 * Found data extent, means this is old convert must follow 1:1
1442 if (btrfs_extent_flags(path.nodes[0], ei)
1443 & BTRFS_EXTENT_FLAG_DATA) {
1448 btrfs_release_path(&path);
/*
 * Decide whether the converted filesystem on @root can be rolled back to
 * the original filesystem.  Walks the whole device checking that every
 * chunk is 1:1 mapped (logical == physical), which rollback relies on.
 * NOTE(review): loop structure and goto/return lines are missing from this
 * excerpt -- confirm control flow against the full source.
 */
1452 static int may_rollback(struct btrfs_root *root)
1454 struct btrfs_fs_info *info = root->fs_info;
1455 struct btrfs_multi_bio *multi = NULL;
/* Rollback only supports single-device filesystems. */
1463 if (btrfs_super_num_devices(info->super_copy) != 1)
1466 bytenr = BTRFS_SUPER_INFO_OFFSET;
1467 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
/* Map each logical address to its physical location, chunk by chunk. */
1470 ret = btrfs_map_block(&info->mapping_tree, WRITE, bytenr,
1471 &length, &multi, 0, NULL);
1473 if (ret == -ENOENT) {
1474 /* removed block group at the tail */
1475 if (length == (u64)-1)
1478 /* removed block group in the middle */
1484 num_stripes = multi->num_stripes;
1485 physical = multi->stripes[0].physical;
/* Multi-stripe (RAID) chunks cannot be rolled back 1:1. */
1488 if (num_stripes != 1) {
1489 error("num stripes for bytenr %llu is not 1", bytenr);
1494 * Extra check for new convert, as metadata chunk from new
1495 * convert is much more free than old convert, it doesn't need
1496 * to do 1:1 mapping.
1498 if (physical != bytenr) {
1500 * Check if it's a metadata chunk and has only metadata
1503 ret = may_rollback_chunk(info, bytenr);
/* Stop once the whole device range has been verified. */
1509 if (bytenr >= total_bytes)
1518 * Read out data of convert image which is in btrfs reserved ranges so we can
1519 * use them to overwrite the ranges during rollback.
/*
 * @root:            root of the image subvolume
 * @ino:             inode number of the convert image file
 * @total_bytes:     size of the original filesystem/image
 * @reserved_ranges: caller-provided buffers, one per entry in
 *                   btrfs_reserved_ranges, filled with the image data
 * NOTE(review): return statements are missing from this excerpt.
 */
1521 static int read_reserved_ranges(struct btrfs_root *root, u64 ino,
1522 u64 total_bytes, char *reserved_ranges[])
1527 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
1528 struct simple_range *range = &btrfs_reserved_ranges[i];
/* Skip ranges that lie beyond the end of the original filesystem. */
1530 if (range->start + range->len >= total_bytes)
1532 ret = btrfs_read_file(root, ino, range->start, range->len,
1533 reserved_ranges[i]);
/* A short read means the image is damaged/incomplete -- treat as error. */
1534 if (ret < range->len) {
1536 "failed to read data of convert image, offset=%llu len=%llu ret=%d",
1537 range->start, range->len, ret);
/*
 * Return true if [start, start + len) lies entirely inside one of the
 * btrfs reserved ranges (sb at 0~1M and the superblock backup copies).
 * NOTE(review): the return lines are missing from this excerpt.
 */
1547 static bool is_subset_of_reserved_ranges(u64 start, u64 len)
1552 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
1553 struct simple_range *range = &btrfs_reserved_ranges[i];
1555 if (start >= range->start && start + len <= range_end(range)) {
/*
 * Return true if the chunk containing @start is 1:1 (direct) mapped:
 * a SINGLE-profile chunk whose logical address equals its physical offset.
 */
1563 static bool is_chunk_direct_mapped(struct btrfs_fs_info *fs_info, u64 start)
1565 struct cache_extent *ce;
1566 struct map_lookup *map;
1569 ce = search_cache_extent(&fs_info->mapping_tree.cache_tree, start);
/* The found cache extent must actually cover @start. */
1572 if (ce->start > start || ce->start + ce->size < start)
1575 map = container_of(ce, struct map_lookup, ce);
1577 /* Not SINGLE chunk */
1578 if (map->num_stripes != 1)
1581 /* Chunk's logical doesn't match with physical, not 1:1 mapped */
1582 if (map->ce.start != map->stripes[0].physical)
1590 * Iterate all file extents of the convert image.
1592 * All file extents except ones in btrfs_reserved_ranges must be mapped 1:1
1593 * on disk. (Means their file_offset must match their on disk bytenr)
1595 * File extents in reserved ranges can be relocated to other place, and in
1596 * that case we will read them out for later use.
/*
 * NOTE(review): several control-flow lines (braces, goto/return targets)
 * are missing from this excerpt -- verify against the full source.
 */
1598 static int check_image_file_extents(struct btrfs_root *image_root, u64 ino,
1599 u64 total_size, char *reserved_ranges[])
1601 struct btrfs_key key;
1602 struct btrfs_path path;
1603 struct btrfs_fs_info *fs_info = image_root->fs_info;
1604 u64 checked_bytes = 0;
1609 key.type = BTRFS_EXTENT_DATA_KEY;
1611 btrfs_init_path(&path);
1612 ret = btrfs_search_slot(NULL, image_root, &key, &path, 0, 0);
1614 * It's possible that some fs doesn't store any (including sb)
1615 * data into 0~1M range, and NO_HOLES is enabled.
1617 * So we only need to check if ret < 0
1620 error("failed to iterate file extents at offset 0: %s",
1622 btrfs_release_path(&path);
1626 /* Loop from the first file extents */
1628 struct btrfs_file_extent_item *fi;
1629 struct extent_buffer *leaf = path.nodes[0];
1633 int slot = path.slots[0];
/* Ran off the current leaf; next item is fetched below. */
1635 if (slot >= btrfs_header_nritems(leaf))
1637 btrfs_item_key_to_cpu(leaf, &key, slot);
1640 * Iteration is done, exit normally, we have extra check out of
1643 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
1647 file_offset = key.offset;
1648 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
/* Only plain REG extents can be rolled back in place. */
1649 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) {
1652 "ino %llu offset %llu doesn't have a regular file extent",
1656 if (btrfs_file_extent_compression(leaf, fi) ||
1657 btrfs_file_extent_encryption(leaf, fi) ||
1658 btrfs_file_extent_other_encoding(leaf, fi)) {
1661 "ino %llu offset %llu doesn't have a plain file extent",
1666 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1667 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
/* Accumulate coverage for the HOLES-mode full-coverage check below. */
1669 checked_bytes += ram_bytes;
/* Hole extent, nothing on disk to verify. */
1671 if (disk_bytenr == 0)
1675 * Most file extents must be 1:1 mapped, which means 2 things:
1676 * 1) File extent file offset == disk_bytenr
1677 * 2) That data chunk's logical == chunk's physical
1679 * So file extent's file offset == physical position on disk.
1681 * And after rolling back btrfs reserved range, other part
1682 * remains what old fs used to be.
1684 if (file_offset != disk_bytenr ||
1685 !is_chunk_direct_mapped(fs_info, disk_bytenr)) {
1687 * Only file extent in btrfs reserved ranges are
1688 * allowed to be non-1:1 mapped
1690 if (!is_subset_of_reserved_ranges(file_offset,
1694 "ino %llu offset %llu file extent should not be relocated",
1700 ret = btrfs_next_item(image_root, &path);
1707 btrfs_release_path(&path);
1709 * For HOLES mode (without NO_HOLES), we must ensure file extents
1710 * cover the whole range of the image
1712 if (!ret && !btrfs_fs_incompat(fs_info, NO_HOLES)) {
1713 if (checked_bytes != total_size) {
1715 error("inode %llu has some file extents not checked",
1720 /* So far so good, read old data located in btrfs reserved ranges */
1721 ret = read_reserved_ranges(image_root, ino, total_size,
/*
 * Roll a converted btrfs back to the original filesystem on @devname.
 *
 * High-level phases (visible in this excerpt):
 *   1) open the device/ctree and verify rollback is possible
 *   2) locate the saved image subvolume and its image inode
 *   3) build an io_tree mapping of relocated (non-1:1) image extents
 *   4) empty the system block group and commit
 *   5) zero the btrfs superblock mirrors
 *   6) copy relocated blocks back to their 1:1 positions
 *   7) overwrite the primary btrfs superblock with the original fs data
 *
 * NOTE(review): many lines (declarations, if-conditions, goto/fail labels,
 * loop braces) are missing from this excerpt -- the annotations below
 * describe only what the visible statements establish.
 */
1726 static int do_rollback(const char *devname)
1731 struct btrfs_root *root;
1732 struct btrfs_root *image_root;
1733 struct btrfs_root *chunk_root;
1734 struct btrfs_dir_item *dir;
1735 struct btrfs_inode_item *inode;
1736 struct btrfs_file_extent_item *fi;
1737 struct btrfs_trans_handle *trans;
1738 struct extent_buffer *leaf;
1739 struct btrfs_block_group_cache *cache1;
1740 struct btrfs_block_group_cache *cache2;
1741 struct btrfs_key key;
1742 struct btrfs_path path;
1743 struct extent_io_tree io_tree;
/* Phase 1: open device and ctree, check rollback preconditions. */
1758 extent_io_tree_init(&io_tree);
1760 fd = open(devname, O_RDWR);
1762 error("unable to open %s: %s", devname, strerror(errno));
1765 root = open_ctree_fd(fd, devname, 0, OPEN_CTREE_WRITES);
1767 error("unable to open ctree");
1770 ret = may_rollback(root);
1772 error("unable to do rollback: %d", ret);
1776 sectorsize = root->sectorsize;
1777 buf = malloc(sectorsize);
1779 error("unable to allocate memory");
/* Phase 2: find the saved image subvolume via its root backref. */
1783 btrfs_init_path(&path);
1785 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
1786 key.type = BTRFS_ROOT_BACKREF_KEY;
1787 key.offset = BTRFS_FS_TREE_OBJECTID;
1788 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, &path, 0,
1790 btrfs_release_path(&path);
1792 error("unable to convert ext2 image subvolume, is it deleted?");
1794 } else if (ret < 0) {
1795 error("unable to open ext2_saved, id %llu: %s",
1796 (unsigned long long)key.objectid, strerror(-ret));
1800 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
1801 key.type = BTRFS_ROOT_ITEM_KEY;
1802 key.offset = (u64)-1;
1803 image_root = btrfs_read_fs_root(root->fs_info, &key);
1804 if (!image_root || IS_ERR(image_root)) {
1805 error("unable to open subvolume %llu: %ld",
1806 (unsigned long long)key.objectid, PTR_ERR(image_root));
/* Look up the "image" file inside the subvolume's root directory. */
1811 root_dir = btrfs_root_dirid(&root->root_item);
1812 dir = btrfs_lookup_dir_item(NULL, image_root, &path,
1813 root_dir, name, strlen(name), 0);
1814 if (!dir || IS_ERR(dir)) {
1815 error("unable to find file %s: %ld", name, PTR_ERR(dir));
1818 leaf = path.nodes[0];
1819 btrfs_dir_item_key_to_cpu(leaf, dir, &key);
1820 btrfs_release_path(&path);
1822 objectid = key.objectid;
/* The image inode's size is the size of the original filesystem. */
1824 ret = btrfs_lookup_inode(NULL, image_root, &path, &key, 0);
1826 error("unable to find inode item: %d", ret);
1829 leaf = path.nodes[0];
1830 inode = btrfs_item_ptr(leaf, path.slots[0], struct btrfs_inode_item);
1831 total_bytes = btrfs_inode_size(leaf, inode);
1832 btrfs_release_path(&path);
1834 key.objectid = objectid;
1836 key.type = BTRFS_EXTENT_DATA_KEY;
1837 ret = btrfs_search_slot(NULL, image_root, &key, &path, 0, 0);
1839 error("unable to find first file extent: %d", ret);
1840 btrfs_release_path(&path);
1844 /* build mapping tree for the relocated blocks */
1845 for (offset = 0; offset < total_bytes; ) {
1846 leaf = path.nodes[0];
1847 if (path.slots[0] >= btrfs_header_nritems(leaf)) {
1848 ret = btrfs_next_leaf(root, &path);
/* Extents must cover the image contiguously, in order, with no gaps. */
1854 btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
1855 if (key.objectid != objectid || key.offset != offset ||
1856 key.type != BTRFS_EXTENT_DATA_KEY)
1859 fi = btrfs_item_ptr(leaf, path.slots[0],
1860 struct btrfs_file_extent_item);
1861 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1863 if (btrfs_file_extent_compression(leaf, fi) ||
1864 btrfs_file_extent_encryption(leaf, fi) ||
1865 btrfs_file_extent_other_encoding(leaf, fi))
1868 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1869 /* skip holes and direct mapped extents */
1870 if (bytenr == 0 || bytenr == offset)
1873 bytenr += btrfs_file_extent_offset(leaf, fi);
1874 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
/* A relocated extent must not straddle a block group boundary. */
1876 cache1 = btrfs_lookup_block_group(root->fs_info, offset);
1877 cache2 = btrfs_lookup_block_group(root->fs_info,
1878 offset + num_bytes - 1);
1880 * Here we must take consideration of old and new convert
1882 * For old convert case, sign, there is no consist chunk type
1883 * that will cover the extent. META/DATA/SYS are all possible.
1884 * Just ensure relocate one is in SYS chunk.
1885 * For new convert case, they are all covered by DATA chunk.
1887 * So, there is not valid chunk type check for it now.
1889 if (cache1 != cache2)
/* Record file offset -> on-disk bytenr in io_tree for the copy-back phase. */
1892 set_extent_bits(&io_tree, offset, offset + num_bytes - 1,
1894 set_state_private(&io_tree, offset, bytenr);
1896 offset += btrfs_file_extent_num_bytes(leaf, fi);
1899 btrfs_release_path(&path);
1901 if (offset < total_bytes) {
1902 error("unable to build extent mapping (offset %llu, total_bytes %llu)",
1903 (unsigned long long)offset,
1904 (unsigned long long)total_bytes);
1905 error("converted filesystem after balance is unable to rollback");
/* first_free: end of the superblock area, rounded up to a sector. */
1909 first_free = BTRFS_SUPER_INFO_OFFSET + 2 * sectorsize - 1;
1910 first_free &= ~((u64)sectorsize - 1);
1911 /* backup for extent #0 should exist */
1912 if(!test_range_bit(&io_tree, 0, first_free - 1, EXTENT_LOCKED, 1)) {
1913 error("no backup for the first extent");
1916 /* force no allocation from system block group */
1917 root->fs_info->system_allocs = -1;
1918 trans = btrfs_start_transaction(root, 1);
1920 error("unable to start transaction");
1924 * recow the whole chunk tree, this will remove all chunk tree blocks
1925 * from system block group
1927 chunk_root = root->fs_info->chunk_root;
1928 memset(&key, 0, sizeof(key));
1930 ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 1);
1934 ret = btrfs_next_leaf(chunk_root, &path);
1938 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
1939 btrfs_release_path(&path);
1941 btrfs_release_path(&path);
/* Sum remaining usage of all SYSTEM block groups across the device. */
1946 cache1 = btrfs_lookup_block_group(root->fs_info, offset);
1950 if (cache1->flags & BTRFS_BLOCK_GROUP_SYSTEM)
1951 num_bytes += btrfs_block_group_used(&cache1->item);
1953 offset = cache1->key.objectid + cache1->key.offset;
1955 /* only extent #0 left in system block group? */
1956 if (num_bytes > first_free) {
1958 "unable to empty system block group (num_bytes %llu, first_free %llu",
1959 (unsigned long long)num_bytes,
1960 (unsigned long long)first_free);
1963 /* create a system chunk that maps the whole device */
1964 ret = prepare_system_chunk_sb(root->fs_info->super_copy);
1966 error("unable to update system chunk: %d", ret);
1970 ret = btrfs_commit_transaction(trans, root);
1972 error("transaction commit failed: %d", ret);
1976 ret = close_ctree(root);
1978 error("close_ctree failed: %d", ret);
1982 /* zero btrfs super block mirrors */
1983 memset(buf, 0, sectorsize);
1984 for (i = 1 ; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1985 bytenr = btrfs_sb_offset(i);
/* Mirrors past the end of the device do not exist -- stop. */
1986 if (bytenr >= total_bytes)
1988 ret = pwrite(fd, buf, sectorsize, bytenr);
1989 if (ret != sectorsize) {
1990 error("zeroing superblock mirror %d failed: %d",
1996 sb_bytenr = (u64)-1;
1997 /* copy all relocated blocks back */
1999 ret = find_first_extent_bit(&io_tree, 0, &start, &end,
2004 ret = get_state_private(&io_tree, start, &bytenr);
2007 clear_extent_bits(&io_tree, start, end, EXTENT_LOCKED);
/* Copy the range sector-by-sector from its relocated home back to 1:1. */
2009 while (start <= end) {
/* Defer the sector that lands on the btrfs superblock to the very end. */
2010 if (start == BTRFS_SUPER_INFO_OFFSET) {
2014 ret = pread(fd, buf, sectorsize, bytenr);
2016 error("reading superblock at %llu failed: %d",
2017 (unsigned long long)bytenr, ret);
2020 BUG_ON(ret != sectorsize);
2021 ret = pwrite(fd, buf, sectorsize, start);
2023 error("writing superblock at %llu failed: %d",
2024 (unsigned long long)start, ret);
2027 BUG_ON(ret != sectorsize);
2029 start += sectorsize;
2030 bytenr += sectorsize;
2036 error("fsync failed: %s", strerror(errno));
2040 * finally, overwrite btrfs super block.
2042 ret = pread(fd, buf, sectorsize, sb_bytenr);
2044 error("reading primary superblock failed: %s",
2048 BUG_ON(ret != sectorsize);
2049 ret = pwrite(fd, buf, sectorsize, BTRFS_SUPER_INFO_OFFSET);
2051 error("writing primary superblock failed: %s",
2055 BUG_ON(ret != sectorsize);
2058 error("fsync failed: %s", strerror(errno));
2064 extent_io_tree_cleanup(&io_tree);
2065 printf("rollback complete\n");
2072 error("rollback aborted");
/* Print command-line usage, options, and supported source filesystems. */
2076 static void print_usage(void)
2078 printf("usage: btrfs-convert [options] device\n");
2079 printf("options:\n");
2080 printf("\t-d|--no-datasum disable data checksum, sets NODATASUM\n");
2081 printf("\t-i|--no-xattr ignore xattrs and ACLs\n");
2082 printf("\t-n|--no-inline disable inlining of small files to metadata\n");
2083 printf("\t-N|--nodesize SIZE set filesystem metadata nodesize\n");
2084 printf("\t-r|--rollback roll back to the original filesystem\n");
2085 printf("\t-l|--label LABEL set filesystem label\n");
2086 printf("\t-L|--copy-label use label from converted filesystem\n");
2087 printf("\t-p|--progress show converting progress (default)\n");
2088 printf("\t-O|--features LIST comma separated list of filesystem features\n");
2089 printf("\t--no-progress show only overview, not the detailed progress\n");
/* BTRFSCONVERT_EXT2 is a compile-time flag for ext2/3/4 support. */
2091 printf("Supported filesystems:\n");
2092 printf("\text2/3/4: %s\n", BTRFSCONVERT_EXT2 ? "yes" : "no");
2095 int main(int argc, char *argv[])
2101 u32 nodesize = max_t(u32, sysconf(_SC_PAGESIZE),
2102 BTRFS_MKFS_DEFAULT_NODE_SIZE);
2105 int usage_error = 0;
2108 char fslabel[BTRFS_LABEL_SIZE];
2109 u64 features = BTRFS_MKFS_DEFAULT_FEATURES;
2112 enum { GETOPT_VAL_NO_PROGRESS = 256 };
2113 static const struct option long_options[] = {
2114 { "no-progress", no_argument, NULL,
2115 GETOPT_VAL_NO_PROGRESS },
2116 { "no-datasum", no_argument, NULL, 'd' },
2117 { "no-inline", no_argument, NULL, 'n' },
2118 { "no-xattr", no_argument, NULL, 'i' },
2119 { "rollback", no_argument, NULL, 'r' },
2120 { "features", required_argument, NULL, 'O' },
2121 { "progress", no_argument, NULL, 'p' },
2122 { "label", required_argument, NULL, 'l' },
2123 { "copy-label", no_argument, NULL, 'L' },
2124 { "nodesize", required_argument, NULL, 'N' },
2125 { "help", no_argument, NULL, GETOPT_VAL_HELP},
2126 { NULL, 0, NULL, 0 }
2128 int c = getopt_long(argc, argv, "dinN:rl:LpO:", long_options, NULL);
2143 nodesize = parse_size(optarg);
2149 copylabel = CONVERT_FLAG_SET_LABEL;
2150 if (strlen(optarg) >= BTRFS_LABEL_SIZE) {
2152 "label too long, trimmed to %d bytes",
2153 BTRFS_LABEL_SIZE - 1);
2155 __strncpy_null(fslabel, optarg, BTRFS_LABEL_SIZE - 1);
2158 copylabel = CONVERT_FLAG_COPY_LABEL;
2164 char *orig = strdup(optarg);
2167 tmp = btrfs_parse_fs_features(tmp, &features);
2169 error("unrecognized filesystem feature: %s",
2175 if (features & BTRFS_FEATURE_LIST_ALL) {
2176 btrfs_list_all_fs_features(
2177 ~BTRFS_CONVERT_ALLOWED_FEATURES);
2180 if (features & ~BTRFS_CONVERT_ALLOWED_FEATURES) {
2183 btrfs_parse_features_to_string(buf,
2184 features & ~BTRFS_CONVERT_ALLOWED_FEATURES);
2185 error("features not allowed for convert: %s",
2192 case GETOPT_VAL_NO_PROGRESS:
2195 case GETOPT_VAL_HELP:
2198 return c != GETOPT_VAL_HELP;
2202 if (check_argc_exact(argc - optind, 1)) {
2207 if (rollback && (!datacsum || noxattr || !packing)) {
2209 "Usage error: -d, -i, -n options do not apply to rollback\n");
2218 file = argv[optind];
2219 ret = check_mounted(file);
2221 error("could not check mount status: %s", strerror(-ret));
2224 error("%s is mounted", file);
2229 ret = do_rollback(file);
2233 cf |= datacsum ? CONVERT_FLAG_DATACSUM : 0;
2234 cf |= packing ? CONVERT_FLAG_INLINE_DATA : 0;
2235 cf |= noxattr ? 0 : CONVERT_FLAG_XATTR;
2237 ret = do_convert(file, cf, nodesize, fslabel, progress, features);