2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
20 * Btrfs convert design:
22 * The overall design of btrfs convert is like the following:
24 * |<------------------Old fs----------------------------->|
25 * |<- used ->| |<- used ->| |<- used ->|
28 * |<---------------Btrfs fs------------------------------>|
29 * |<- Old data chunk ->|< new chunk (D/M/S)>|<- ODC ->|
30 * |<-Old-FE->| |<-Old-FE->|<- Btrfs extents ->|<-Old-FE->|
32 * ODC = Old data chunk, btrfs chunks containing old fs data
33 * Mapped 1:1 (logical address == device offset)
34 * Old-FE = file extents pointing to old fs.
36 * So old fs used space is (mostly) kept as is, while btrfs will insert
37 * its chunk (Data/Meta/Sys) into large enough free space.
38 * In this way, we can create different profiles for metadata/data for
41 * We must reserve and relocate 3 ranges for btrfs:
42 * * [0, 1M) - area never used for any data except the first
44 * * [btrfs_sb_offset(1), +64K) - 1st superblock backup copy
45 * [btrfs_sb_offset(2), +64K) - 2nd superblock backup copy, ditto
47 * Most work is spent handling corner cases around these reserved ranges.
49 * Detailed workflow is:
50 * 1) Scan old fs used space and calculate data chunk layout
52 * We build a map of the used space of the old fs
54 * 1.2) Calculate data chunk layout - this is the hard part
55 * New data chunks must meet 3 conditions using result from 1.1
56 * a. Large enough to be a chunk
57 * b. Doesn't intersect reserved ranges
58 * c. Covers all the remaining old fs used space
60 * NOTE: This can be simplified if we don't need to handle backup supers
62 * 1.3) Calculate usable space for new btrfs chunks
63 * Btrfs chunk usable space must meet 3 conditions using result from 1.2
64 * a. Large enough to be a chunk
65 * b. Doesn't intersect reserved ranges
66 * c. Doesn't cover any data chunks in 1.1
68 * 2) Create basic btrfs filesystem structure
69 * Initial metadata and sys chunks are inserted in the first available
70 * space found in step 1.3
71 * Then insert all data chunks into the basic btrfs
73 * 3) Create convert image
74 * We need to relocate reserved ranges here.
75 * After this step, the convert image is done, and we can use the image
76 * as reflink source to create old files
78 * 4) Iterate old fs to create files
79 * We just reflink file extents from old fs to newly created files on
83 #include "kerncompat.h"
87 #include <sys/types.h>
97 #include "transaction.h"
99 #include "task-utils.h"
101 #include "mkfs/common.h"
102 #include "convert/common.h"
103 #include "convert/source-fs.h"
104 #include "fsfeatures.h"
106 const struct btrfs_convert_operations ext2_convert_ops;
108 static const struct btrfs_convert_operations *convert_operations[] = {
109 #if BTRFSCONVERT_EXT2
114 static void *print_copied_inodes(void *p)
116 struct task_ctx *priv = p;
117 const char work_indicator[] = { '.', 'o', 'O', 'o' };
120 task_period_start(priv->info, 1000 /* 1s */);
123 pthread_mutex_lock(&priv->mutex);
124 printf("copy inodes [%c] [%10llu/%10llu]\r",
125 work_indicator[count % 4],
126 (unsigned long long)priv->cur_copy_inodes,
127 (unsigned long long)priv->max_copy_inodes);
128 pthread_mutex_unlock(&priv->mutex);
130 task_period_wait(priv->info);
136 static int after_copied_inodes(void *p)
144 static inline int copy_inodes(struct btrfs_convert_context *cctx,
145 struct btrfs_root *root, u32 convert_flags,
148 return cctx->convert_ops->copy_inodes(cctx, root, convert_flags, p);
151 static inline void convert_close_fs(struct btrfs_convert_context *cctx)
153 cctx->convert_ops->close_fs(cctx);
156 static inline int convert_check_state(struct btrfs_convert_context *cctx)
158 return cctx->convert_ops->check_state(cctx);
161 static int csum_disk_extent(struct btrfs_trans_handle *trans,
162 struct btrfs_root *root,
163 u64 disk_bytenr, u64 num_bytes)
165 u32 blocksize = root->fs_info->sectorsize;
170 buffer = malloc(blocksize);
173 for (offset = 0; offset < num_bytes; offset += blocksize) {
174 ret = read_disk_extent(root, disk_bytenr + offset,
178 ret = btrfs_csum_file_block(trans,
179 root->fs_info->csum_root,
180 disk_bytenr + num_bytes,
181 disk_bytenr + offset,
190 static int create_image_file_range(struct btrfs_trans_handle *trans,
191 struct btrfs_root *root,
192 struct cache_tree *used,
193 struct btrfs_inode_item *inode,
194 u64 ino, u64 bytenr, u64 *ret_len,
197 struct cache_extent *cache;
198 struct btrfs_block_group_cache *bg_cache;
203 u32 datacsum = convert_flags & CONVERT_FLAG_DATACSUM;
205 if (bytenr != round_down(bytenr, root->fs_info->sectorsize)) {
206 error("bytenr not sectorsize aligned: %llu",
207 (unsigned long long)bytenr);
210 if (len != round_down(len, root->fs_info->sectorsize)) {
211 error("length not sectorsize aligned: %llu",
212 (unsigned long long)len);
215 len = min_t(u64, len, BTRFS_MAX_EXTENT_SIZE);
218 * Skip reserved ranges first
220 * Or we will insert a hole into current image file, and later
221 * migrate block will fail as there is already a file extent.
223 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
224 const struct simple_range *reserved = &btrfs_reserved_ranges[i];
230 * |---- reserved ----|
232 * Skip to reserved range end
234 if (bytenr >= reserved->start && bytenr < range_end(reserved)) {
235 *ret_len = range_end(reserved) - bytenr;
242 * Leading part may still create a file extent
244 if (bytenr < reserved->start &&
245 bytenr + len >= range_end(reserved)) {
246 len = min_t(u64, len, reserved->start - bytenr);
251 /* Check if we are going to insert regular file extent, or hole */
252 cache = search_cache_extent(used, bytenr);
254 if (cache->start <= bytenr) {
256 * |///////Used///////|
259 * Insert one real file extent
261 len = min_t(u64, len, cache->start + cache->size -
263 disk_bytenr = bytenr;
271 len = min(len, cache->start - bytenr);
287 /* Check if the range is in a data block group */
288 bg_cache = btrfs_lookup_block_group(root->fs_info, bytenr);
291 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_DATA))
294 /* The extent should never cross block group boundary */
295 len = min_t(u64, len, bg_cache->key.objectid +
296 bg_cache->key.offset - bytenr);
299 if (len != round_down(len, root->fs_info->sectorsize)) {
300 error("remaining length not sectorsize aligned: %llu",
301 (unsigned long long)len);
304 ret = btrfs_record_file_extent(trans, root, ino, inode, bytenr,
310 ret = csum_disk_extent(trans, root, bytenr, len);
316 * Relocate old fs data in one reserved ranges
318 * Since all old fs data in reserved range is not covered by any chunk nor
319 * data extent, we don't need to handle any reference but add new
320 * extent/reference, which makes codes more clear
322 static int migrate_one_reserved_range(struct btrfs_trans_handle *trans,
323 struct btrfs_root *root,
324 struct cache_tree *used,
325 struct btrfs_inode_item *inode, int fd,
326 u64 ino, const struct simple_range *range,
329 u64 cur_off = range->start;
330 u64 cur_len = range->len;
331 u64 hole_start = range->start;
333 struct cache_extent *cache;
334 struct btrfs_key key;
335 struct extent_buffer *eb;
339 * It's possible that there are holes in reserved range:
340 * |<---------------- Reserved range ---------------------->|
341 * |<- Old fs data ->| |<- Old fs data ->|
342 * So here we need to iterate through old fs used space and only
343 * migrate ranges that covered by old fs data.
345 while (cur_off < range_end(range)) {
346 cache = search_cache_extent(used, cur_off);
349 cur_off = max(cache->start, cur_off);
350 if (cur_off >= range_end(range))
352 cur_len = min(cache->start + cache->size, range_end(range)) -
354 BUG_ON(cur_len < root->fs_info->sectorsize);
356 /* reserve extent for the data */
357 ret = btrfs_reserve_extent(trans, root, cur_len, 0, 0, (u64)-1,
362 eb = malloc(sizeof(*eb) + cur_len);
368 ret = pread(fd, eb->data, cur_len, cur_off);
370 ret = (ret < 0 ? ret : -EIO);
374 eb->start = key.objectid;
375 eb->len = key.offset;
378 ret = write_and_map_eb(root->fs_info, eb);
383 /* Now handle extent item and file extent things */
384 ret = btrfs_record_file_extent(trans, root, ino, inode, cur_off,
385 key.objectid, key.offset);
388 /* Finally, insert csum items */
389 if (convert_flags & CONVERT_FLAG_DATACSUM)
390 ret = csum_disk_extent(trans, root, key.objectid,
393 /* Don't forget to insert hole */
394 hole_len = cur_off - hole_start;
396 ret = btrfs_record_file_extent(trans, root, ino, inode,
397 hole_start, 0, hole_len);
402 cur_off += key.offset;
403 hole_start = cur_off;
404 cur_len = range_end(range) - cur_off;
408 * |<---- reserved -------->|
409 * |<- Old fs data ->| |
412 if (range_end(range) - hole_start > 0)
413 ret = btrfs_record_file_extent(trans, root, ino, inode,
414 hole_start, 0, range_end(range) - hole_start);
419 * Relocate the used ext2 data in reserved ranges
421 static int migrate_reserved_ranges(struct btrfs_trans_handle *trans,
422 struct btrfs_root *root,
423 struct cache_tree *used,
424 struct btrfs_inode_item *inode, int fd,
425 u64 ino, u64 total_bytes, u32 convert_flags)
430 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
431 const struct simple_range *range = &btrfs_reserved_ranges[i];
433 if (range->start > total_bytes)
435 ret = migrate_one_reserved_range(trans, root, used, inode, fd,
436 ino, range, convert_flags);
445 * Helper for expand and merge extent_cache for wipe_one_reserved_range() to
446 * handle wiping a range that exists in cache.
448 static int _expand_extent_cache(struct cache_tree *tree,
449 struct cache_extent *entry,
450 u64 min_stripe_size, int backward)
452 struct cache_extent *ce;
455 if (entry->size >= min_stripe_size)
457 diff = min_stripe_size - entry->size;
460 ce = prev_cache_extent(entry);
463 if (ce->start + ce->size >= entry->start - diff) {
464 /* Directly merge with previous extent */
465 ce->size = entry->start + entry->size - ce->start;
466 remove_cache_extent(tree, entry);
471 /* No overlap, normal extent */
472 if (entry->start < diff) {
473 error("cannot find space for data chunk layout");
476 entry->start -= diff;
480 ce = next_cache_extent(entry);
483 if (entry->start + entry->size + diff >= ce->start) {
484 /* Directly merge with next extent */
485 entry->size = ce->start + ce->size - entry->start;
486 remove_cache_extent(tree, ce);
496 * Remove one reserve range from given cache tree
497 * if min_stripe_size is non-zero, it will ensure for split case,
498 * all its split cache extent is no smaller than @min_strip_size / 2.
500 static int wipe_one_reserved_range(struct cache_tree *tree,
501 u64 start, u64 len, u64 min_stripe_size,
504 struct cache_extent *cache;
507 BUG_ON(ensure_size && min_stripe_size == 0);
509 * The logical here is simplified to handle special cases only
510 * So we don't need to consider merge case for ensure_size
512 BUG_ON(min_stripe_size && (min_stripe_size < len * 2 ||
513 min_stripe_size / 2 < BTRFS_STRIPE_LEN));
515 /* Also, wipe range should already be aligned */
516 BUG_ON(start != round_down(start, BTRFS_STRIPE_LEN) ||
517 start + len != round_up(start + len, BTRFS_STRIPE_LEN));
519 min_stripe_size /= 2;
521 cache = lookup_cache_extent(tree, start, len);
525 if (start <= cache->start) {
527 * |--------cache---------|
530 BUG_ON(start + len <= cache->start);
533 * The wipe size is smaller than min_stripe_size / 2,
534 * so the result length should still meet min_stripe_size
535 * And no need to do alignment
537 cache->size -= (start + len - cache->start);
538 if (cache->size == 0) {
539 remove_cache_extent(tree, cache);
544 BUG_ON(ensure_size && cache->size < min_stripe_size);
546 cache->start = start + len;
548 } else if (start > cache->start && start + len < cache->start +
551 * |-------cache-----|
554 u64 old_start = cache->start;
555 u64 old_len = cache->size;
556 u64 insert_start = start + len;
559 cache->size = start - cache->start;
560 /* Expand the leading half part if needed */
561 if (ensure_size && cache->size < min_stripe_size) {
562 ret = _expand_extent_cache(tree, cache,
568 /* And insert the new one */
569 insert_len = old_start + old_len - start - len;
570 ret = add_merge_cache_extent(tree, insert_start, insert_len);
574 /* Expand the last half part if needed */
575 if (ensure_size && insert_len < min_stripe_size) {
576 cache = lookup_cache_extent(tree, insert_start,
578 if (!cache || cache->start != insert_start ||
579 cache->size != insert_len)
581 ret = _expand_extent_cache(tree, cache,
590 * Wipe len should be small enough and no need to expand the
593 cache->size = start - cache->start;
594 BUG_ON(ensure_size && cache->size < min_stripe_size);
599 * Remove reserved ranges from given cache_tree
601 * It will remove the following ranges
603 * 2) 2nd superblock, +64K (make sure chunks are 64K aligned)
604 * 3) 3rd superblock, +64K
606 * @min_stripe must be given for safety check
607 * and if @ensure_size is given, it will ensure affected cache_extent will be
608 * larger than min_stripe_size
610 static int wipe_reserved_ranges(struct cache_tree *tree, u64 min_stripe_size,
616 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
617 const struct simple_range *range = &btrfs_reserved_ranges[i];
619 ret = wipe_one_reserved_range(tree, range->start, range->len,
620 min_stripe_size, ensure_size);
627 static int calculate_available_space(struct btrfs_convert_context *cctx)
629 struct cache_tree *used = &cctx->used_space;
630 struct cache_tree *data_chunks = &cctx->data_chunks;
631 struct cache_tree *free = &cctx->free_space;
632 struct cache_extent *cache;
635 * Twice the minimal chunk size, to allow later wipe_reserved_ranges()
636 * works without need to consider overlap
638 u64 min_stripe_size = SZ_32M;
641 /* Calculate data_chunks */
642 for (cache = first_cache_extent(used); cache;
643 cache = next_cache_extent(cache)) {
646 if (cache->start + cache->size < cur_off)
648 if (cache->start > cur_off + min_stripe_size)
649 cur_off = cache->start;
650 cur_len = max(cache->start + cache->size - cur_off,
652 ret = add_merge_cache_extent(data_chunks, cur_off, cur_len);
658 * remove reserved ranges, so we won't ever bother relocating an old
659 * filesystem extent to other place.
661 ret = wipe_reserved_ranges(data_chunks, min_stripe_size, 1);
667 * Calculate free space
668 * Always round up the start bytenr, to avoid metadata extent corss
669 * stripe boundary, as later mkfs_convert() won't have all the extent
672 for (cache = first_cache_extent(data_chunks); cache;
673 cache = next_cache_extent(cache)) {
674 if (cache->start < cur_off)
676 if (cache->start > cur_off) {
680 len = cache->start - round_up(cur_off,
682 insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
684 ret = add_merge_cache_extent(free, insert_start, len);
688 cur_off = cache->start + cache->size;
690 /* Don't forget the last range */
691 if (cctx->total_bytes > cur_off) {
692 u64 len = cctx->total_bytes - cur_off;
695 insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
697 ret = add_merge_cache_extent(free, insert_start, len);
702 /* Remove reserved bytes */
703 ret = wipe_reserved_ranges(free, min_stripe_size, 0);
709 * Read used space, and since we have the used space,
710 * calcuate data_chunks and free for later mkfs
712 static int convert_read_used_space(struct btrfs_convert_context *cctx)
716 ret = cctx->convert_ops->read_used_space(cctx);
720 ret = calculate_available_space(cctx);
725 * Create the fs image file of old filesystem.
727 * This is completely fs independent as we have cctx->used, only
728 * need to create file extents pointing to all the positions.
730 static int create_image(struct btrfs_root *root,
731 struct btrfs_mkfs_config *cfg,
732 struct btrfs_convert_context *cctx, int fd,
733 u64 size, char *name, u32 convert_flags)
735 struct btrfs_inode_item buf;
736 struct btrfs_trans_handle *trans;
737 struct btrfs_path path;
738 struct btrfs_key key;
739 struct cache_extent *cache;
740 struct cache_tree used_tmp;
743 u64 flags = BTRFS_INODE_READONLY;
746 if (!(convert_flags & CONVERT_FLAG_DATACSUM))
747 flags |= BTRFS_INODE_NODATASUM;
749 trans = btrfs_start_transaction(root, 1);
753 cache_tree_init(&used_tmp);
754 btrfs_init_path(&path);
756 ret = btrfs_find_free_objectid(trans, root, BTRFS_FIRST_FREE_OBJECTID,
760 ret = btrfs_new_inode(trans, root, ino, 0400 | S_IFREG);
763 ret = btrfs_change_inode_flags(trans, root, ino, flags);
766 ret = btrfs_add_link(trans, root, ino, BTRFS_FIRST_FREE_OBJECTID, name,
767 strlen(name), BTRFS_FT_REG_FILE, NULL, 1);
772 key.type = BTRFS_INODE_ITEM_KEY;
775 ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
777 ret = (ret > 0 ? -ENOENT : ret);
780 read_extent_buffer(path.nodes[0], &buf,
781 btrfs_item_ptr_offset(path.nodes[0], path.slots[0]),
783 btrfs_release_path(&path);
786 * Create a new used space cache, which doesn't contain the reserved
789 for (cache = first_cache_extent(&cctx->used_space); cache;
790 cache = next_cache_extent(cache)) {
791 ret = add_cache_extent(&used_tmp, cache->start, cache->size);
795 ret = wipe_reserved_ranges(&used_tmp, 0, 0);
800 * Start from 1M, as 0~1M is reserved, and create_image_file_range()
801 * can't handle bytenr 0(will consider it as a hole)
805 u64 len = size - cur;
807 ret = create_image_file_range(trans, root, &used_tmp,
808 &buf, ino, cur, &len,
814 /* Handle the reserved ranges */
815 ret = migrate_reserved_ranges(trans, root, &cctx->used_space, &buf, fd,
816 ino, cfg->num_bytes, convert_flags);
819 key.type = BTRFS_INODE_ITEM_KEY;
821 ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
823 ret = (ret > 0 ? -ENOENT : ret);
826 btrfs_set_stack_inode_size(&buf, cfg->num_bytes);
827 write_extent_buffer(path.nodes[0], &buf,
828 btrfs_item_ptr_offset(path.nodes[0], path.slots[0]),
831 free_extent_cache_tree(&used_tmp);
832 btrfs_release_path(&path);
833 btrfs_commit_transaction(trans, root);
837 static struct btrfs_root* link_subvol(struct btrfs_root *root,
838 const char *base, u64 root_objectid)
840 struct btrfs_trans_handle *trans;
841 struct btrfs_fs_info *fs_info = root->fs_info;
842 struct btrfs_root *tree_root = fs_info->tree_root;
843 struct btrfs_root *new_root = NULL;
844 struct btrfs_path path;
845 struct btrfs_inode_item *inode_item;
846 struct extent_buffer *leaf;
847 struct btrfs_key key;
848 u64 dirid = btrfs_root_dirid(&root->root_item);
850 char buf[BTRFS_NAME_LEN + 1]; /* for snprintf null */
856 if (len == 0 || len > BTRFS_NAME_LEN)
859 btrfs_init_path(&path);
860 key.objectid = dirid;
861 key.type = BTRFS_DIR_INDEX_KEY;
862 key.offset = (u64)-1;
864 ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
866 error("search for DIR_INDEX dirid %llu failed: %d",
867 (unsigned long long)dirid, ret);
871 if (path.slots[0] > 0) {
873 btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
874 if (key.objectid == dirid && key.type == BTRFS_DIR_INDEX_KEY)
875 index = key.offset + 1;
877 btrfs_release_path(&path);
879 trans = btrfs_start_transaction(root, 1);
881 error("unable to start transaction");
885 key.objectid = dirid;
887 key.type = BTRFS_INODE_ITEM_KEY;
889 ret = btrfs_lookup_inode(trans, root, &path, &key, 1);
891 error("search for INODE_ITEM %llu failed: %d",
892 (unsigned long long)dirid, ret);
895 leaf = path.nodes[0];
896 inode_item = btrfs_item_ptr(leaf, path.slots[0],
897 struct btrfs_inode_item);
899 key.objectid = root_objectid;
900 key.offset = (u64)-1;
901 key.type = BTRFS_ROOT_ITEM_KEY;
903 memcpy(buf, base, len);
904 for (i = 0; i < 1024; i++) {
905 ret = btrfs_insert_dir_item(trans, root, buf, len,
906 dirid, &key, BTRFS_FT_DIR, index);
909 len = snprintf(buf, ARRAY_SIZE(buf), "%s%d", base, i);
910 if (len < 1 || len > BTRFS_NAME_LEN) {
918 btrfs_set_inode_size(leaf, inode_item, len * 2 +
919 btrfs_inode_size(leaf, inode_item));
920 btrfs_mark_buffer_dirty(leaf);
921 btrfs_release_path(&path);
923 /* add the backref first */
924 ret = btrfs_add_root_ref(trans, tree_root, root_objectid,
925 BTRFS_ROOT_BACKREF_KEY,
926 root->root_key.objectid,
927 dirid, index, buf, len);
929 error("unable to add root backref for %llu: %d",
930 root->root_key.objectid, ret);
934 /* now add the forward ref */
935 ret = btrfs_add_root_ref(trans, tree_root, root->root_key.objectid,
936 BTRFS_ROOT_REF_KEY, root_objectid,
937 dirid, index, buf, len);
939 error("unable to add root ref for %llu: %d",
940 root->root_key.objectid, ret);
944 ret = btrfs_commit_transaction(trans, root);
946 error("transaction commit failed: %d", ret);
950 new_root = btrfs_read_fs_root(fs_info, &key);
951 if (IS_ERR(new_root)) {
952 error("unable to fs read root: %lu", PTR_ERR(new_root));
956 btrfs_init_path(&path);
960 static int create_subvol(struct btrfs_trans_handle *trans,
961 struct btrfs_root *root, u64 root_objectid)
963 struct extent_buffer *tmp;
964 struct btrfs_root *new_root;
965 struct btrfs_key key;
966 struct btrfs_root_item root_item;
969 ret = btrfs_copy_root(trans, root, root->node, &tmp,
974 memcpy(&root_item, &root->root_item, sizeof(root_item));
975 btrfs_set_root_bytenr(&root_item, tmp->start);
976 btrfs_set_root_level(&root_item, btrfs_header_level(tmp));
977 btrfs_set_root_generation(&root_item, trans->transid);
978 free_extent_buffer(tmp);
980 key.objectid = root_objectid;
981 key.type = BTRFS_ROOT_ITEM_KEY;
982 key.offset = trans->transid;
983 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
986 key.offset = (u64)-1;
987 new_root = btrfs_read_fs_root(root->fs_info, &key);
988 if (!new_root || IS_ERR(new_root)) {
989 error("unable to fs read root: %lu", PTR_ERR(new_root));
990 return PTR_ERR(new_root);
993 ret = btrfs_make_root_dir(trans, new_root, BTRFS_FIRST_FREE_OBJECTID);
999 * New make_btrfs() has handle system and meta chunks quite well.
1000 * So only need to add remaining data chunks.
1002 static int make_convert_data_block_groups(struct btrfs_trans_handle *trans,
1003 struct btrfs_fs_info *fs_info,
1004 struct btrfs_mkfs_config *cfg,
1005 struct btrfs_convert_context *cctx)
1007 struct btrfs_root *extent_root = fs_info->extent_root;
1008 struct cache_tree *data_chunks = &cctx->data_chunks;
1009 struct cache_extent *cache;
1014 * Don't create data chunk over 10% of the convert device
1015 * And for single chunk, don't create chunk larger than 1G.
1017 max_chunk_size = cfg->num_bytes / 10;
1018 max_chunk_size = min((u64)(SZ_1G), max_chunk_size);
1019 max_chunk_size = round_down(max_chunk_size,
1020 extent_root->fs_info->sectorsize);
1022 for (cache = first_cache_extent(data_chunks); cache;
1023 cache = next_cache_extent(cache)) {
1024 u64 cur = cache->start;
1026 while (cur < cache->start + cache->size) {
1028 u64 cur_backup = cur;
1030 len = min(max_chunk_size,
1031 cache->start + cache->size - cur);
1032 ret = btrfs_alloc_data_chunk(trans, fs_info,
1034 BTRFS_BLOCK_GROUP_DATA, 1);
1037 ret = btrfs_make_block_group(trans, fs_info, 0,
1038 BTRFS_BLOCK_GROUP_DATA,
1039 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
1050 * Init the temp btrfs to a operational status.
1052 * It will fix the extent usage accounting(XXX: Do we really need?) and
1053 * insert needed data chunks, to ensure all old fs data extents are covered
1054 * by DATA chunks, preventing wrong chunks are allocated.
1056 * And also create convert image subvolume and relocation tree.
1057 * (XXX: Not need again?)
1058 * But the convert image subvolume is *NOT* linked to fs tree yet.
1060 static int init_btrfs(struct btrfs_mkfs_config *cfg, struct btrfs_root *root,
1061 struct btrfs_convert_context *cctx, u32 convert_flags)
1063 struct btrfs_key location;
1064 struct btrfs_trans_handle *trans;
1065 struct btrfs_fs_info *fs_info = root->fs_info;
1069 * Don't alloc any metadata/system chunk, as we don't want
1070 * any meta/sys chunk allcated before all data chunks are inserted.
1071 * Or we screw up the chunk layout just like the old implement.
1073 fs_info->avoid_sys_chunk_alloc = 1;
1074 fs_info->avoid_meta_chunk_alloc = 1;
1075 trans = btrfs_start_transaction(root, 1);
1077 error("unable to start transaction");
1081 ret = btrfs_fix_block_accounting(trans, root);
1084 ret = make_convert_data_block_groups(trans, fs_info, cfg, cctx);
1087 ret = btrfs_make_root_dir(trans, fs_info->tree_root,
1088 BTRFS_ROOT_TREE_DIR_OBJECTID);
1091 memcpy(&location, &root->root_key, sizeof(location));
1092 location.offset = (u64)-1;
1093 ret = btrfs_insert_dir_item(trans, fs_info->tree_root, "default", 7,
1094 btrfs_super_root_dir(fs_info->super_copy),
1095 &location, BTRFS_FT_DIR, 0);
1098 ret = btrfs_insert_inode_ref(trans, fs_info->tree_root, "default", 7,
1100 btrfs_super_root_dir(fs_info->super_copy), 0);
1103 btrfs_set_root_dirid(&fs_info->fs_root->root_item,
1104 BTRFS_FIRST_FREE_OBJECTID);
1106 /* subvol for fs image file */
1107 ret = create_subvol(trans, root, CONV_IMAGE_SUBVOL_OBJECTID);
1109 error("failed to create subvolume image root: %d", ret);
1112 /* subvol for data relocation tree */
1113 ret = create_subvol(trans, root, BTRFS_DATA_RELOC_TREE_OBJECTID);
1115 error("failed to create DATA_RELOC root: %d", ret);
1119 ret = btrfs_commit_transaction(trans, root);
1120 fs_info->avoid_sys_chunk_alloc = 0;
1121 fs_info->avoid_meta_chunk_alloc = 0;
1127 * Migrate super block to its default position and zero 0 ~ 16k
1129 static int migrate_super_block(int fd, u64 old_bytenr)
1132 struct extent_buffer *buf;
1133 struct btrfs_super_block *super;
1137 buf = malloc(sizeof(*buf) + BTRFS_SUPER_INFO_SIZE);
1141 buf->len = BTRFS_SUPER_INFO_SIZE;
1142 ret = pread(fd, buf->data, BTRFS_SUPER_INFO_SIZE, old_bytenr);
1143 if (ret != BTRFS_SUPER_INFO_SIZE)
1146 super = (struct btrfs_super_block *)buf->data;
1147 BUG_ON(btrfs_super_bytenr(super) != old_bytenr);
1148 btrfs_set_super_bytenr(super, BTRFS_SUPER_INFO_OFFSET);
1150 csum_tree_block_size(buf, BTRFS_CRC32_SIZE, 0);
1151 ret = pwrite(fd, buf->data, BTRFS_SUPER_INFO_SIZE,
1152 BTRFS_SUPER_INFO_OFFSET);
1153 if (ret != BTRFS_SUPER_INFO_SIZE)
1160 memset(buf->data, 0, BTRFS_SUPER_INFO_SIZE);
1161 for (bytenr = 0; bytenr < BTRFS_SUPER_INFO_OFFSET; ) {
1162 len = BTRFS_SUPER_INFO_OFFSET - bytenr;
1163 if (len > BTRFS_SUPER_INFO_SIZE)
1164 len = BTRFS_SUPER_INFO_SIZE;
1165 ret = pwrite(fd, buf->data, len, bytenr);
1167 fprintf(stderr, "unable to zero fill device\n");
1181 static int convert_open_fs(const char *devname,
1182 struct btrfs_convert_context *cctx)
1186 for (i = 0; i < ARRAY_SIZE(convert_operations); i++) {
1187 int ret = convert_operations[i]->open_fs(cctx, devname);
1190 cctx->convert_ops = convert_operations[i];
1195 error("no file system found to convert");
1199 static int do_convert(const char *devname, u32 convert_flags, u32 nodesize,
1200 const char *fslabel, int progress, u64 features)
1206 struct btrfs_root *root;
1207 struct btrfs_root *image_root;
1208 struct btrfs_convert_context cctx;
1209 struct btrfs_key key;
1210 char subvol_name[SOURCE_FS_NAME_LEN + 8];
1211 struct task_ctx ctx;
1212 char features_buf[64];
1213 struct btrfs_mkfs_config mkfs_cfg;
1215 init_convert_context(&cctx);
1216 ret = convert_open_fs(devname, &cctx);
1219 ret = convert_check_state(&cctx);
1222 "source filesystem is not clean, running filesystem check is recommended");
1223 ret = convert_read_used_space(&cctx);
1227 blocksize = cctx.blocksize;
1228 total_bytes = (u64)blocksize * (u64)cctx.block_count;
1229 if (blocksize < 4096) {
1230 error("block size is too small: %u < 4096", blocksize);
1233 if (btrfs_check_nodesize(nodesize, blocksize, features))
1235 fd = open(devname, O_RDWR);
1237 error("unable to open %s: %s", devname, strerror(errno));
1240 btrfs_parse_features_to_string(features_buf, features);
1241 if (features == BTRFS_MKFS_DEFAULT_FEATURES)
1242 strcat(features_buf, " (default)");
1244 printf("create btrfs filesystem:\n");
1245 printf("\tblocksize: %u\n", blocksize);
1246 printf("\tnodesize: %u\n", nodesize);
1247 printf("\tfeatures: %s\n", features_buf);
1249 memset(&mkfs_cfg, 0, sizeof(mkfs_cfg));
1250 mkfs_cfg.label = cctx.volume_name;
1251 mkfs_cfg.num_bytes = total_bytes;
1252 mkfs_cfg.nodesize = nodesize;
1253 mkfs_cfg.sectorsize = blocksize;
1254 mkfs_cfg.stripesize = blocksize;
1255 mkfs_cfg.features = features;
1257 ret = make_convert_btrfs(fd, &mkfs_cfg, &cctx);
1259 error("unable to create initial ctree: %s", strerror(-ret));
1263 root = open_ctree_fd(fd, devname, mkfs_cfg.super_bytenr,
1264 OPEN_CTREE_WRITES | OPEN_CTREE_FS_PARTIAL);
1266 error("unable to open ctree");
1269 ret = init_btrfs(&mkfs_cfg, root, &cctx, convert_flags);
1271 error("unable to setup the root tree: %d", ret);
1275 printf("creating %s image file\n", cctx.convert_ops->name);
1276 snprintf(subvol_name, sizeof(subvol_name), "%s_saved",
1277 cctx.convert_ops->name);
1278 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
1279 key.offset = (u64)-1;
1280 key.type = BTRFS_ROOT_ITEM_KEY;
1281 image_root = btrfs_read_fs_root(root->fs_info, &key);
1283 error("unable to create image subvolume");
1286 ret = create_image(image_root, &mkfs_cfg, &cctx, fd,
1287 mkfs_cfg.num_bytes, "image",
1290 error("failed to create %s/image: %d", subvol_name, ret);
1294 printf("creating btrfs metadata\n");
1295 ret = pthread_mutex_init(&ctx.mutex, NULL);
1297 error("failed to initialize mutex: %d", ret);
1300 ctx.max_copy_inodes = (cctx.inodes_count - cctx.free_inodes_count);
1301 ctx.cur_copy_inodes = 0;
1304 ctx.info = task_init(print_copied_inodes, after_copied_inodes,
1306 task_start(ctx.info);
1308 ret = copy_inodes(&cctx, root, convert_flags, &ctx);
1310 error("error during copy_inodes %d", ret);
1314 task_stop(ctx.info);
1315 task_deinit(ctx.info);
1318 image_root = link_subvol(root, subvol_name, CONV_IMAGE_SUBVOL_OBJECTID);
1320 error("unable to link subvolume %s", subvol_name);
1324 memset(root->fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE);
1325 if (convert_flags & CONVERT_FLAG_COPY_LABEL) {
1326 __strncpy_null(root->fs_info->super_copy->label,
1327 cctx.volume_name, BTRFS_LABEL_SIZE - 1);
1328 printf("copy label '%s'\n", root->fs_info->super_copy->label);
1329 } else if (convert_flags & CONVERT_FLAG_SET_LABEL) {
1330 strcpy(root->fs_info->super_copy->label, fslabel);
1331 printf("set label to '%s'\n", fslabel);
1334 ret = close_ctree(root);
1336 error("close_ctree failed: %d", ret);
1339 convert_close_fs(&cctx);
1340 clean_convert_context(&cctx);
1343 * If this step succeed, we get a mountable btrfs. Otherwise
1344 * the source fs is left unchanged.
1346 ret = migrate_super_block(fd, mkfs_cfg.super_bytenr);
1348 error("unable to migrate super block: %d", ret);
1352 root = open_ctree_fd(fd, devname, 0,
1353 OPEN_CTREE_WRITES | OPEN_CTREE_FS_PARTIAL);
1355 error("unable to open ctree for finalization");
1358 root->fs_info->finalize_on_close = 1;
1362 printf("conversion complete\n");
1365 clean_convert_context(&cctx);
1369 "an error occurred during conversion, filesystem is partially created but not finalized and not mountable");
1374 * Read out data of convert image which is in btrfs reserved ranges so we can
1375 * use them to overwrite the ranges during rollback.
1377 static int read_reserved_ranges(struct btrfs_root *root, u64 ino,
1378 u64 total_bytes, char *reserved_ranges[])
1383 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
1384 const struct simple_range *range = &btrfs_reserved_ranges[i];
1386 if (range->start + range->len >= total_bytes)
1388 ret = btrfs_read_file(root, ino, range->start, range->len,
1389 reserved_ranges[i]);
1390 if (ret < range->len) {
1392 "failed to read data of convert image, offset=%llu len=%llu ret=%d",
1393 range->start, range->len, ret);
1403 static bool is_subset_of_reserved_ranges(u64 start, u64 len)
1408 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
1409 const struct simple_range *range = &btrfs_reserved_ranges[i];
1411 if (start >= range->start && start + len <= range_end(range)) {
1419 static bool is_chunk_direct_mapped(struct btrfs_fs_info *fs_info, u64 start)
1421 struct cache_extent *ce;
1422 struct map_lookup *map;
1425 ce = search_cache_extent(&fs_info->mapping_tree.cache_tree, start);
1428 if (ce->start > start || ce->start + ce->size < start)
1431 map = container_of(ce, struct map_lookup, ce);
1433 /* Not SINGLE chunk */
1434 if (map->num_stripes != 1)
1437 /* Chunk's logical doesn't match with phisical, not 1:1 mapped */
1438 if (map->ce.start != map->stripes[0].physical)
1446 * Iterate all file extents of the convert image.
1448 * All file extents except ones in btrfs_reserved_ranges must be mapped 1:1
 * on disk. (Means their file_offset must match their on-disk bytenr)
1451 * File extents in reserved ranges can be relocated to other place, and in
1452 * that case we will read them out for later use.
1454 static int check_convert_image(struct btrfs_root *image_root, u64 ino,
1455 u64 total_size, char *reserved_ranges[])
1457 struct btrfs_key key;
1458 struct btrfs_path path;
1459 struct btrfs_fs_info *fs_info = image_root->fs_info;
1460 u64 checked_bytes = 0;
1465 key.type = BTRFS_EXTENT_DATA_KEY;
1467 btrfs_init_path(&path);
1468 ret = btrfs_search_slot(NULL, image_root, &key, &path, 0, 0);
1470 * It's possible that some fs doesn't store any (including sb)
1471 * data into 0~1M range, and NO_HOLES is enabled.
1473 * So we only need to check if ret < 0
1476 error("failed to iterate file extents at offset 0: %s",
1478 btrfs_release_path(&path);
1482 /* Loop from the first file extents */
1484 struct btrfs_file_extent_item *fi;
1485 struct extent_buffer *leaf = path.nodes[0];
1489 int slot = path.slots[0];
1491 if (slot >= btrfs_header_nritems(leaf))
1493 btrfs_item_key_to_cpu(leaf, &key, slot);
1496 * Iteration is done, exit normally, we have extra check out of
1499 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
1503 file_offset = key.offset;
1504 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1505 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) {
1508 "ino %llu offset %llu doesn't have a regular file extent",
1512 if (btrfs_file_extent_compression(leaf, fi) ||
1513 btrfs_file_extent_encryption(leaf, fi) ||
1514 btrfs_file_extent_other_encoding(leaf, fi)) {
1517 "ino %llu offset %llu doesn't have a plain file extent",
1522 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1523 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1525 checked_bytes += ram_bytes;
1527 if (disk_bytenr == 0)
1531 * Most file extents must be 1:1 mapped, which means 2 things:
1532 * 1) File extent file offset == disk_bytenr
1533 * 2) That data chunk's logical == chunk's physical
1535 * So file extent's file offset == physical position on disk.
1537 * And after rolling back btrfs reserved range, other part
1538 * remains what old fs used to be.
1540 if (file_offset != disk_bytenr ||
1541 !is_chunk_direct_mapped(fs_info, disk_bytenr)) {
1543 * Only file extent in btrfs reserved ranges are
1544 * allowed to be non-1:1 mapped
1546 if (!is_subset_of_reserved_ranges(file_offset,
1550 "ino %llu offset %llu file extent should not be relocated",
1556 ret = btrfs_next_item(image_root, &path);
1563 btrfs_release_path(&path);
1565 * For HOLES mode (without NO_HOLES), we must ensure file extents
1566 * cover the whole range of the image
1568 if (!ret && !btrfs_fs_incompat(fs_info, NO_HOLES)) {
1569 if (checked_bytes != total_size) {
1571 error("inode %llu has some file extents not checked",
1577 /* So far so good, read old data located in btrfs reserved ranges */
1578 ret = read_reserved_ranges(image_root, ino, total_size,
1584 * btrfs rollback is just reverted convert:
1585 * |<---------------Btrfs fs------------------------------>|
1586 * |<- Old data chunk ->|< new chunk (D/M/S)>|<- ODC ->|
1587 * |<-Old-FE->| |<-Old-FE->|<- Btrfs extents ->|<-Old-FE->|
1590 * |<------------------Old fs----------------------------->|
1591 * |<- used ->| |<- used ->| |<- used ->|
1593 * However things are much easier than convert, we don't really need to
1594 * do the complex space calculation, but only to handle btrfs reserved space
1596 * |<---------------------------Btrfs fs----------------------------->|
1597 * | RSV 1 | | Old | | RSV 2 | | Old | | RSV 3 |
1598 * | 0~1M | | Fs | | SB2 + 64K | | Fs | | SB3 + 64K |
1600 * On the other hand, the converted fs image in btrfs is a completely
1603 * |<-----------------Converted fs image in btrfs-------------------->|
1604 * | RSV 1 | | Old | | RSV 2 | | Old | | RSV 3 |
1605 * | Relocated | | Fs | | Relocated | | Fs | | Relocated |
1607 * Used space in fs image should be at the same physical position on disk.
1608 * We only need to recover the data in reserved ranges, so the whole
1611 * The idea to rollback is also straightforward, we just "read" out the data
 * of reserved ranges, and write them back to where they should be.
1613 * Then the old fs is back.
1615 static int do_rollback(const char *devname)
1617 struct btrfs_root *root;
1618 struct btrfs_root *image_root;
1619 struct btrfs_fs_info *fs_info;
1620 struct btrfs_key key;
1621 struct btrfs_path path;
1622 struct btrfs_dir_item *dir;
1623 struct btrfs_inode_item *inode_item;
1624 char *image_name = "image";
1625 char *reserved_ranges[ARRAY_SIZE(btrfs_reserved_ranges)] = { NULL };
1634 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
1635 const struct simple_range *range = &btrfs_reserved_ranges[i];
1637 reserved_ranges[i] = calloc(1, range->len);
1638 if (!reserved_ranges[i]) {
1643 fd = open(devname, O_RDWR);
1645 error("unable to open %s: %s", devname, strerror(errno));
1649 fsize = lseek(fd, 0, SEEK_END);
1650 root = open_ctree_fd(fd, devname, 0, OPEN_CTREE_WRITES);
1652 error("unable to open ctree");
1656 fs_info = root->fs_info;
1659 * Search root backref first, or after subvolume deletion (orphan),
1660 * we can still rollback the image.
1662 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
1663 key.type = BTRFS_ROOT_BACKREF_KEY;
1664 key.offset = BTRFS_FS_TREE_OBJECTID;
1665 btrfs_init_path(&path);
1666 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, &path, 0, 0);
1667 btrfs_release_path(&path);
1669 error("unable to find ext2 image subvolume, is it deleted?");
1672 } else if (ret < 0) {
1673 error("failed to find ext2 image subvolume: %s",
1678 /* Search convert subvolume */
1679 key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
1680 key.type = BTRFS_ROOT_ITEM_KEY;
1681 key.offset = (u64)-1;
1682 image_root = btrfs_read_fs_root(fs_info, &key);
1683 if (IS_ERR(image_root)) {
1684 ret = PTR_ERR(image_root);
1685 error("failed to open convert image subvolume: %s",
1690 /* Search the image file */
1691 root_dir = btrfs_root_dirid(&image_root->root_item);
1692 dir = btrfs_lookup_dir_item(NULL, image_root, &path, root_dir,
1693 image_name, strlen(image_name), 0);
1695 if (!dir || IS_ERR(dir)) {
1696 btrfs_release_path(&path);
1701 error("failed to locate file %s: %s", image_name,
1705 btrfs_dir_item_key_to_cpu(path.nodes[0], dir, &key);
1706 btrfs_release_path(&path);
1708 /* Get total size of the original image */
1711 ret = btrfs_lookup_inode(NULL, image_root, &path, &key, 0);
1714 btrfs_release_path(&path);
1715 error("unable to find inode %llu: %s", ino, strerror(-ret));
1718 inode_item = btrfs_item_ptr(path.nodes[0], path.slots[0],
1719 struct btrfs_inode_item);
1720 total_bytes = btrfs_inode_size(path.nodes[0], inode_item);
1721 btrfs_release_path(&path);
1723 /* Check if we can rollback the image */
1724 ret = check_convert_image(image_root, ino, total_bytes, reserved_ranges);
1726 error("old fs image can't be rolled back");
1730 btrfs_release_path(&path);
1731 close_ctree_fs_info(fs_info);
1736 * Everything is OK, just write back old fs data into btrfs reserved
1739 * Here, we starts from the backup blocks first, so if something goes
1740 * wrong, the fs is still mountable
1743 for (i = ARRAY_SIZE(btrfs_reserved_ranges) - 1; i >= 0; i--) {
1745 const struct simple_range *range = &btrfs_reserved_ranges[i];
1747 if (range_end(range) >= fsize)
1750 real_size = min(range_end(range), fsize) - range->start;
1751 ret = pwrite(fd, reserved_ranges[i], real_size, range->start);
1752 if (ret < real_size) {
1757 error("failed to recover range [%llu, %llu): %s",
1758 range->start, real_size, strerror(-ret));
1765 for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++)
1766 free(reserved_ranges[i]);
1768 error("rollback failed");
1770 printf("rollback succeeded\n");
1774 static void print_usage(void)
1776 printf("usage: btrfs-convert [options] device\n");
1777 printf("options:\n");
1778 printf("\t-d|--no-datasum disable data checksum, sets NODATASUM\n");
1779 printf("\t-i|--no-xattr ignore xattrs and ACLs\n");
1780 printf("\t-n|--no-inline disable inlining of small files to metadata\n");
1781 printf("\t-N|--nodesize SIZE set filesystem metadata nodesize\n");
1782 printf("\t-r|--rollback roll back to the original filesystem\n");
1783 printf("\t-l|--label LABEL set filesystem label\n");
1784 printf("\t-L|--copy-label use label from converted filesystem\n");
1785 printf("\t-p|--progress show converting progress (default)\n");
1786 printf("\t-O|--features LIST comma separated list of filesystem features\n");
1787 printf("\t--no-progress show only overview, not the detailed progress\n");
1789 printf("Supported filesystems:\n");
1790 printf("\text2/3/4: %s\n", BTRFSCONVERT_EXT2 ? "yes" : "no");
1793 int main(int argc, char *argv[])
1799 u32 nodesize = max_t(u32, sysconf(_SC_PAGESIZE),
1800 BTRFS_MKFS_DEFAULT_NODE_SIZE);
1803 int usage_error = 0;
1806 char fslabel[BTRFS_LABEL_SIZE];
1807 u64 features = BTRFS_MKFS_DEFAULT_FEATURES;
1810 enum { GETOPT_VAL_NO_PROGRESS = 256 };
1811 static const struct option long_options[] = {
1812 { "no-progress", no_argument, NULL,
1813 GETOPT_VAL_NO_PROGRESS },
1814 { "no-datasum", no_argument, NULL, 'd' },
1815 { "no-inline", no_argument, NULL, 'n' },
1816 { "no-xattr", no_argument, NULL, 'i' },
1817 { "rollback", no_argument, NULL, 'r' },
1818 { "features", required_argument, NULL, 'O' },
1819 { "progress", no_argument, NULL, 'p' },
1820 { "label", required_argument, NULL, 'l' },
1821 { "copy-label", no_argument, NULL, 'L' },
1822 { "nodesize", required_argument, NULL, 'N' },
1823 { "help", no_argument, NULL, GETOPT_VAL_HELP},
1824 { NULL, 0, NULL, 0 }
1826 int c = getopt_long(argc, argv, "dinN:rl:LpO:", long_options, NULL);
1841 nodesize = parse_size(optarg);
1847 copylabel = CONVERT_FLAG_SET_LABEL;
1848 if (strlen(optarg) >= BTRFS_LABEL_SIZE) {
1850 "label too long, trimmed to %d bytes",
1851 BTRFS_LABEL_SIZE - 1);
1853 __strncpy_null(fslabel, optarg, BTRFS_LABEL_SIZE - 1);
1856 copylabel = CONVERT_FLAG_COPY_LABEL;
1862 char *orig = strdup(optarg);
1865 tmp = btrfs_parse_fs_features(tmp, &features);
1867 error("unrecognized filesystem feature: %s",
1873 if (features & BTRFS_FEATURE_LIST_ALL) {
1874 btrfs_list_all_fs_features(
1875 ~BTRFS_CONVERT_ALLOWED_FEATURES);
1878 if (features & ~BTRFS_CONVERT_ALLOWED_FEATURES) {
1881 btrfs_parse_features_to_string(buf,
1882 features & ~BTRFS_CONVERT_ALLOWED_FEATURES);
1883 error("features not allowed for convert: %s",
1890 case GETOPT_VAL_NO_PROGRESS:
1893 case GETOPT_VAL_HELP:
1896 return c != GETOPT_VAL_HELP;
1900 if (check_argc_exact(argc - optind, 1)) {
1905 if (rollback && (!datacsum || noxattr || !packing)) {
1907 "Usage error: -d, -i, -n options do not apply to rollback\n");
1916 file = argv[optind];
1917 ret = check_mounted(file);
1919 error("could not check mount status: %s", strerror(-ret));
1922 error("%s is mounted", file);
1927 ret = do_rollback(file);
1931 cf |= datacsum ? CONVERT_FLAG_DATACSUM : 0;
1932 cf |= packing ? CONVERT_FLAG_INLINE_DATA : 0;
1933 cf |= noxattr ? 0 : CONVERT_FLAG_XATTR;
1935 ret = do_convert(file, cf, nodesize, fslabel, progress, features);