This patch updates btrfs-progs for superblock duplication.
Note: this patch is not as complete as the kernel one,
since updating the converter would require changing the
code again. Thank you,
Signed-off-by: Yan Zheng <zheng.yan@oracle.com>
ret = btrfs_open_devices(fs_devices, O_RDONLY);
BUG_ON(ret);
- ret = btrfs_bootstrap_super_map(&fs_info->mapping_tree, fs_devices);
- BUG_ON(ret);
fs_info->sb_buffer = btrfs_find_create_tree_block(tree_root, sb_bytenr,
4096);
BUG_ON(!fs_info->sb_buffer);
return root;
}
+/*
+ * Write the superblock to every mirror location on a single device.
+ *
+ * For each mirror slot returned by btrfs_sb_offset() that still fits on
+ * the device, the header bytenr is updated to that slot, the WRITTEN
+ * flag is set, the block is re-checksummed, and the buffer is written
+ * at that offset.  Always returns 0; a failed write triggers BUG_ON().
+ */
+int write_dev_supers(struct btrfs_root *root, struct extent_buffer *sb,
+ struct btrfs_device *device)
+{
+ u64 bytenr;
+ u64 flags;
+ int i, ret;
+
+ /* reuse the caller's sb buffer but direct the I/O at this device */
+ sb->fd = device->fd;
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ bytenr = btrfs_sb_offset(i);
+ /* skip mirror copies that would land past the end of the device */
+ if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
+ break;
+
+ /* each copy records its own on-disk location in its header */
+ btrfs_set_header_bytenr(sb, bytenr);
+ flags = btrfs_header_flags(sb);
+ btrfs_set_header_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
+ /* checksum must be recomputed after the header edits above */
+ csum_tree_block(root, sb, 0);
+
+ sb->dev_bytenr = bytenr;
+ ret = write_extent_to_disk(sb);
+ BUG_ON(ret);
+ }
+ return 0;
+}
+
int write_all_supers(struct btrfs_root *root)
{
struct list_head *cur;
write_extent_buffer(sb, dev->fs_devices->fsid,
(unsigned long)btrfs_device_fsid(dev_item),
BTRFS_UUID_SIZE);
- sb->fd = dev->fd;
- sb->dev_bytenr = sb->start;
- btrfs_set_header_flag(sb, BTRFS_HEADER_FLAG_WRITTEN);
- csum_tree_block(root, sb, 0);
- ret = write_extent_to_disk(sb);
+ ret = write_dev_supers(root, sb, dev);
BUG_ON(ret);
}
return 0;
#ifndef __DISKIO__
#define __DISKIO__
-#define BTRFS_SUPER_INFO_OFFSET (16 * 1024)
+#define BTRFS_SUPER_INFO_OFFSET (64 * 1024)
#define BTRFS_SUPER_INFO_SIZE 4096
+#define BTRFS_SUPER_MIRROR_MAX 3
+#define BTRFS_SUPER_MIRROR_SHIFT 12
+
+/*
+ * Return the on-disk byte offset of superblock mirror copy @mirror.
+ *
+ * Mirror 0 is the primary superblock at BTRFS_SUPER_INFO_OFFSET (64K).
+ * Mirror i > 0 lives at 16K << (BTRFS_SUPER_MIRROR_SHIFT * i), i.e.
+ * 64M for mirror 1 and 256G for mirror 2.  Callers bound @mirror by
+ * BTRFS_SUPER_MIRROR_MAX, so the shift cannot exceed the u64 width.
+ */
+static inline u64 btrfs_sb_offset(int mirror)
+{
+ u64 start = 16 * 1024;
+ if (mirror)
+ return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
+ return BTRFS_SUPER_INFO_OFFSET;
+}
+
struct btrfs_device;
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
{
}
+/*
+ * Remove all superblock mirror locations from a block group's free
+ * space cache so the allocator can never hand them out as free space.
+ *
+ * For each mirror offset, btrfs_rmap_block() reverse-maps the physical
+ * superblock location into the logical addresses that cover it within
+ * this block group, and each such stripe range is cleared from the
+ * dirty (free) extent state.  Always returns 0.
+ */
+static int remove_sb_from_cache(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache)
+{
+ u64 bytenr;
+ u64 *logical;
+ int stripe_len;
+ int i, nr, ret;
+ struct extent_io_tree *free_space_cache;
+
+ free_space_cache = &root->fs_info->free_space_cache;
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ bytenr = btrfs_sb_offset(i);
+ ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
+ cache->key.objectid, bytenr, 0,
+ &logical, &nr, &stripe_len);
+ BUG_ON(ret);
+ /* clear every logical address that maps onto this sb copy */
+ while (nr--) {
+ clear_extent_dirty(free_space_cache, logical[nr],
+ logical[nr] + stripe_len - 1, GFP_NOFS);
+ }
+ /* btrfs_rmap_block allocates the logical array; we own it now */
+ kfree(logical);
+ }
+ return 0;
+}
+
static int cache_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
{
set_extent_dirty(free_space_cache, last,
last + hole_size - 1, GFP_NOFS);
}
+ remove_sb_from_cache(root, block_group);
block_group->cached = 1;
err:
btrfs_free_path(path);
if (block_count == 0)
block_count = dev_block_count;
- for (i = 0; i < 7; i++)
- blocks[i] = BTRFS_SUPER_INFO_OFFSET + leafsize * i;
+ blocks[0] = BTRFS_SUPER_INFO_OFFSET;
+ for (i = 1; i < 7; i++) {
+ blocks[i] = BTRFS_SUPER_INFO_OFFSET + 1024 * 1024 +
+ leafsize * i;
+ }
ret = make_btrfs(fd, file, label, blocks, block_count,
nodesize, leafsize,
/* create the items for the extent tree */
nritems = 0;
- itemoff = __BTRFS_LEAF_DATA_SIZE(leafsize) -
- sizeof(struct btrfs_extent_item);
- btrfs_set_disk_key_objectid(&disk_key, 0);
- btrfs_set_disk_key_offset(&disk_key, first_free);
- btrfs_set_disk_key_type(&disk_key, BTRFS_EXTENT_ITEM_KEY);
- btrfs_set_item_key(buf, &disk_key, nritems);
- btrfs_set_item_offset(buf, btrfs_item_nr(buf, nritems), itemoff);
- btrfs_set_item_size(buf, btrfs_item_nr(buf, nritems),
- sizeof(struct btrfs_extent_item));
- extent_item = btrfs_item_ptr(buf, nritems, struct btrfs_extent_item);
- btrfs_set_extent_refs(buf, extent_item, 1);
- nritems++;
+ itemoff = __BTRFS_LEAF_DATA_SIZE(leafsize);
for (i = 1; i < 7; i++) {
BUG_ON(blocks[i] < first_free);
BUG_ON(blocks[i] < blocks[i - 1]);
kfree(buf);
list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
device->fs_devices = root->fs_info->fs_devices;
- ret = btrfs_bootstrap_super_map(&root->fs_info->mapping_tree,
- root->fs_info->fs_devices);
- BUG_ON(ret);
return 0;
}
int btrfs_prepare_device(int fd, char *file, int zero_end, u64 *block_count_ret)
{
u64 block_count;
+ u64 bytenr;
struct stat st;
- int ret;
+ int i, ret;
ret = fstat(fd, &st);
if (ret < 0) {
exit(1);
}
+ for (i = 0 ; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ bytenr = btrfs_sb_offset(i);
+ if (bytenr >= block_count)
+ break;
+ zero_blocks(fd, bytenr, BTRFS_SUPER_INFO_SIZE);
+ }
+
if (zero_end) {
ret = zero_dev_end(fd, block_count);
if (ret) {
return ret;
}
+/*
+ * Reverse-map a physical device offset back to the logical chunk
+ * addresses that reference it.
+ *
+ * @chunk_start: logical start of the chunk to search (must exist in
+ *               the mapping tree; BUG_ON otherwise)
+ * @physical:    physical byte offset to reverse-map
+ * @devid:       if non-zero, only consider stripes on this device
+ * @logical:     out: kzalloc'd array of matching logical addresses;
+ *               ownership transfers to the caller, who must kfree() it
+ * @naddrs:      out: number of entries in @logical
+ * @stripe_len:  out: stripe length of the chunk
+ *
+ * RAID0/RAID10 spread the chunk across stripes, so the per-stripe
+ * logical length is scaled down accordingly before matching.
+ * Duplicate logical addresses (e.g. from RAID1 copies) are filtered.
+ * Always returns 0.
+ */
+int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+ u64 chunk_start, u64 physical, u64 devid,
+ u64 **logical, int *naddrs, int *stripe_len)
+{
+ struct cache_extent *ce;
+ struct map_lookup *map;
+ u64 *buf;
+ u64 bytenr;
+ u64 length;
+ u64 stripe_nr;
+ int i, j, nr = 0;
+
+ ce = find_first_cache_extent(&map_tree->cache_tree, chunk_start);
+ BUG_ON(!ce || ce->start != chunk_start);
+ map = container_of(ce, struct map_lookup, ce);
+
+ /* logical bytes covered by one stripe of this chunk */
+ length = ce->size;
+ if (map->type & BTRFS_BLOCK_GROUP_RAID10)
+ length = ce->size / (map->num_stripes / map->sub_stripes);
+ else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
+ length = ce->size / map->num_stripes;
+
+ /* NOTE(review): kzalloc result is not NULL-checked before use */
+ buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
+
+ for (i = 0; i < map->num_stripes; i++) {
+ if (devid && map->stripes[i].dev->devid != devid)
+ continue;
+ /* does @physical fall inside this stripe's device range? */
+ if (map->stripes[i].physical > physical ||
+ map->stripes[i].physical + length <= physical)
+ continue;
+
+ stripe_nr = (physical - map->stripes[i].physical) /
+ map->stripe_len;
+
+ /* undo the striping to recover the chunk-relative stripe nr */
+ if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+ stripe_nr = (stripe_nr * map->num_stripes + i) /
+ map->sub_stripes;
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+ stripe_nr = stripe_nr * map->num_stripes + i;
+ }
+ bytenr = chunk_start + stripe_nr * map->stripe_len;
+ /* mirrored copies map to the same logical address; dedup */
+ for (j = 0; j < nr; j++) {
+ if (buf[j] == bytenr)
+ break;
+ }
+ if (j == nr)
+ buf[nr++] = bytenr;
+ }
+
+ *logical = buf;
+ *naddrs = nr;
+ *stripe_len = map->stripe_len;
+
+ return 0;
+}
+
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
u64 logical, u64 *length,
struct btrfs_multi_bio **multi_ret, int mirror_num)
}
BUG_ON(stripe_index >= map->num_stripes);
- BUG_ON(stripe_index != 0 && multi->num_stripes > 1);
for (i = 0; i < multi->num_stripes; i++) {
multi->stripes[i].physical =
map->stripes[stripe_index].physical + stripe_offset +
u64 logical;
u64 length;
u64 devid;
- u64 super_offset_diff = 0;
u8 uuid[BTRFS_UUID_SIZE];
int num_stripes;
int ret;
logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
- if (logical < BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE) {
- super_offset_diff = BTRFS_SUPER_INFO_OFFSET +
- BTRFS_SUPER_INFO_SIZE - logical;
- logical = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
- }
-
ce = find_first_cache_extent(&map_tree->cache_tree, logical);
/* already mapped? */
return -ENOMEM;
map->ce.start = logical;
- map->ce.size = length - super_offset_diff;
+ map->ce.size = length;
map->num_stripes = num_stripes;
map->io_width = btrfs_chunk_io_width(leaf, chunk);
map->io_align = btrfs_chunk_io_align(leaf, chunk);
for (i = 0; i < num_stripes; i++) {
map->stripes[i].physical =
- btrfs_stripe_offset_nr(leaf, chunk, i) +
- super_offset_diff;
+ btrfs_stripe_offset_nr(leaf, chunk, i);
devid = btrfs_stripe_devid_nr(leaf, chunk, i);
read_extent_buffer(leaf, uuid, (unsigned long)
btrfs_stripe_dev_uuid_nr(chunk, i),
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
u64 logical, u64 *length,
struct btrfs_multi_bio **multi_ret, int mirror_num);
+int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+ u64 chunk_start, u64 physical, u64 devid,
+ u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_root *root);
int btrfs_read_chunk_tree(struct btrfs_root *root);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,