}
if (!found)
fprintf(stderr, "\tstart: 0, len: %llu\n",
- round_up(rec->isize, root->sectorsize));
+ round_up(rec->isize,
+ root->fs_info->sectorsize));
}
}
start = key.offset;
size = btrfs_item_size_nr(leaf, path.slots[0]);
- csum_end = key.offset + (size / csum_size) * root->sectorsize;
+ csum_end = key.offset + (size / csum_size) *
+ root->fs_info->sectorsize;
if (csum_end > start) {
size = min(csum_end - start, len);
len -= size;
u64 num_bytes = 0;
u64 disk_bytenr = 0;
u64 extent_offset = 0;
- u64 mask = root->sectorsize - 1;
+ u64 mask = root->fs_info->sectorsize - 1;
int extent_type;
int ret;
static void reada_walk_down(struct btrfs_root *root,
struct extent_buffer *node, int slot)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 ptr_gen;
u32 nritems;
return;
nritems = btrfs_header_nritems(node);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
for (i = slot; i < nritems; i++) {
bytenr = btrfs_node_blockptr(node, i);
ptr_gen = btrfs_node_ptr_generation(node, i);
- readahead_tree_block(root, bytenr, blocksize, ptr_gen);
+ readahead_tree_block(fs_info, bytenr, blocksize, ptr_gen);
}
}
enum btrfs_tree_block_status status;
u64 bytenr;
u64 ptr_gen;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *next;
struct extent_buffer *cur;
u32 blocksize;
}
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
if (bytenr == nrefs->bytenr[*level - 1]) {
refs = nrefs->refs[*level - 1];
}
}
- next = btrfs_find_tree_block(root, bytenr, blocksize);
+ next = btrfs_find_tree_block(fs_info, bytenr, blocksize);
if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
free_extent_buffer(next);
reada_walk_down(root, cur, path->slots[*level]);
- next = read_tree_block(root, bytenr, blocksize,
+ next = read_tree_block(fs_info, bytenr, blocksize,
ptr_gen);
if (!extent_buffer_uptodate(next)) {
struct btrfs_key node_key;
btrfs_add_corrupt_extent_record(root->fs_info,
&node_key,
path->nodes[*level]->start,
- root->nodesize, *level);
+ fs_info->nodesize,
+ *level);
err = -EIO;
goto out;
}
enum btrfs_tree_block_status status;
u64 bytenr;
u64 ptr_gen;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *next;
struct extent_buffer *cur;
u32 blocksize;
}
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
ret = update_nodes_refs(root, bytenr, nrefs, *level - 1);
if (ret)
continue;
}
- next = btrfs_find_tree_block(root, bytenr, blocksize);
+ next = btrfs_find_tree_block(fs_info, bytenr, blocksize);
if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
free_extent_buffer(next);
reada_walk_down(root, cur, path->slots[*level]);
- next = read_tree_block(root, bytenr, blocksize,
+ next = read_tree_block(fs_info, bytenr, blocksize,
ptr_gen);
if (!extent_buffer_uptodate(next)) {
struct btrfs_key node_key;
btrfs_node_key_to_cpu(path->nodes[*level],
&node_key,
path->slots[*level]);
- btrfs_add_corrupt_extent_record(root->fs_info,
+ btrfs_add_corrupt_extent_record(fs_info,
&node_key,
path->nodes[*level]->start,
- root->nodesize, *level);
+ fs_info->nodesize,
+ *level);
ret = -EIO;
break;
}
/* special case for a file losing all its file extent */
if (!found) {
ret = btrfs_punch_hole(trans, root, rec->ino, 0,
- round_up(rec->isize, root->sectorsize));
+ round_up(rec->isize,
+ root->fs_info->sectorsize));
if (ret < 0)
goto out;
}
* return value is not concerned.
*/
btrfs_release_path(&path);
- ret = btrfs_free_extent(trans, root, offset, root->nodesize,
- 0, root->root_key.objectid,
- level - 1, 0);
+ ret = btrfs_free_extent(trans, root, offset,
+ root->fs_info->nodesize, 0,
+ root->root_key.objectid, level - 1, 0);
cache = next_cache_extent(cache);
}
extent_num_bytes, item_inline_len);
err |= FILE_EXTENT_ERROR;
}
+ *end += extent_num_bytes;
*size += extent_num_bytes;
return err;
}
* Just a warning, as dir inode nbytes is just an
* instructive value.
*/
- if (!IS_ALIGNED(nbytes, root->nodesize)) {
+ if (!IS_ALIGNED(nbytes, root->fs_info->nodesize)) {
warning("root %llu DIR INODE[%llu] nbytes should be aligned to %u",
- root->objectid, inode_id, root->nodesize);
+ root->objectid, inode_id,
+ root->fs_info->nodesize);
}
if (isize != size) {
if (tmpl->metadata)
rec->crossing_stripes = check_crossing_stripes(global_info,
- rec->start, global_info->tree_root->nodesize);
+ rec->start, global_info->nodesize);
check_extent_type(rec);
return ret;
}
if (tmpl->metadata)
rec->crossing_stripes = check_crossing_stripes(
global_info, rec->start,
- global_info->tree_root->nodesize);
+ global_info->nodesize);
check_extent_type(rec);
maybe_free_extent_rec(extent_cache, rec);
return ret;
* wrong onwer(3) out of chunk tree, to pass both chunk tree check
* and owner<->key_type check.
*/
- ret = btrfs_check_chunk_valid(global_info->tree_root, eb, chunk, slot,
+ ret = btrfs_check_chunk_valid(global_info, eb, chunk, slot,
key->offset);
if (ret < 0) {
error("chunk(%llu, %llu) is not valid, ignore it",
if (key.type == BTRFS_METADATA_ITEM_KEY) {
metadata = 1;
- num_bytes = root->nodesize;
+ num_bytes = root->fs_info->nodesize;
} else {
num_bytes = key.offset;
}
- if (!IS_ALIGNED(key.objectid, root->sectorsize)) {
+ if (!IS_ALIGNED(key.objectid, root->fs_info->sectorsize)) {
error("ignoring invalid extent, bytenr %llu is not aligned to %u",
- key.objectid, root->sectorsize);
+ key.objectid, root->fs_info->sectorsize);
return -EIO;
}
if (item_size < sizeof(*ei)) {
metadata = 1;
else
metadata = 0;
- if (metadata && num_bytes != root->nodesize) {
+ if (metadata && num_bytes != root->fs_info->nodesize) {
error("ignore invalid metadata extent, length %llu does not equal to %u",
- num_bytes, root->nodesize);
+ num_bytes, root->fs_info->nodesize);
return -EIO;
}
- if (!metadata && !IS_ALIGNED(num_bytes, root->sectorsize)) {
+ if (!metadata && !IS_ALIGNED(num_bytes, root->fs_info->sectorsize)) {
error("ignore invalid data extent, length %llu is not aligned to %u",
- num_bytes, root->sectorsize);
+ num_bytes, root->fs_info->sectorsize);
return -EIO;
}
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
+ ret = btrfs_rmap_block(root->fs_info,
cache->key.objectid, bytenr, 0,
&logical, &nr, &stripe_len);
if (ret)
if (key.type == BTRFS_EXTENT_ITEM_KEY)
last = key.objectid + key.offset;
else
- last = key.objectid + root->nodesize;
+ last = key.objectid + root->fs_info->nodesize;
path.slots[0]++;
continue;
}
if (key.type == BTRFS_EXTENT_ITEM_KEY)
last = key.objectid + key.offset;
else
- last = key.objectid + root->nodesize;
+ last = key.objectid + root->fs_info->nodesize;
path.slots[0]++;
}
start = cache->key.objectid + cache->key.offset;
if (!cache->free_space_ctl) {
if (btrfs_init_free_space_ctl(cache,
- root->sectorsize)) {
+ root->fs_info->sectorsize)) {
ret = -ENOMEM;
break;
}
u64 num_bytes, unsigned long leaf_offset,
struct extent_buffer *eb) {
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 offset = 0;
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
char *data;
unsigned long csum_offset;
u32 csum;
int mirror;
int num_copies;
- if (num_bytes % root->sectorsize)
+ if (num_bytes % fs_info->sectorsize)
return -EINVAL;
data = malloc(num_bytes);
again:
read_len = num_bytes - offset;
/* read as much space once a time */
- ret = read_extent_data(root, data + offset,
+ ret = read_extent_data(fs_info, data + offset,
bytenr + offset, &read_len, mirror);
if (ret)
goto out;
tmp = offset + data_checked;
csum = btrfs_csum_data((char *)data + tmp,
- csum, root->sectorsize);
+ csum, fs_info->sectorsize);
btrfs_csum_final(csum, (u8 *)&csum);
csum_offset = leaf_offset +
- tmp / root->sectorsize * csum_size;
+ tmp / fs_info->sectorsize * csum_size;
read_extent_buffer(eb, (char *)&csum_expected,
csum_offset, csum_size);
/* try another mirror */
fprintf(stderr, "mirror %d bytenr %llu csum %u expected csum %u\n",
mirror, bytenr + tmp,
csum, csum_expected);
- num_copies = btrfs_num_copies(
- &root->fs_info->mapping_tree,
+ num_copies = btrfs_num_copies(root->fs_info,
bytenr, num_bytes);
if (mirror < num_copies - 1) {
mirror += 1;
goto again;
}
}
- data_checked += root->sectorsize;
+ data_checked += fs_info->sectorsize;
}
offset += read_len;
}
}
data_len = (btrfs_item_size_nr(leaf, path.slots[0]) /
- csum_size) * root->sectorsize;
+ csum_size) * root->fs_info->sectorsize;
if (!check_data_csum)
goto skip_csum_check;
leaf_offset = btrfs_item_ptr_offset(leaf, path.slots[0]);
struct device_extent_tree *dev_extent_cache,
struct root_item_record *ri)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *buf;
struct extent_record *rec = NULL;
u64 bytenr;
continue;
/* fixme, get the parent transid */
- readahead_tree_block(root, bits[i].start,
+ readahead_tree_block(fs_info, bits[i].start,
bits[i].size, 0);
}
}
}
/* fixme, get the real parent transid */
- buf = read_tree_block(root, bytenr, size, gen);
+ buf = read_tree_block(fs_info, bytenr, size, gen);
if (!extent_buffer_uptodate(buf)) {
record_bad_block_io(root->fs_info,
extent_cache, bytenr, size);
ref),
btrfs_extent_data_ref_offset(buf, ref),
btrfs_extent_data_ref_count(buf, ref),
- 0, root->sectorsize);
+ 0, fs_info->sectorsize);
continue;
}
if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
add_data_backref(extent_cache,
key.objectid, key.offset, 0, 0, 0,
btrfs_shared_data_ref_count(buf, ref),
- 0, root->sectorsize);
+ 0, fs_info->sectorsize);
continue;
}
if (key.type == BTRFS_ORPHAN_ITEM_KEY) {
data_bytes_allocated +=
btrfs_file_extent_disk_num_bytes(buf, fi);
- if (data_bytes_allocated < root->sectorsize) {
+ if (data_bytes_allocated < fs_info->sectorsize) {
abort();
}
data_bytes_referenced +=
struct extent_record tmpl;
ptr = btrfs_node_blockptr(buf, i);
- size = root->nodesize;
+ size = fs_info->nodesize;
btrfs_node_key_to_cpu(buf, &key, i);
if (ri != NULL) {
if ((level == ri->drop_level)
if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
found_key.type == BTRFS_METADATA_ITEM_KEY) {
u64 bytes = (found_key.type == BTRFS_EXTENT_ITEM_KEY) ?
- found_key.offset : root->nodesize;
+ found_key.offset : root->fs_info->nodesize;
ret = btrfs_update_block_group(trans, root, bytenr,
bytes, 0, 0);
if (!back->is_data)
rec->max_size = max_t(u64, rec->max_size,
- info->extent_root->nodesize);
+ info->nodesize);
if (!allocated) {
u32 item_size = sizeof(*ei);
rec = list_entry(list->next,
struct root_item_record, list);
last = 0;
- buf = read_tree_block(root->fs_info->tree_root,
+ buf = read_tree_block(root->fs_info,
rec->bytenr, rec->level_size, 0);
if (!extent_buffer_uptodate(buf)) {
free_extent_buffer(buf);
level = btrfs_header_level(root1->node);
ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
root1->node->start, 0, level, 0,
- root1->nodesize, NULL);
+ root1->fs_info->nodesize, NULL);
if (ret < 0)
goto out;
root1 = root->fs_info->chunk_root;
level = btrfs_header_level(root1->node);
ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
root1->node->start, 0, level, 0,
- root1->nodesize, NULL);
+ root1->fs_info->nodesize, NULL);
if (ret < 0)
goto out;
btrfs_init_path(&path);
last_snapshot = btrfs_root_last_snapshot(&ri);
if (btrfs_disk_key_objectid(&ri.drop_progress) == 0) {
level = btrfs_root_level(&ri);
- level_size = root->nodesize;
+ level_size = root->fs_info->nodesize;
ret = add_root_item_to_list(&normal_trees,
found_key.objectid,
btrfs_root_bytenr(&ri),
goto out;
} else {
level = btrfs_root_level(&ri);
- level_size = root->nodesize;
+ level_size = root->fs_info->nodesize;
objectid = found_key.objectid;
btrfs_disk_key_to_cpu(&found_key,
&ri.drop_progress);
int slot;
int skinny_level;
int type;
- u32 nodesize = root->nodesize;
+ u32 nodesize = root->fs_info->nodesize;
u32 item_size;
u64 offset;
int tree_reloc_root = 0;
extent_num_bytes = btrfs_file_extent_num_bytes(eb, fi);
/* Check unaligned disk_num_bytes and num_bytes */
- if (!IS_ALIGNED(disk_num_bytes, root->sectorsize)) {
+ if (!IS_ALIGNED(disk_num_bytes, root->fs_info->sectorsize)) {
error(
"file extent [%llu, %llu] has unaligned disk num bytes: %llu, should be aligned to %u",
fi_key.objectid, fi_key.offset, disk_num_bytes,
- root->sectorsize);
+ root->fs_info->sectorsize);
err |= BYTES_UNALIGNED;
} else {
data_bytes_allocated += disk_num_bytes;
}
- if (!IS_ALIGNED(extent_num_bytes, root->sectorsize)) {
+ if (!IS_ALIGNED(extent_num_bytes, root->fs_info->sectorsize)) {
error(
"file extent [%llu, %llu] has unaligned num bytes: %llu, should be aligned to %u",
fi_key.objectid, fi_key.offset, extent_num_bytes,
- root->sectorsize);
+ root->fs_info->sectorsize);
err |= BYTES_UNALIGNED;
} else {
data_bytes_referenced += extent_num_bytes;
btrfs_release_path(&path);
/* Get level from tree block as an alternative source */
- eb = read_tree_block_fs_info(fs_info, bytenr, nodesize, transid);
+ eb = read_tree_block(fs_info, bytenr, nodesize, transid);
if (!extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
return -EIO;
}
/* Read out the tree block to get item/node key */
- eb = read_tree_block(root, bytenr, root->nodesize, 0);
+ eb = read_tree_block(fs_info, bytenr, fs_info->nodesize, 0);
if (!extent_buffer_uptodate(eb)) {
err |= REFERENCER_MISSING;
free_extent_buffer(eb);
int found_parent = 0;
int i;
- eb = read_tree_block_fs_info(fs_info, parent, nodesize, 0);
+ eb = read_tree_block(fs_info, parent, nodesize, 0);
if (!extent_buffer_uptodate(eb))
goto out;
int found_parent = 0;
int i;
- eb = read_tree_block_fs_info(fs_info, parent, nodesize, 0);
+ eb = read_tree_block(fs_info, parent, nodesize, 0);
if (!extent_buffer_uptodate(eb))
goto out;
l = path.nodes[0];
chunk = btrfs_item_ptr(l, path.slots[0], struct btrfs_chunk);
- if (btrfs_chunk_length(l, chunk) != length)
+ ret = btrfs_check_chunk_valid(fs_info, l, chunk, path.slots[0],
+ chunk_key.offset);
+ if (ret < 0)
+ goto out;
+
+ if (btrfs_stripe_length(fs_info, l, chunk) != length)
goto out;
num_stripes = btrfs_chunk_num_stripes(l, chunk);
struct btrfs_block_group_item *bi;
struct btrfs_block_group_item bg_item;
struct btrfs_dev_extent *ptr;
- u32 sectorsize = btrfs_super_sectorsize(fs_info->super_copy);
u64 length;
u64 chunk_end;
+ u64 stripe_len;
u64 type;
- u64 profile;
int num_stripes;
u64 offset;
u64 objectid;
chunk = btrfs_item_ptr(eb, slot, struct btrfs_chunk);
length = btrfs_chunk_length(eb, chunk);
chunk_end = chunk_key.offset + length;
- if (!IS_ALIGNED(length, sectorsize)) {
- error("chunk[%llu %llu) not aligned to %u",
- chunk_key.offset, chunk_end, sectorsize);
- err |= BYTES_UNALIGNED;
+ ret = btrfs_check_chunk_valid(fs_info, eb, chunk, slot,
+ chunk_key.offset);
+ if (ret < 0) {
+ error("chunk[%llu %llu) is invalid", chunk_key.offset,
+ chunk_end);
+ err |= BYTES_UNALIGNED | UNKNOWN_TYPE;
goto out;
}
-
type = btrfs_chunk_type(eb, chunk);
- profile = type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
- if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
- error("chunk[%llu %llu) has no chunk type",
- chunk_key.offset, chunk_end);
- err |= UNKNOWN_TYPE;
- }
- if (profile && (profile & (profile - 1))) {
- error("chunk[%llu %llu) multiple profiles detected: %llx",
- chunk_key.offset, chunk_end, profile);
- err |= UNKNOWN_TYPE;
- }
bg_key.objectid = chunk_key.offset;
bg_key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
}
num_stripes = btrfs_chunk_num_stripes(eb, chunk);
+ stripe_len = btrfs_stripe_length(fs_info, eb, chunk);
for (i = 0; i < num_stripes; i++) {
btrfs_release_path(&path);
btrfs_init_path(&path);
offset = btrfs_dev_extent_chunk_offset(leaf, ptr);
if (objectid != chunk_key.objectid ||
offset != chunk_key.offset ||
- btrfs_dev_extent_length(leaf, ptr) != length)
+ btrfs_dev_extent_length(leaf, ptr) != stripe_len)
goto not_match_dev;
continue;
not_match_dev:
* As a btrfs tree has most 8 levels (0..7), so it's quite safe
* to call the function itself.
*/
- eb = read_tree_block(root, blocknr, root->nodesize, 0);
+ eb = read_tree_block(root->fs_info, blocknr,
+ root->fs_info->nodesize, 0);
if (extent_buffer_uptodate(eb)) {
ret = traverse_tree_block(root, eb);
err |= ret;
goto init;
}
c = btrfs_alloc_free_block(trans, root,
- root->nodesize,
+ root->fs_info->nodesize,
root->root_key.objectid,
&disk_key, level, 0, 0);
if (IS_ERR(c)) {
* in, but for now this doesn't actually use the root so
* just pass in extent_root.
*/
- tmp = read_tree_block(fs_info->extent_root, bytenr,
- nodesize, 0);
+ tmp = read_tree_block(fs_info, bytenr, nodesize, 0);
if (!extent_buffer_uptodate(tmp)) {
fprintf(stderr, "Error reading root block\n");
return -EIO;
continue;
}
- tmp = read_tree_block(fs_info->extent_root, bytenr,
+ tmp = read_tree_block(fs_info, bytenr,
nodesize, 0);
if (!extent_buffer_uptodate(tmp)) {
fprintf(stderr, "Error reading tree block\n");
struct btrfs_root *csum_root, char *buf, u64 start,
u64 len)
{
+ struct btrfs_fs_info *fs_info = csum_root->fs_info;
u64 offset = 0;
u64 sectorsize;
int ret = 0;
while (offset < len) {
- sectorsize = csum_root->sectorsize;
- ret = read_extent_data(csum_root, buf, start + offset,
+ sectorsize = fs_info->sectorsize;
+ ret = read_extent_data(fs_info, buf, start + offset,
§orsize, 0);
if (ret)
break;
int slot = 0;
int ret = 0;
- buf = malloc(cur_root->fs_info->csum_root->sectorsize);
+ buf = malloc(cur_root->fs_info->sectorsize);
if (!buf)
return -ENOMEM;
return ret;
}
- buf = malloc(csum_root->sectorsize);
+ buf = malloc(csum_root->fs_info->sectorsize);
if (!buf) {
btrfs_release_path(&path);
return -ENOMEM;