#include "qgroup-verify.h"
#include "rbtree-utils.h"
#include "backref.h"
-#include "ulist.h"
+#include "kernel-shared/ulist.h"
#include "hash.h"
+#include "help.h"
enum task_position {
TASK_EXTENTS,
}
if (!found)
fprintf(stderr, "\tstart: 0, len: %llu\n",
- round_up(rec->isize, root->sectorsize));
+ round_up(rec->isize,
+ root->fs_info->sectorsize));
}
}
return has_parent ? 0 : 2;
}
-static int process_dir_item(struct btrfs_root *root,
- struct extent_buffer *eb,
+static int process_dir_item(struct extent_buffer *eb,
int slot, struct btrfs_key *key,
struct shared_node *active_node)
{
filetype = btrfs_dir_type(eb, di);
rec->found_size += name_len;
- if (name_len <= BTRFS_NAME_LEN) {
+ if (cur + sizeof(*di) + name_len > total ||
+ name_len > BTRFS_NAME_LEN) {
+ error = REF_ERR_NAME_TOO_LONG;
+
+ if (cur + sizeof(*di) > total)
+ break;
+ len = min_t(u32, total - cur - sizeof(*di),
+ BTRFS_NAME_LEN);
+ } else {
len = name_len;
error = 0;
- } else {
- len = BTRFS_NAME_LEN;
- error = REF_ERR_NAME_TOO_LONG;
}
+
read_extent_buffer(eb, namebuf, (unsigned long)(di + 1), len);
if (location.type == BTRFS_INODE_ITEM_KEY) {
while (cur < total) {
name_len = btrfs_inode_ref_name_len(eb, ref);
index = btrfs_inode_ref_index(eb, ref);
- if (name_len <= BTRFS_NAME_LEN) {
+
+ /* inode_ref + namelen should not cross item boundary */
+ if (cur + sizeof(*ref) + name_len > total ||
+ name_len > BTRFS_NAME_LEN) {
+ if (total < cur + sizeof(*ref))
+ break;
+
+ /* Still try to read out the remaining part */
+ len = min_t(u32, total - cur - sizeof(*ref),
+ BTRFS_NAME_LEN);
+ error = REF_ERR_NAME_TOO_LONG;
+ } else {
len = name_len;
error = 0;
- } else {
- len = BTRFS_NAME_LEN;
- error = REF_ERR_NAME_TOO_LONG;
}
+
read_extent_buffer(eb, namebuf, (unsigned long)(ref + 1), len);
add_inode_backref(inode_cache, key->objectid, key->offset,
index, namebuf, len, 0, key->type, error);
start = key.offset;
size = btrfs_item_size_nr(leaf, path.slots[0]);
- csum_end = key.offset + (size / csum_size) * root->sectorsize;
+ csum_end = key.offset + (size / csum_size) *
+ root->fs_info->sectorsize;
if (csum_end > start) {
size = min(csum_end - start, len);
len -= size;
u64 num_bytes = 0;
u64 disk_bytenr = 0;
u64 extent_offset = 0;
- u64 mask = root->sectorsize - 1;
+ u64 mask = root->fs_info->sectorsize - 1;
int extent_type;
int ret;
switch (key.type) {
case BTRFS_DIR_ITEM_KEY:
case BTRFS_DIR_INDEX_KEY:
- ret = process_dir_item(root, eb, i, &key, active_node);
+ ret = process_dir_item(eb, i, &key, active_node);
break;
case BTRFS_INODE_REF_KEY:
ret = process_inode_ref(eb, i, &key, active_node);
static int check_inode_item(struct btrfs_root *root, struct btrfs_path *path,
unsigned int ext_ref);
+/*
+ * Returns >0 Found error, not fatal, should continue
+ * Returns <0 Fatal error, must exit the whole check
+ * Returns 0 No errors found
+ */
static int process_one_leaf_v2(struct btrfs_root *root, struct btrfs_path *path,
struct node_refs *nrefs, int *level, int ext_ref)
{
}
out:
err &= ~LAST_ITEM;
- /*
- * Convert any error bitmap to -EIO, as we should avoid
- * mixing positive and negative return value to represent
- * error
- */
if (err && !ret)
- ret = -EIO;
+ ret = err;
return ret;
}
static void reada_walk_down(struct btrfs_root *root,
struct extent_buffer *node, int slot)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 ptr_gen;
u32 nritems;
return;
nritems = btrfs_header_nritems(node);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
for (i = slot; i < nritems; i++) {
bytenr = btrfs_node_blockptr(node, i);
ptr_gen = btrfs_node_ptr_generation(node, i);
- readahead_tree_block(root, bytenr, blocksize, ptr_gen);
+ readahead_tree_block(fs_info, bytenr, blocksize, ptr_gen);
}
}
* which makes leaf owner check not so strong, key check should be
* sufficient enough for that case.
*/
-static int check_child_node(struct btrfs_root *root,
- struct extent_buffer *parent, int slot,
+static int check_child_node(struct extent_buffer *parent, int slot,
struct extent_buffer *child)
{
struct btrfs_key parent_key;
enum btrfs_tree_block_status status;
u64 bytenr;
u64 ptr_gen;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *next;
struct extent_buffer *cur;
u32 blocksize;
}
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
if (bytenr == nrefs->bytenr[*level - 1]) {
refs = nrefs->refs[*level - 1];
}
}
- next = btrfs_find_tree_block(root, bytenr, blocksize);
+ next = btrfs_find_tree_block(fs_info, bytenr, blocksize);
if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
free_extent_buffer(next);
reada_walk_down(root, cur, path->slots[*level]);
- next = read_tree_block(root, bytenr, blocksize,
+ next = read_tree_block(root->fs_info, bytenr, blocksize,
ptr_gen);
if (!extent_buffer_uptodate(next)) {
struct btrfs_key node_key;
btrfs_add_corrupt_extent_record(root->fs_info,
&node_key,
path->nodes[*level]->start,
- root->nodesize, *level);
+ root->fs_info->nodesize,
+ *level);
err = -EIO;
goto out;
}
}
- ret = check_child_node(root, cur, path->slots[*level], next);
+ ret = check_child_node(cur, path->slots[*level], next);
if (ret) {
+ free_extent_buffer(next);
err = ret;
goto out;
}
static int check_inode_item(struct btrfs_root *root, struct btrfs_path *path,
unsigned int ext_ref);
+/*
+ * Returns >0 Found error, should continue
+ * Returns <0 Fatal error, must exit the whole check
+ * Returns 0 No errors found
+ */
static int walk_down_tree_v2(struct btrfs_root *root, struct btrfs_path *path,
int *level, struct node_refs *nrefs, int ext_ref)
{
enum btrfs_tree_block_status status;
u64 bytenr;
u64 ptr_gen;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *next;
struct extent_buffer *cur;
u32 blocksize;
}
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
ret = update_nodes_refs(root, bytenr, nrefs, *level - 1);
if (ret)
continue;
}
- next = btrfs_find_tree_block(root, bytenr, blocksize);
+ next = btrfs_find_tree_block(fs_info, bytenr, blocksize);
if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
free_extent_buffer(next);
reada_walk_down(root, cur, path->slots[*level]);
- next = read_tree_block(root, bytenr, blocksize,
+ next = read_tree_block(fs_info, bytenr, blocksize,
ptr_gen);
if (!extent_buffer_uptodate(next)) {
struct btrfs_key node_key;
btrfs_node_key_to_cpu(path->nodes[*level],
&node_key,
path->slots[*level]);
- btrfs_add_corrupt_extent_record(root->fs_info,
+ btrfs_add_corrupt_extent_record(fs_info,
&node_key,
path->nodes[*level]->start,
- root->nodesize, *level);
+ fs_info->nodesize,
+ *level);
ret = -EIO;
break;
}
}
- ret = check_child_node(root, cur, path->slots[*level], next);
+ ret = check_child_node(cur, path->slots[*level], next);
if (ret < 0)
break;
}
static int delete_dir_index(struct btrfs_root *root,
- struct cache_tree *inode_cache,
- struct inode_record *rec,
struct inode_backref *backref)
{
struct btrfs_trans_handle *trans;
static int create_inode_item(struct btrfs_root *root,
struct inode_record *rec,
- struct inode_backref *backref, int root_dir)
+ int root_dir)
{
struct btrfs_trans_handle *trans;
struct btrfs_inode_item inode_item;
list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
if (!delete && rec->ino == root_dirid) {
if (!rec->found_inode_item) {
- ret = create_inode_item(root, rec, backref, 1);
+ ret = create_inode_item(root, rec, 1);
if (ret)
break;
repaired++;
((backref->found_dir_index && !backref->found_inode_ref) ||
(backref->found_dir_index && backref->found_inode_ref &&
(backref->errors & REF_ERR_INDEX_UNMATCH)))) {
- ret = delete_dir_index(root, inode_cache, rec, backref);
+ ret = delete_dir_index(root, backref);
if (ret)
break;
repaired++;
list_del(&backref->list);
free(backref);
+ continue;
}
if (!delete && !backref->found_dir_index &&
break;
repaired++;
if (backref->found_dir_item &&
- backref->found_dir_index &&
backref->found_dir_index) {
if (!backref->errors &&
backref->found_inode_ref) {
list_del(&backref->list);
free(backref);
+ continue;
}
}
}
backref->found_dir_item &&
!(backref->errors & REF_ERR_INDEX_UNMATCH) &&
!rec->found_inode_item)) {
- ret = create_inode_item(root, rec, backref, 0);
+ ret = create_inode_item(root, rec, 0);
if (ret)
break;
repaired++;
return ret;
}
+static int get_highest_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ u64 *highest_ino)
+{
+ struct btrfs_key key, found_key;
+ int ret;
+
+ btrfs_init_path(path);
+ key.objectid = BTRFS_LAST_FREE_OBJECTID;
+ key.offset = -1;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret == 1) {
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+ path->slots[0] - 1);
+ *highest_ino = found_key.objectid;
+ ret = 0;
+ }
+ if (*highest_ino >= BTRFS_LAST_FREE_OBJECTID)
+ ret = -EOVERFLOW;
+ btrfs_release_path(path);
+ return ret;
+}
+
static int repair_inode_nlinks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
}
if (rec->found_link == 0) {
- lost_found_ino = root->highest_inode;
- if (lost_found_ino >= BTRFS_LAST_FREE_OBJECTID) {
- ret = -EOVERFLOW;
+ ret = get_highest_inode(trans, root, path, &lost_found_ino);
+ if (ret < 0)
goto out;
- }
lost_found_ino++;
ret = btrfs_mkdir(trans, root, dir_name, strlen(dir_name),
BTRFS_FIRST_FREE_OBJECTID, &lost_found_ino,
/* special case for a file losing all its file extent */
if (!found) {
ret = btrfs_punch_hole(trans, root, rec->ino, 0,
- round_up(rec->isize, root->sectorsize));
+ round_up(rec->isize,
+ root->fs_info->sectorsize));
if (ret < 0)
goto out;
}
}
/*
- * We need to record the highest inode number for later 'lost+found'
- * dir creation.
- * We must select an ino not used/referred by any existing inode, or
- * 'lost+found' ino may be a missing ino in a corrupted leaf,
- * this may cause 'lost+found' dir has wrong nlinks.
- */
- cache = last_cache_extent(inode_cache);
- if (cache) {
- node = container_of(cache, struct ptr_node, cache);
- rec = node->data;
- if (rec->ino > root->highest_inode)
- root->highest_inode = rec->ino;
- }
-
- /*
* We need to repair backrefs first because we could change some of the
* errors in the inode recs.
*
path.slots[level]);
/* Remove the ptr */
- ret = btrfs_del_ptr(trans, root, &path, level,
- path.slots[level]);
+ ret = btrfs_del_ptr(root, &path, level, path.slots[level]);
if (ret < 0)
goto out;
/*
* return value is not concerned.
*/
btrfs_release_path(&path);
- ret = btrfs_free_extent(trans, root, offset, root->nodesize,
- 0, root->root_key.objectid,
- level - 1, 0);
+ ret = btrfs_free_extent(trans, root, offset,
+ root->fs_info->nodesize, 0,
+ root->root_key.objectid, level - 1, 0);
cache = next_cache_extent(cache);
}
if (imode_to_type(mode) != filetype)
goto next;
- if (name_len <= BTRFS_NAME_LEN) {
- len = name_len;
- } else {
- len = BTRFS_NAME_LEN;
+ if (cur + sizeof(*di) + name_len > total ||
+ name_len > BTRFS_NAME_LEN) {
warning("root %llu %s[%llu %llu] name too long %u, trimmed",
- root->objectid,
- key->type == BTRFS_DIR_ITEM_KEY ?
- "DIR_ITEM" : "DIR_INDEX",
- key->objectid, key->offset, name_len);
+ root->objectid,
+ key->type == BTRFS_DIR_ITEM_KEY ?
+ "DIR_ITEM" : "DIR_INDEX",
+ key->objectid, key->offset, name_len);
+
+ if (cur + sizeof(*di) > total)
+ break;
+ len = min_t(u32, total - cur - sizeof(*di),
+ BTRFS_NAME_LEN);
+ } else {
+ len = name_len;
}
+
read_extent_buffer(node, namebuf, (unsigned long)(di + 1), len);
if (len != namelen || strncmp(namebuf, name, len))
goto next;
index = btrfs_inode_ref_index(node, ref);
name_len = btrfs_inode_ref_name_len(node, ref);
- if (name_len <= BTRFS_NAME_LEN) {
- len = name_len;
- } else {
- len = BTRFS_NAME_LEN;
+ if (cur + sizeof(*ref) + name_len > total ||
+ name_len > BTRFS_NAME_LEN) {
warning("root %llu INODE_REF[%llu %llu] name too long",
root->objectid, ref_key->objectid, ref_key->offset);
+
+ if (total < cur + sizeof(*ref))
+ goto out;
+ len = min_t(u32, total - cur - sizeof(*ref), BTRFS_NAME_LEN);
+ } else {
+ len = name_len;
}
read_extent_buffer(node, namebuf, (unsigned long)(ref + 1), len);
if (cur < total)
goto next;
+out:
return err;
}
if (index != (u64)-1 && index != ref_index)
goto next_ref;
- if (ref_namelen <= BTRFS_NAME_LEN) {
- len = ref_namelen;
- } else {
- len = BTRFS_NAME_LEN;
+ if (cur + sizeof(*ref) + ref_namelen > total ||
+ ref_namelen > BTRFS_NAME_LEN) {
warning("root %llu INODE %s[%llu %llu] name too long",
root->objectid,
key->type == BTRFS_INODE_REF_KEY ?
"REF" : "EXTREF",
key->objectid, key->offset);
+
+ if (cur + sizeof(*ref) > total)
+ break;
+ len = min_t(u32, total - cur - sizeof(*ref),
+ BTRFS_NAME_LEN);
+ } else {
+ len = ref_namelen;
}
+
read_extent_buffer(node, ref_namebuf, (unsigned long)(ref + 1),
len);
len = ref_namelen;
} else {
len = BTRFS_NAME_LEN;
- warning("Warning: root %llu INODE %s[%llu %llu] name too long\n",
+ warning("root %llu INODE %s[%llu %llu] name too long",
root->objectid,
key->type == BTRFS_INODE_REF_KEY ?
"REF" : "EXTREF",
key->objectid, key->offset, data_len);
name_len = btrfs_dir_name_len(node, di);
- if (name_len <= BTRFS_NAME_LEN) {
- len = name_len;
- } else {
- len = BTRFS_NAME_LEN;
+ if (cur + sizeof(*di) + name_len > total ||
+ name_len > BTRFS_NAME_LEN) {
warning("root %llu %s[%llu %llu] name too long",
root->objectid,
key->type == BTRFS_DIR_ITEM_KEY ?
"DIR_ITEM" : "DIR_INDEX",
key->objectid, key->offset);
+
+ if (cur + sizeof(*di) > total)
+ break;
+ len = min_t(u32, total - cur - sizeof(*di),
+ BTRFS_NAME_LEN);
+ } else {
+ len = name_len;
}
(*size) += name_len;
u64 disk_bytenr;
u64 disk_num_bytes;
u64 extent_num_bytes;
- u64 found;
+ u64 extent_offset;
+ u64 csum_found; /* In byte size, sectorsize aligned */
+ u64 search_start; /* Logical range start we search for csum */
+ u64 search_len; /* Logical range len we search for csum */
unsigned int extent_type;
unsigned int is_hole;
+ int compressed = 0;
int ret;
int err = 0;
fi = btrfs_item_ptr(node, slot, struct btrfs_file_extent_item);
+ /* Check inline extent */
extent_type = btrfs_file_extent_type(node, fi);
- /* Skip if file extent is inline */
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
struct btrfs_item *e = btrfs_item_nr(slot);
u32 item_inline_len;
item_inline_len = btrfs_file_extent_inline_item_len(node, e);
extent_num_bytes = btrfs_file_extent_inline_len(node, slot, fi);
- if (extent_num_bytes == 0 ||
- extent_num_bytes != item_inline_len)
+ compressed = btrfs_file_extent_compression(node, fi);
+ if (extent_num_bytes == 0) {
+ error(
+ "root %llu EXTENT_DATA[%llu %llu] has empty inline extent",
+ root->objectid, fkey->objectid, fkey->offset);
+ err |= FILE_EXTENT_ERROR;
+ }
+ if (!compressed && extent_num_bytes != item_inline_len) {
+ error(
+ "root %llu EXTENT_DATA[%llu %llu] wrong inline size, have: %llu, expected: %u",
+ root->objectid, fkey->objectid, fkey->offset,
+ extent_num_bytes, item_inline_len);
err |= FILE_EXTENT_ERROR;
+ }
+ *end += extent_num_bytes;
*size += extent_num_bytes;
return err;
}
disk_bytenr = btrfs_file_extent_disk_bytenr(node, fi);
disk_num_bytes = btrfs_file_extent_disk_num_bytes(node, fi);
extent_num_bytes = btrfs_file_extent_num_bytes(node, fi);
+ extent_offset = btrfs_file_extent_offset(node, fi);
+ compressed = btrfs_file_extent_compression(node, fi);
is_hole = (disk_bytenr == 0) && (disk_num_bytes == 0);
- /* Check EXTENT_DATA datasum */
- ret = count_csum_range(root, disk_bytenr, disk_num_bytes, &found);
- if (found > 0 && nodatasum) {
+ /*
+ * Check EXTENT_DATA csum
+ *
+ * For plain (uncompressed) extent, we should only check the range
+ * we're referring to, as it's possible that part of prealloc extent
+ * has been written, and has csum:
+ *
+ * |<--- Original large preallocated extent A ---->|
+ * |<- Prealloc File Extent ->|<- Regular Extent ->|
+ * No csum Has csum
+ *
+ * For compressed extent, we should check the whole range.
+ */
+ if (!compressed) {
+ search_start = disk_bytenr + extent_offset;
+ search_len = extent_num_bytes;
+ } else {
+ search_start = disk_bytenr;
+ search_len = disk_num_bytes;
+ }
+ ret = count_csum_range(root, search_start, search_len, &csum_found);
+ if (csum_found > 0 && nodatasum) {
err |= ODD_CSUM_ITEM;
error("root %llu EXTENT_DATA[%llu %llu] nodatasum shouldn't have datasum",
root->objectid, fkey->objectid, fkey->offset);
} else if (extent_type == BTRFS_FILE_EXTENT_REG && !nodatasum &&
- !is_hole &&
- (ret < 0 || found == 0 || found < disk_num_bytes)) {
+ !is_hole && (ret < 0 || csum_found < search_len)) {
err |= CSUM_ITEM_MISSING;
- error("root %llu EXTENT_DATA[%llu %llu] datasum missing",
- root->objectid, fkey->objectid, fkey->offset);
- } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC && found > 0) {
+ error("root %llu EXTENT_DATA[%llu %llu] csum missing, have: %llu, expected: %llu",
+ root->objectid, fkey->objectid, fkey->offset,
+ csum_found, search_len);
+ } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC && csum_found > 0) {
err |= ODD_CSUM_ITEM;
- error("root %llu EXTENT_DATA[%llu %llu] prealloc shouldn't have datasum",
- root->objectid, fkey->objectid, fkey->offset);
+ error("root %llu EXTENT_DATA[%llu %llu] prealloc shouldn't have csum, but has: %llu",
+ root->objectid, fkey->objectid, fkey->offset, csum_found);
}
/* Check EXTENT_DATA hole */
- if (no_holes && is_hole) {
- err |= FILE_EXTENT_ERROR;
- error("root %llu EXTENT_DATA[%llu %llu] shouldn't be hole",
- root->objectid, fkey->objectid, fkey->offset);
- } else if (!no_holes && *end != fkey->offset) {
+ if (!no_holes && *end != fkey->offset) {
err |= FILE_EXTENT_ERROR;
error("root %llu EXTENT_DATA[%llu %llu] interrupt",
root->objectid, fkey->objectid, fkey->offset);
* Just a warning, as dir inode nbytes is just an
* instructive value.
*/
- if (!IS_ALIGNED(nbytes, root->nodesize)) {
+ if (!IS_ALIGNED(nbytes, root->fs_info->nodesize)) {
warning("root %llu DIR INODE[%llu] nbytes should be aligned to %u",
- root->objectid, inode_id, root->nodesize);
+ root->objectid, inode_id,
+ root->fs_info->nodesize);
}
if (isize != size) {
static int check_fs_first_inode(struct btrfs_root *root, unsigned int ext_ref)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_key key;
int err = 0;
int ret;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- key.objectid = 256;
+ key.objectid = BTRFS_FIRST_FREE_OBJECTID;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ /* For root being dropped, we don't need to check first inode */
+ if (btrfs_root_refs(&root->root_item) == 0 &&
+ btrfs_disk_key_objectid(&root->root_item.drop_progress) >=
+ key.objectid)
+ return 0;
+
+ btrfs_init_path(&path);
+
+ ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
if (ret < 0)
goto out;
if (ret > 0) {
ret = 0;
err |= INODE_ITEM_MISSING;
+ error("first inode item of root %llu is missing",
+ root->objectid);
}
- err |= check_inode_item(root, path, ext_ref);
+ err |= check_inode_item(root, &path, ext_ref);
err &= ~LAST_ITEM;
if (err && !ret)
ret = -EIO;
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
*/
static int check_fs_root_v2(struct btrfs_root *root, unsigned int ext_ref)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct node_refs nrefs;
struct btrfs_root_item *root_item = &root->root_item;
- int ret, wret;
+ int ret;
int level;
+ int err = 0;
/*
* We need to manually check the first inode item(256)
if (ret < 0)
return ret;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
memset(&nrefs, 0, sizeof(nrefs));
level = btrfs_header_level(root->node);
+ btrfs_init_path(&path);
if (btrfs_root_refs(root_item) > 0 ||
btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
- path->nodes[level] = root->node;
- path->slots[level] = 0;
+ path.nodes[level] = root->node;
+ path.slots[level] = 0;
extent_buffer_get(root->node);
} else {
struct btrfs_key key;
btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
level = root_item->drop_level;
- path->lowest_level = level;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ path.lowest_level = level;
+ ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
if (ret < 0)
goto out;
ret = 0;
}
while (1) {
- wret = walk_down_tree_v2(root, path, &level, &nrefs, ext_ref);
- if (wret < 0)
- ret = wret;
- if (wret != 0)
+ ret = walk_down_tree_v2(root, &path, &level, &nrefs, ext_ref);
+ err |= !!ret;
+
+ /* if ret is negative, walk shall stop */
+ if (ret < 0) {
+ ret = err;
break;
+ }
- wret = walk_up_tree_v2(root, path, &level);
- if (wret < 0)
- ret = wret;
- if (wret != 0)
+ ret = walk_up_tree_v2(root, &path, &level);
+ if (ret != 0) {
+ /* Normal exit, reset ret to err */
+ ret = err;
break;
+ }
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
static int check_root_ref(struct btrfs_root *root, struct btrfs_key *ref_key,
struct extent_buffer *node, int slot)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_key key;
struct btrfs_root_ref *ref;
struct btrfs_root_ref *backref;
key.type = BTRFS_ROOT_BACKREF_KEY + BTRFS_ROOT_REF_KEY - ref_key->type;
key.offset = ref_key->objectid;
- path = btrfs_alloc_path();
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ btrfs_init_path(&path);
+ ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
if (ret) {
err |= ROOT_REF_MISSING;
error("%s[%llu %llu] couldn't find relative ref",
goto out;
}
- backref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ backref = btrfs_item_ptr(path.nodes[0], path.slots[0],
struct btrfs_root_ref);
- backref_dirid = btrfs_root_ref_dirid(path->nodes[0], backref);
- backref_seq = btrfs_root_ref_sequence(path->nodes[0], backref);
- backref_namelen = btrfs_root_ref_name_len(path->nodes[0], backref);
+ backref_dirid = btrfs_root_ref_dirid(path.nodes[0], backref);
+ backref_seq = btrfs_root_ref_sequence(path.nodes[0], backref);
+ backref_namelen = btrfs_root_ref_name_len(path.nodes[0], backref);
if (backref_namelen <= BTRFS_NAME_LEN) {
len = backref_namelen;
"ROOT_REF" : "ROOT_BACKREF",
key.objectid, key.offset);
}
- read_extent_buffer(path->nodes[0], backref_name,
+ read_extent_buffer(path.nodes[0], backref_name,
(unsigned long)(backref + 1), len);
if (ref_dirid != backref_dirid || ref_seq != backref_seq ||
ref_key->objectid, ref_key->offset);
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return err;
}
{
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *cur_root = NULL;
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_key key;
struct extent_buffer *node;
unsigned int ext_ref;
int ret;
int err = 0;
- ext_ref = btrfs_fs_incompat(fs_info,
- BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF);
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ ext_ref = btrfs_fs_incompat(fs_info, EXTENDED_IREF);
+ btrfs_init_path(&path);
key.objectid = BTRFS_FS_TREE_OBJECTID;
key.offset = 0;
key.type = BTRFS_ROOT_ITEM_KEY;
- ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, tree_root, &key, &path, 0, 0);
if (ret < 0) {
err = ret;
goto out;
}
while (1) {
- node = path->nodes[0];
- slot = path->slots[0];
+ node = path.nodes[0];
+ slot = path.slots[0];
btrfs_item_key_to_cpu(node, &key, slot);
if (key.objectid > BTRFS_LAST_FREE_OBJECTID)
goto out;
err |= ret;
}
next:
- ret = btrfs_next_item(tree_root, path);
+ ret = btrfs_next_item(tree_root, &path);
if (ret > 0)
goto out;
if (ret < 0) {
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return err;
}
return 0;
}
-static void free_extent_record_cache(struct btrfs_fs_info *fs_info,
- struct cache_tree *extent_cache)
+static void free_extent_record_cache(struct cache_tree *extent_cache)
{
struct cache_extent *cache;
struct extent_record *rec;
return 0;
}
-static int fix_key_order(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path)
+static int fix_key_order(struct btrfs_root *root, struct btrfs_path *path)
{
struct extent_buffer *buf;
struct btrfs_key k1, k2;
return ret;
}
-static int delete_bogus_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+static int delete_bogus_item(struct btrfs_root *root,
struct btrfs_path *path,
struct extent_buffer *buf, int slot)
{
return 0;
}
-static int fix_item_offset(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path)
+static int fix_item_offset(struct btrfs_root *root, struct btrfs_path *path)
{
struct extent_buffer *buf;
int i;
BTRFS_LEAF_DATA_SIZE(root)) {
if (btrfs_item_end_nr(buf, i) >
BTRFS_LEAF_DATA_SIZE(root)) {
- ret = delete_bogus_item(trans, root, path,
- buf, i);
+ ret = delete_bogus_item(root, path, buf, i);
if (!ret)
goto again;
fprintf(stderr, "item is off the end of the "
btrfs_item_offset_nr(buf, i - 1)) {
if (btrfs_item_end_nr(buf, i) >
btrfs_item_offset_nr(buf, i - 1)) {
- ret = delete_bogus_item(trans, root, path,
- buf, i);
+ ret = delete_bogus_item(root, path, buf, i);
if (!ret)
goto again;
fprintf(stderr, "items overlap, can't fix\n");
break;
}
if (status == BTRFS_TREE_BLOCK_BAD_KEY_ORDER)
- ret = fix_key_order(trans, search_root, &path);
+ ret = fix_key_order(search_root, &path);
else if (status == BTRFS_TREE_BLOCK_INVALID_OFFSETS)
- ret = fix_item_offset(trans, search_root, &path);
+ ret = fix_item_offset(search_root, &path);
if (ret) {
btrfs_commit_transaction(trans, search_root);
break;
struct extent_record *rec;
int ret = 0;
+ BUG_ON(tmpl->max_size == 0);
rec = malloc(sizeof(*rec));
if (!rec)
return -ENOMEM;
if (tmpl->metadata)
rec->crossing_stripes = check_crossing_stripes(global_info,
- rec->start, global_info->tree_root->nodesize);
+ rec->start, global_info->nodesize);
check_extent_type(rec);
return ret;
}
if (tmpl->metadata)
rec->crossing_stripes = check_crossing_stripes(
global_info, rec->start,
- global_info->tree_root->nodesize);
+ global_info->nodesize);
check_extent_type(rec);
maybe_free_extent_rec(extent_cache, rec);
return ret;
tmpl.start = bytenr;
tmpl.nr = 1;
tmpl.metadata = 1;
+ tmpl.max_size = 1;
ret = add_extent_rec_nolookup(extent_cache, &tmpl);
if (ret)
* wrong onwer(3) out of chunk tree, to pass both chunk tree check
* and owner<->key_type check.
*/
- ret = btrfs_check_chunk_valid(global_info->tree_root, eb, chunk, slot,
+ ret = btrfs_check_chunk_valid(global_info, eb, chunk, slot,
key->offset);
if (ret < 0) {
error("chunk(%llu, %llu) is not valid, ignore it",
if (key.type == BTRFS_METADATA_ITEM_KEY) {
metadata = 1;
- num_bytes = root->nodesize;
+ num_bytes = root->fs_info->nodesize;
} else {
num_bytes = key.offset;
}
- if (!IS_ALIGNED(key.objectid, root->sectorsize)) {
+ if (!IS_ALIGNED(key.objectid, root->fs_info->sectorsize)) {
error("ignoring invalid extent, bytenr %llu is not aligned to %u",
- key.objectid, root->sectorsize);
+ key.objectid, root->fs_info->sectorsize);
return -EIO;
}
if (item_size < sizeof(*ei)) {
metadata = 1;
else
metadata = 0;
- if (metadata && num_bytes != root->nodesize) {
+ if (metadata && num_bytes != root->fs_info->nodesize) {
error("ignore invalid metadata extent, length %llu does not equal to %u",
- num_bytes, root->nodesize);
+ num_bytes, root->fs_info->nodesize);
return -EIO;
}
- if (!metadata && !IS_ALIGNED(num_bytes, root->sectorsize)) {
+ if (!metadata && !IS_ALIGNED(num_bytes, root->fs_info->sectorsize)) {
error("ignore invalid data extent, length %llu is not aligned to %u",
- num_bytes, root->sectorsize);
+ num_bytes, root->fs_info->sectorsize);
return -EIO;
}
ret = add_tree_backref(extent_cache, key.objectid,
0, offset, 0);
if (ret < 0)
- error("add_tree_backref failed: %s",
+ error(
+ "add_tree_backref failed (extent items tree block): %s",
strerror(-ret));
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
ret = add_tree_backref(extent_cache, key.objectid,
offset, 0, 0);
if (ret < 0)
- error("add_tree_backref failed: %s",
+ error(
+ "add_tree_backref failed (extent items shared block): %s",
strerror(-ret));
break;
case BTRFS_EXTENT_DATA_REF_KEY:
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
+ ret = btrfs_rmap_block(root->fs_info,
cache->key.objectid, bytenr, 0,
&logical, &nr, &stripe_len);
if (ret)
if (key.type == BTRFS_EXTENT_ITEM_KEY)
last = key.objectid + key.offset;
else
- last = key.objectid + root->nodesize;
+ last = key.objectid + root->fs_info->nodesize;
path.slots[0]++;
continue;
}
if (key.type == BTRFS_EXTENT_ITEM_KEY)
last = key.objectid + key.offset;
else
- last = key.objectid + root->nodesize;
+ last = key.objectid + root->fs_info->nodesize;
path.slots[0]++;
}
start = cache->key.objectid + cache->key.offset;
if (!cache->free_space_ctl) {
if (btrfs_init_free_space_ctl(cache,
- root->sectorsize)) {
+ root->fs_info->sectorsize)) {
ret = -ENOMEM;
break;
}
btrfs_remove_free_space_cache(cache);
}
- if (btrfs_fs_compat_ro(root->fs_info,
- BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)) {
+ if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE)) {
ret = exclude_super_stripes(root, cache);
if (ret) {
fprintf(stderr, "could not exclude super stripes: %s\n",
u64 num_bytes, unsigned long leaf_offset,
struct extent_buffer *eb) {
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 offset = 0;
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
char *data;
unsigned long csum_offset;
u32 csum;
int mirror;
int num_copies;
- if (num_bytes % root->sectorsize)
+ if (num_bytes % fs_info->sectorsize)
return -EINVAL;
data = malloc(num_bytes);
again:
read_len = num_bytes - offset;
/* read as much space once a time */
- ret = read_extent_data(root, data + offset,
+ ret = read_extent_data(fs_info, data + offset,
bytenr + offset, &read_len, mirror);
if (ret)
goto out;
csum = ~(u32)0;
tmp = offset + data_checked;
- csum = btrfs_csum_data(NULL, (char *)data + tmp,
- csum, root->sectorsize);
+ csum = btrfs_csum_data((char *)data + tmp,
+ csum, fs_info->sectorsize);
btrfs_csum_final(csum, (u8 *)&csum);
csum_offset = leaf_offset +
- tmp / root->sectorsize * csum_size;
+ tmp / fs_info->sectorsize * csum_size;
read_extent_buffer(eb, (char *)&csum_expected,
csum_offset, csum_size);
/* try another mirror */
fprintf(stderr, "mirror %d bytenr %llu csum %u expected csum %u\n",
mirror, bytenr + tmp,
csum, csum_expected);
- num_copies = btrfs_num_copies(
- &root->fs_info->mapping_tree,
+ num_copies = btrfs_num_copies(root->fs_info,
bytenr, num_bytes);
if (mirror < num_copies - 1) {
mirror += 1;
goto again;
}
}
- data_checked += root->sectorsize;
+ data_checked += fs_info->sectorsize;
}
offset += read_len;
}
}
data_len = (btrfs_item_size_nr(leaf, path.slots[0]) /
- csum_size) * root->sectorsize;
+ csum_size) * root->fs_info->sectorsize;
if (!check_data_csum)
goto skip_csum_check;
leaf_offset = btrfs_item_ptr_offset(leaf, path.slots[0]);
* assumption and simply indicate that we _think_ that the FULL BACKREF needs to
* be set or not and then we can check later once we've gathered all the refs.
*/
-static int calc_extent_flag(struct btrfs_root *root,
- struct cache_tree *extent_cache,
+static int calc_extent_flag(struct cache_tree *extent_cache,
struct extent_buffer *buf,
struct root_item_record *ri,
u64 *flags)
struct device_extent_tree *dev_extent_cache,
struct root_item_record *ri)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *buf;
struct extent_record *rec = NULL;
u64 bytenr;
continue;
/* fixme, get the parent transid */
- readahead_tree_block(root, bits[i].start,
+ readahead_tree_block(fs_info, bits[i].start,
bits[i].size, 0);
}
}
}
/* fixme, get the real parent transid */
- buf = read_tree_block(root, bytenr, size, gen);
+ buf = read_tree_block(root->fs_info, bytenr, size, gen);
if (!extent_buffer_uptodate(buf)) {
record_bad_block_io(root->fs_info,
extent_cache, bytenr, size);
btrfs_header_level(buf), 1, NULL,
&flags);
if (ret < 0) {
- ret = calc_extent_flag(root, extent_cache, buf, ri, &flags);
+ ret = calc_extent_flag(extent_cache, buf, ri, &flags);
if (ret < 0) {
fprintf(stderr, "Couldn't calc extent flags\n");
flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
}
} else {
flags = 0;
- ret = calc_extent_flag(root, extent_cache, buf, ri, &flags);
+ ret = calc_extent_flag(extent_cache, buf, ri, &flags);
if (ret < 0) {
fprintf(stderr, "Couldn't calc extent flags\n");
flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
ret = add_tree_backref(extent_cache,
key.objectid, 0, key.offset, 0);
if (ret < 0)
- error("add_tree_backref failed: %s",
+ error(
+ "add_tree_backref failed (leaf tree block): %s",
strerror(-ret));
continue;
}
ret = add_tree_backref(extent_cache,
key.objectid, key.offset, 0, 0);
if (ret < 0)
- error("add_tree_backref failed: %s",
+ error(
+ "add_tree_backref failed (leaf shared block): %s",
strerror(-ret));
continue;
}
ref),
btrfs_extent_data_ref_offset(buf, ref),
btrfs_extent_data_ref_count(buf, ref),
- 0, root->sectorsize);
+ 0, root->fs_info->sectorsize);
continue;
}
if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
add_data_backref(extent_cache,
key.objectid, key.offset, 0, 0, 0,
btrfs_shared_data_ref_count(buf, ref),
- 0, root->sectorsize);
+ 0, root->fs_info->sectorsize);
continue;
}
if (key.type == BTRFS_ORPHAN_ITEM_KEY) {
data_bytes_allocated +=
btrfs_file_extent_disk_num_bytes(buf, fi);
- if (data_bytes_allocated < root->sectorsize) {
+ if (data_bytes_allocated < root->fs_info->sectorsize) {
abort();
}
data_bytes_referenced +=
struct extent_record tmpl;
ptr = btrfs_node_blockptr(buf, i);
- size = root->nodesize;
+ size = root->fs_info->nodesize;
btrfs_node_key_to_cpu(buf, &key, i);
if (ri != NULL) {
if ((level == ri->drop_level)
ret = add_tree_backref(extent_cache, ptr, parent,
owner, 1);
if (ret < 0) {
- error("add_tree_backref failed: %s",
+ error(
+ "add_tree_backref failed (non-leaf block): %s",
strerror(-ret));
continue;
}
static int delete_extent_records(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- u64 bytenr, u64 new_len)
+ u64 bytenr)
{
struct btrfs_key key;
struct btrfs_key found_key;
if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
found_key.type == BTRFS_METADATA_ITEM_KEY) {
u64 bytes = (found_key.type == BTRFS_EXTENT_ITEM_KEY) ?
- found_key.offset : root->nodesize;
+ found_key.offset : root->fs_info->nodesize;
ret = btrfs_update_block_group(trans, root, bytenr,
bytes, 0, 0);
struct extent_backref *back,
int allocated, u64 flags)
{
- int ret;
+ int ret = 0;
struct btrfs_root *extent_root = info->extent_root;
struct extent_buffer *leaf;
struct btrfs_key ins_key;
if (!back->is_data)
rec->max_size = max_t(u64, rec->max_size,
- info->extent_root->nodesize);
+ info->nodesize);
if (!allocated) {
u32 item_size = sizeof(*ei);
return ret;
}
-static int process_duplicates(struct btrfs_root *root,
- struct cache_tree *extent_cache,
+static int process_duplicates(struct cache_tree *extent_cache,
struct extent_record *rec)
{
struct extent_record *good, *tmp;
/* step two, delete all the existing records */
ret = delete_extent_records(trans, info->extent_root, &path,
- rec->start, rec->max_size);
+ rec->start);
if (ret < 0)
goto out;
ret = err;
}
+ if (!ret)
+ fprintf(stderr, "Repaired extent references for %llu\n",
+ (unsigned long long)rec->start);
+
btrfs_release_path(&path);
return ret;
}
btrfs_set_extent_flags(path.nodes[0], ei, flags);
btrfs_mark_buffer_dirty(path.nodes[0]);
btrfs_release_path(&path);
- return btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
+ if (!ret)
+ fprintf(stderr, "Repaired extent flags for %llu\n",
+ (unsigned long long)rec->start);
+
+ return ret;
}
/* right now we only prune from the extent allocation tree */
del_ptr:
printk("deleting pointer to block %Lu\n", corrupt->cache.start);
- ret = btrfs_del_ptr(trans, info->extent_root, &path, level, slot);
+ ret = btrfs_del_ptr(info->extent_root, &path, level, slot);
out:
btrfs_release_path(&path);
&start, &end, EXTENT_DIRTY);
if (ret)
break;
- clear_extent_dirty(&fs_info->free_space_cache, start, end,
- GFP_NOFS);
+ clear_extent_dirty(&fs_info->free_space_cache, start, end);
}
start = 0;
{
struct extent_record *rec;
struct cache_extent *cache;
- int err = 0;
int ret = 0;
- int fixed = 0;
int had_dups = 0;
- int recorded = 0;
if (repair) {
/*
rec = container_of(cache, struct extent_record, cache);
set_extent_dirty(root->fs_info->excluded_extents,
rec->start,
- rec->start + rec->max_size - 1,
- GFP_NOFS);
+ rec->start + rec->max_size - 1);
cache = next_cache_extent(cache);
}
while(cache) {
set_extent_dirty(root->fs_info->excluded_extents,
cache->start,
- cache->start + cache->size - 1,
- GFP_NOFS);
+ cache->start + cache->size - 1);
cache = next_cache_extent(cache);
}
prune_corrupt_blocks(root->fs_info);
* process_duplicates() will return 0, otherwise it will return
* 1 and we
*/
- if (process_duplicates(root, extent_cache, rec))
+ if (process_duplicates(extent_cache, rec))
continue;
ret = delete_duplicate_records(root, rec);
if (ret < 0)
while(1) {
int cur_err = 0;
+ int fix = 0;
- fixed = 0;
- recorded = 0;
cache = search_cache_extent(extent_cache, 0);
if (!cache)
break;
if (rec->num_duplicates) {
fprintf(stderr, "extent item %llu has multiple extent "
"items\n", (unsigned long long)rec->start);
- err = 1;
cur_err = 1;
}
ret = record_orphan_data_extents(root->fs_info, rec);
if (ret < 0)
goto repair_abort;
- if (ret == 0) {
- recorded = 1;
- } else {
- /*
- * we can't use the extent to repair file
- * extent, let the fallback method handle it.
- */
- if (!fixed && repair) {
- ret = fixup_extent_refs(
- root->fs_info,
- extent_cache, rec);
- if (ret)
- goto repair_abort;
- fixed = 1;
- }
- }
- err = 1;
+ fix = ret;
cur_err = 1;
}
if (all_backpointers_checked(rec, 1)) {
fprintf(stderr, "backpointer mismatch on [%llu %llu]\n",
(unsigned long long)rec->start,
(unsigned long long)rec->nr);
-
- if (!fixed && !recorded && repair) {
- ret = fixup_extent_refs(root->fs_info,
- extent_cache, rec);
- if (ret)
- goto repair_abort;
- fixed = 1;
- }
+ fix = 1;
cur_err = 1;
- err = 1;
}
if (!rec->owner_ref_checked) {
fprintf(stderr, "owner ref check failed [%llu %llu]\n",
(unsigned long long)rec->start,
(unsigned long long)rec->nr);
- if (!fixed && !recorded && repair) {
- ret = fixup_extent_refs(root->fs_info,
- extent_cache, rec);
- if (ret)
- goto repair_abort;
- fixed = 1;
- }
- err = 1;
+ fix = 1;
cur_err = 1;
}
+
+ if (repair && fix) {
+ ret = fixup_extent_refs(root->fs_info, extent_cache, rec);
+ if (ret)
+ goto repair_abort;
+ }
+
if (rec->bad_full_backref) {
fprintf(stderr, "bad full backref, on [%llu]\n",
(unsigned long long)rec->start);
ret = fixup_extent_flags(root->fs_info, rec);
if (ret)
goto repair_abort;
- fixed = 1;
+ fix = 1;
}
- err = 1;
cur_err = 1;
}
/*
fprintf(stderr,
"bad metadata [%llu, %llu) crossing stripe boundary\n",
rec->start, rec->start + rec->max_size);
- err = 1;
cur_err = 1;
}
fprintf(stderr,
"bad extent [%llu, %llu), type mismatch with chunk\n",
rec->start, rec->start + rec->max_size);
- err = 1;
cur_err = 1;
}
remove_cache_extent(extent_cache, cache);
free_all_extent_backrefs(rec);
- if (!init_extent_tree && repair && (!cur_err || fixed))
+ if (!init_extent_tree && repair && (!cur_err || fix))
clear_extent_dirty(root->fs_info->excluded_extents,
rec->start,
- rec->start + rec->max_size - 1,
- GFP_NOFS);
+ rec->start + rec->max_size - 1);
free(rec);
}
repair_abort:
if (ret)
goto repair_abort;
}
- if (err)
- fprintf(stderr, "repaired damaged extent references\n");
return ret;
}
- return err;
+ return 0;
}
u64 calc_stripe_length(u64 type, u64 length, int num_stripes)
rec = list_entry(list->next,
struct root_item_record, list);
last = 0;
- buf = read_tree_block(root->fs_info->tree_root,
+ buf = read_tree_block(root->fs_info,
rec->bytenr, rec->level_size, 0);
if (!extent_buffer_uptodate(buf)) {
free_extent_buffer(buf);
level = btrfs_header_level(root1->node);
ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
root1->node->start, 0, level, 0,
- root1->nodesize, NULL);
+ root1->fs_info->nodesize, NULL);
if (ret < 0)
goto out;
root1 = root->fs_info->chunk_root;
level = btrfs_header_level(root1->node);
ret = add_root_item_to_list(&normal_trees, root1->root_key.objectid,
root1->node->start, 0, level, 0,
- root1->nodesize, NULL);
+ root1->fs_info->nodesize, NULL);
if (ret < 0)
goto out;
btrfs_init_path(&path);
last_snapshot = btrfs_root_last_snapshot(&ri);
if (btrfs_disk_key_objectid(&ri.drop_progress) == 0) {
level = btrfs_root_level(&ri);
- level_size = root->nodesize;
+ level_size = root->fs_info->nodesize;
ret = add_root_item_to_list(&normal_trees,
found_key.objectid,
btrfs_root_bytenr(&ri),
goto out;
} else {
level = btrfs_root_level(&ri);
- level_size = root->nodesize;
+ level_size = root->fs_info->nodesize;
objectid = found_key.objectid;
btrfs_disk_key_to_cpu(&found_key,
&ri.drop_progress);
free_extent_cache_tree(&pending);
free_extent_cache_tree(&reada);
free_extent_cache_tree(&nodes);
+ free_root_item_list(&normal_trees);
+ free_root_item_list(&dropping_trees);
return ret;
loop:
free_corrupt_blocks_tree(root->fs_info->corrupt_blocks);
free_block_group_tree(&block_group_cache);
free_device_cache_tree(&dev_cache);
free_device_extent_tree(&dev_extent_cache);
- free_extent_record_cache(root->fs_info, &extent_cache);
+ free_extent_record_cache(&extent_cache);
free_root_item_list(&normal_trees);
free_root_item_list(&dropping_trees);
extent_io_tree_cleanup(&excluded_extents);
int slot;
int skinny_level;
int type;
- u32 nodesize = root->nodesize;
+ u32 nodesize = root->fs_info->nodesize;
u32 item_size;
u64 offset;
+ int tree_reloc_root = 0;
int found_ref = 0;
int err = 0;
int ret;
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
+ btrfs_header_bytenr(root->node) == bytenr)
+ tree_reloc_root = 1;
+
btrfs_init_path(&path);
key.objectid = bytenr;
- if (btrfs_fs_incompat(root->fs_info,
- BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA))
+ if (btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
key.type = BTRFS_METADATA_ITEM_KEY;
else
key.type = BTRFS_EXTENT_ITEM_KEY;
(offset == root->objectid || offset == owner)) {
found_ref = 1;
} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
+ /*
+ * Backref of tree reloc root points to itself, no need
+ * to check backref any more.
+ */
+ if (tree_reloc_root)
+ found_ref = 1;
+ else
/* Check if the backref points to valid referencer */
- found_ref = !check_tree_block_ref(root, NULL, offset,
- level + 1, owner);
+ found_ref = !check_tree_block_ref(root, NULL,
+ offset, level + 1, owner);
}
if (found_ref)
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_data_ref *dref;
u64 owner;
- u64 file_extent_gen;
u64 disk_bytenr;
u64 disk_num_bytes;
u64 extent_num_bytes;
u64 extent_flags;
- u64 extent_gen;
u32 item_size;
unsigned long end;
unsigned long ptr;
btrfs_item_key_to_cpu(eb, &fi_key, slot);
fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
- file_extent_gen = btrfs_file_extent_generation(eb, fi);
/* Nothing to check for hole and inline data extents */
if (btrfs_file_extent_type(eb, fi) == BTRFS_FILE_EXTENT_INLINE ||
extent_num_bytes = btrfs_file_extent_num_bytes(eb, fi);
/* Check unaligned disk_num_bytes and num_bytes */
- if (!IS_ALIGNED(disk_num_bytes, root->sectorsize)) {
+ if (!IS_ALIGNED(disk_num_bytes, root->fs_info->sectorsize)) {
error(
"file extent [%llu, %llu] has unaligned disk num bytes: %llu, should be aligned to %u",
fi_key.objectid, fi_key.offset, disk_num_bytes,
- root->sectorsize);
+ root->fs_info->sectorsize);
err |= BYTES_UNALIGNED;
} else {
data_bytes_allocated += disk_num_bytes;
}
- if (!IS_ALIGNED(extent_num_bytes, root->sectorsize)) {
+ if (!IS_ALIGNED(extent_num_bytes, root->fs_info->sectorsize)) {
error(
"file extent [%llu, %llu] has unaligned num bytes: %llu, should be aligned to %u",
fi_key.objectid, fi_key.offset, extent_num_bytes,
- root->sectorsize);
+ root->fs_info->sectorsize);
err |= BYTES_UNALIGNED;
} else {
data_bytes_referenced += extent_num_bytes;
dbref_key.offset = btrfs_file_extent_disk_num_bytes(eb, fi);
ret = btrfs_search_slot(NULL, extent_root, &dbref_key, &path, 0, 0);
- if (ret) {
- err |= BACKREF_MISSING;
- goto error;
- }
+ if (ret)
+ goto out;
leaf = path.nodes[0];
slot = path.slots[0];
ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
extent_flags = btrfs_extent_flags(leaf, ei);
- extent_gen = btrfs_extent_generation(leaf, ei);
if (!(extent_flags & BTRFS_EXTENT_FLAG_DATA)) {
error(
err |= BACKREF_MISMATCH;
}
- if (file_extent_gen < extent_gen) {
- error(
-"extent[%llu %llu] backref generation mismatch, wanted: <=%llu, have: %llu",
- disk_bytenr, disk_num_bytes, file_extent_gen,
- extent_gen);
- err |= BACKREF_MISMATCH;
- }
-
/* Check data backref inside that extent item */
item_size = btrfs_item_size_nr(leaf, path.slots[0]);
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
ptr += btrfs_extent_inline_ref_size(type);
}
- /* Didn't found inlined data backref, try EXTENT_DATA_REF_KEY */
if (!found_dbackref) {
btrfs_release_path(&path);
- btrfs_init_path(&path);
+ /* Didn't find inlined data backref, try EXTENT_DATA_REF_KEY */
dbref_key.objectid = btrfs_file_extent_disk_bytenr(eb, fi);
dbref_key.type = BTRFS_EXTENT_DATA_REF_KEY;
dbref_key.offset = hash_extent_data_ref(root->objectid,
ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
&dbref_key, &path, 0, 0);
- if (!ret)
+ if (!ret) {
found_dbackref = 1;
+ goto out;
+ }
+
+ btrfs_release_path(&path);
+
+ /*
+ * Neither inlined nor EXTENT_DATA_REF found, try
+ * SHARED_DATA_REF as last chance.
+ */
+ dbref_key.objectid = disk_bytenr;
+ dbref_key.type = BTRFS_SHARED_DATA_REF_KEY;
+ dbref_key.offset = eb->start;
+
+ ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
+ &dbref_key, &path, 0, 0);
+ if (!ret) {
+ found_dbackref = 1;
+ goto out;
+ }
}
+out:
if (!found_dbackref)
err |= BACKREF_MISSING;
-error:
btrfs_release_path(&path);
if (err & BACKREF_MISSING) {
error("data extent[%llu %llu] backref lost",
btrfs_release_path(&path);
/* Get level from tree block as an alternative source */
- eb = read_tree_block_fs_info(fs_info, bytenr, nodesize, transid);
+ eb = read_tree_block(fs_info, bytenr, nodesize, transid);
if (!extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
return -EIO;
}
/* Read out the tree block to get item/node key */
- eb = read_tree_block(root, bytenr, root->nodesize, 0);
+ eb = read_tree_block(fs_info, bytenr, root->fs_info->nodesize, 0);
if (!extent_buffer_uptodate(eb)) {
err |= REFERENCER_MISSING;
free_extent_buffer(eb);
}
/*
+ * Check if tree block @eb is a tree reloc root.
+ * Return 0 if it's not, or if any problem happens
+ * Return 1 if it's a tree reloc root
+ */
+static int is_tree_reloc_root(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb)
+{
+ struct btrfs_root *tree_reloc_root;
+ struct btrfs_key key;
+ u64 bytenr = btrfs_header_bytenr(eb);
+ u64 owner = btrfs_header_owner(eb);
+ int ret = 0;
+
+ key.objectid = BTRFS_TREE_RELOC_OBJECTID;
+ key.offset = owner;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+
+ tree_reloc_root = btrfs_read_fs_root_no_cache(fs_info, &key);
+ if (IS_ERR(tree_reloc_root))
+ return 0;
+
+ if (bytenr == btrfs_header_bytenr(tree_reloc_root->node))
+ ret = 1;
+ btrfs_free_fs_root(tree_reloc_root);
+ return ret;
+}
+
+/*
* Check referencer for shared block backref
* If level == -1, this function will resolve the level.
*/
int found_parent = 0;
int i;
- eb = read_tree_block_fs_info(fs_info, parent, nodesize, 0);
+ eb = read_tree_block(fs_info, parent, nodesize, 0);
if (!extent_buffer_uptodate(eb))
goto out;
if (level < 0)
goto out;
+ /* It's possible it's a tree reloc root */
+ if (parent == bytenr) {
+ if (is_tree_reloc_root(fs_info, eb))
+ found_parent = 1;
+ goto out;
+ }
+
if (level + 1 != btrfs_header_level(eb))
goto out;
leaf = path.nodes[0];
slot = path.slots[0];
+ if (slot >= btrfs_header_nritems(leaf))
+ goto next;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
break;
offset)
found_count++;
+next:
ret = btrfs_next_item(root, &path);
if (ret)
break;
int found_parent = 0;
int i;
- eb = read_tree_block_fs_info(fs_info, parent, nodesize, 0);
+ eb = read_tree_block(fs_info, parent, nodesize, 0);
if (!extent_buffer_uptodate(eb))
goto out;
}
end = (unsigned long)ei + item_size;
- if (ptr >= end) {
+next:
+ /* Reached extent item end normally */
+ if (ptr == end)
+ goto out;
+
+ /* Beyond extent item end, wrong item size */
+ if (ptr > end) {
err |= ITEM_SIZE_MISMATCH;
+ error("extent item at bytenr %llu slot %d has wrong size",
+ eb->start, slot);
goto out;
}
/* Now check every backref in this extent item */
-next:
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_extent_inline_ref_type(eb, iref);
offset = btrfs_extent_inline_ref_offset(eb, iref);
}
ptr += btrfs_extent_inline_ref_size(type);
- if (ptr < end)
- goto next;
+ goto next;
out:
return err;
l = path.nodes[0];
chunk = btrfs_item_ptr(l, path.slots[0], struct btrfs_chunk);
- if (btrfs_chunk_length(l, chunk) != length)
+ ret = btrfs_check_chunk_valid(fs_info, l, chunk, path.slots[0],
+ chunk_key.offset);
+ if (ret < 0)
+ goto out;
+
+ if (btrfs_stripe_length(fs_info, l, chunk) != length)
goto out;
num_stripes = btrfs_chunk_num_stripes(l, chunk);
/* Iterate dev_extents to calculate the used space of a device */
while (1) {
- btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
+ goto next;
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
if (key.objectid > dev_id)
break;
if (key.type != BTRFS_DEV_EXTENT_KEY || key.objectid != dev_id)
/* Iterate extent tree to account used space */
while (1) {
leaf = path.nodes[0];
+
+ /* Search slot can point to the last item beyond leaf nritems */
+ if (path.slots[0] >= btrfs_header_nritems(leaf))
+ goto next;
+
btrfs_item_key_to_cpu(leaf, &extent_key, path.slots[0]);
if (extent_key.objectid >= bg_key.objectid + bg_key.offset)
break;
struct btrfs_block_group_item *bi;
struct btrfs_block_group_item bg_item;
struct btrfs_dev_extent *ptr;
- u32 sectorsize = btrfs_super_sectorsize(fs_info->super_copy);
u64 length;
u64 chunk_end;
+ u64 stripe_len;
u64 type;
- u64 profile;
int num_stripes;
u64 offset;
u64 objectid;
chunk = btrfs_item_ptr(eb, slot, struct btrfs_chunk);
length = btrfs_chunk_length(eb, chunk);
chunk_end = chunk_key.offset + length;
- if (!IS_ALIGNED(length, sectorsize)) {
- error("chunk[%llu %llu) not aligned to %u",
- chunk_key.offset, chunk_end, sectorsize);
- err |= BYTES_UNALIGNED;
+ ret = btrfs_check_chunk_valid(fs_info, eb, chunk, slot,
+ chunk_key.offset);
+ if (ret < 0) {
+ error("chunk[%llu %llu) is invalid", chunk_key.offset,
+ chunk_end);
+ err |= BYTES_UNALIGNED | UNKNOWN_TYPE;
goto out;
}
-
type = btrfs_chunk_type(eb, chunk);
- profile = type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
- if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
- error("chunk[%llu %llu) has no chunk type",
- chunk_key.offset, chunk_end);
- err |= UNKNOWN_TYPE;
- }
- if (profile && (profile & (profile - 1))) {
- error("chunk[%llu %llu) multiple profiles detected: %llx",
- chunk_key.offset, chunk_end, profile);
- err |= UNKNOWN_TYPE;
- }
bg_key.objectid = chunk_key.offset;
bg_key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
}
num_stripes = btrfs_chunk_num_stripes(eb, chunk);
+ stripe_len = btrfs_stripe_length(fs_info, eb, chunk);
for (i = 0; i < num_stripes; i++) {
btrfs_release_path(&path);
btrfs_init_path(&path);
offset = btrfs_dev_extent_chunk_offset(leaf, ptr);
if (objectid != chunk_key.objectid ||
offset != chunk_key.offset ||
- btrfs_dev_extent_length(leaf, ptr) != length)
+ btrfs_dev_extent_length(leaf, ptr) != stripe_len)
goto not_match_dev;
continue;
not_match_dev:
* As a btrfs tree has most 8 levels (0..7), so it's quite safe
* to call the function itself.
*/
- eb = read_tree_block(root, blocknr, root->nodesize, 0);
+ eb = read_tree_block(root->fs_info, blocknr,
+ root->fs_info->nodesize, 0);
if (extent_buffer_uptodate(eb)) {
ret = traverse_tree_block(root, eb);
err |= ret;
goto next;
key.offset = (u64)-1;
- cur_root = btrfs_read_fs_root(root->fs_info, &key);
+ if (key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ cur_root = btrfs_read_fs_root_no_cache(root->fs_info,
+ &key);
+ else
+ cur_root = btrfs_read_fs_root(root->fs_info, &key);
if (IS_ERR(cur_root) || !cur_root) {
error("failed to read tree: %lld", key.objectid);
goto next;
ret = traverse_tree_block(cur_root, cur_root->node);
err |= ret;
+ if (key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ btrfs_free_fs_root(cur_root);
next:
ret = btrfs_next_item(root1, &path);
if (ret)
goto init;
}
c = btrfs_alloc_free_block(trans, root,
- root->nodesize,
+ root->fs_info->nodesize,
root->root_key.objectid,
&disk_key, level, 0, 0);
if (IS_ERR(c)) {
* in, but for now this doesn't actually use the root so
* just pass in extent_root.
*/
- tmp = read_tree_block(fs_info->extent_root, bytenr,
- nodesize, 0);
+ tmp = read_tree_block(fs_info, bytenr, nodesize, 0);
if (!extent_buffer_uptodate(tmp)) {
fprintf(stderr, "Error reading root block\n");
return -EIO;
continue;
}
- tmp = read_tree_block(fs_info->extent_root, bytenr,
+ tmp = read_tree_block(fs_info, bytenr,
nodesize, 0);
if (!extent_buffer_uptodate(tmp)) {
fprintf(stderr, "Error reading tree block\n");
key.objectid, key.offset,
btrfs_chunk_length(leaf, chunk));
set_extent_dirty(&fs_info->free_space_cache, key.offset,
- key.offset + btrfs_chunk_length(leaf, chunk),
- GFP_NOFS);
+ key.offset + btrfs_chunk_length(leaf, chunk));
path.slots[0]++;
}
start = 0;
* the leaves of any fs roots and pin down the bytes for any file
* extents we find. Not hard but why do it if we don't have to?
*/
- if (btrfs_fs_incompat(fs_info, BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)) {
+ if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
fprintf(stderr, "We don't support re-initing the extent tree "
"for mixed block groups yet, please notify a btrfs "
"developer you want to do this so they can add this "
struct btrfs_root *csum_root, char *buf, u64 start,
u64 len)
{
+ struct btrfs_fs_info *fs_info = csum_root->fs_info;
u64 offset = 0;
u64 sectorsize;
int ret = 0;
while (offset < len) {
- sectorsize = csum_root->sectorsize;
- ret = read_extent_data(csum_root, buf, start + offset,
+ sectorsize = fs_info->sectorsize;
+ ret = read_extent_data(fs_info, buf, start + offset,
§orsize, 0);
if (ret)
break;
int slot = 0;
int ret = 0;
- buf = malloc(cur_root->fs_info->csum_root->sectorsize);
+ buf = malloc(cur_root->fs_info->sectorsize);
if (!buf)
return -ENOMEM;
return ret;
}
- buf = malloc(csum_root->sectorsize);
+ buf = malloc(csum_root->fs_info->sectorsize);
if (!buf) {
btrfs_release_path(&path);
return -ENOMEM;
return ret;
}
-static int maybe_repair_root_item(struct btrfs_fs_info *info,
- struct btrfs_path *path,
+static int maybe_repair_root_item(struct btrfs_path *path,
const struct btrfs_key *root_key,
const int read_only_mode)
{
if (found_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
goto next;
- ret = maybe_repair_root_item(info, &path, &found_key,
- trans ? 0 : 1);
+ ret = maybe_repair_root_item(&path, &found_key, trans ? 0 : 1);
if (ret < 0)
goto out;
if (ret) {
global_info = info;
root = info->fs_root;
if (clear_space_cache == 1) {
- if (btrfs_fs_compat_ro(info,
- BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)) {
+ if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE)) {
error(
"free space cache v2 detected, use --clear-space-cache v2");
ret = 1;
}
goto close_out;
} else if (clear_space_cache == 2) {
- if (!btrfs_fs_compat_ro(info,
- BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)) {
+ if (!btrfs_fs_compat_ro(info, FREE_SPACE_TREE)) {
printf("no free space cache v2 to clear\n");
ret = 0;
goto close_out;
ret = repair_root_items(info);
err |= !!ret;
- if (ret < 0)
+ if (ret < 0) {
+ error("failed to repair root items: %s", strerror(-ret));
goto close_out;
+ }
if (repair) {
fprintf(stderr, "Fixed %d roots.\n", ret);
ret = 0;
}
if (!ctx.progress_enabled) {
- if (btrfs_fs_compat_ro(info, BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE))
+ if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
fprintf(stderr, "checking free space tree\n");
else
fprintf(stderr, "checking free space cache\n");
}
ret = check_space_cache(root);
err |= !!ret;
- if (ret)
+ if (ret) {
+ if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
+ error("errors found in free space tree");
+ else
+ error("errors found in free space cache");
goto out;
+ }
/*
* We used to have to have these hole extents in between our real
* are no gaps in the file extents for inodes, otherwise we can just
* ignore it when this happens.
*/
- no_holes = btrfs_fs_incompat(root->fs_info,
- BTRFS_FEATURE_INCOMPAT_NO_HOLES);
+ no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
if (!ctx.progress_enabled)
fprintf(stderr, "checking fs roots\n");
if (check_mode == CHECK_MODE_LOWMEM)
else
ret = check_fs_roots(root, &root_cache);
err |= !!ret;
- if (ret)
+ if (ret) {
+ error("errors found in fs roots");
goto out;
+ }
fprintf(stderr, "checking csums\n");
ret = check_csums(root);
err |= !!ret;
- if (ret)
+ if (ret) {
+ error("errors found in csum tree");
goto out;
+ }
fprintf(stderr, "checking root refs\n");
/* For low memory mode, check_fs_roots_v2 handles root refs */
if (check_mode != CHECK_MODE_LOWMEM) {
ret = check_root_refs(root, &root_cache);
err |= !!ret;
- if (ret)
+ if (ret) {
+ error("errors found in root refs");
goto out;
+ }
}
while (repair && !list_empty(&root->fs_info->recow_ebs)) {
list_del_init(&eb->recow);
ret = recow_extent_buffer(root, eb);
err |= !!ret;
- if (ret)
+ if (ret) {
+ error("fails to fix transid errors");
break;
+ }
}
while (!list_empty(&delete_items)) {
fprintf(stderr, "checking quota groups\n");
ret = qgroup_verify_all(info);
err |= !!ret;
- if (ret)
+ if (ret) {
+ error("failed to check quota groups");
goto out;
+ }
report_qgroups(0);
ret = repair_qgroups(info, &qgroups_repaired);
err |= !!ret;
- if (err)
+ if (err) {
+ error("failed to repair quota groups");
goto out;
+ }
ret = 0;
}
"backup data and re-format the FS. *\n\n");
err |= 1;
}
- printf("found %llu bytes used err is %d\n",
- (unsigned long long)bytes_used, ret);
+ printf("found %llu bytes used, ",
+ (unsigned long long)bytes_used);
+ if (err)
+ printf("error(s) found\n");
+ else
+ printf("no error found\n");
printf("total csum bytes: %llu\n",(unsigned long long)total_csum_bytes);
printf("total tree bytes: %llu\n",
(unsigned long long)total_btree_bytes);