#include "free-space-cache.h"
#include "btrfsck.h"
#include "qgroup-verify.h"
+#include "rbtree-utils.h"
+#include "backref.h"
+#include "ulist.h"
static u64 bytes_used = 0;
static u64 total_csum_bytes = 0;
struct inode_backref *backref;
list_for_each_entry(backref, &rec->backrefs, list) {
+ if (rec->ino == BTRFS_MULTIPLE_OBJECTIDS)
+ break;
if (backref->dir != dir || backref->namelen != namelen)
continue;
if (memcmp(name, backref->name, namelen))
backref->errors |= REF_ERR_DUP_INODE_REF;
if (backref->found_dir_index && backref->index != index)
backref->errors |= REF_ERR_INDEX_UNMATCH;
+ else
+ backref->index = index;
backref->ref_type = itemtype;
- backref->index = index;
backref->found_inode_ref = 1;
} else {
BUG_ON(1);
return 0;
}
+/*
+ * Returns:
+ * < 0 - on error
+ * 1 - if the root with id child_root_id is a child of root parent_root_id
+ * 0 - if the root child_root_id isn't a child of the root parent_root_id but
+ * has other root(s) as parent(s)
+ * 2 - if the root child_root_id doesn't have any parent roots
+ */
static int is_child_root(struct btrfs_root *root, u64 parent_root_id,
u64 child_root_id)
{
btrfs_release_path(&path);
if (ret < 0)
return ret;
- return has_parent? 0 : -1;
+ return has_parent ? 0 : 2;
}
static int process_dir_item(struct btrfs_root *root,
namebuf, len, filetype,
key->type, error);
} else {
- fprintf(stderr, "warning line %d\n", __LINE__);
+ fprintf(stderr, "invalid location in dir item %u\n",
+ location.type);
+ add_inode_backref(inode_cache, BTRFS_MULTIPLE_OBJECTIDS,
+ key->objectid, key->offset, namebuf,
+ len, filetype, key->type, error);
}
len = sizeof(*di) + name_len + data_len;
}
}
+/*
+ * Check the child node/leaf by the following conditions:
+ * 1. the first item key of the node/leaf should be the same as the one
+ *    in the parent.
+ * 2. the block pointer in the parent node should match the child node/leaf.
+ * 3. the generation recorded in the parent node and in the child's header
+ *    should be consistent.
+ *
+ * Otherwise the child node/leaf pointed to by the key in the parent is not
+ * valid.
+ *
+ * We would like to check the leaf owner too, but since subvolumes may share
+ * leaves that check would not be very strong; the key check should be
+ * sufficient for that case.
+ */
+static int check_child_node(struct btrfs_root *root,
+			    struct extent_buffer *parent, int slot,
+			    struct extent_buffer *child)
+{
+	struct btrfs_key parent_key;
+	struct btrfs_key child_key;
+	int ret = 0;
+
+	btrfs_node_key_to_cpu(parent, &parent_key, slot);
+	/* The child's first key must match the key stored in the parent slot */
+	if (btrfs_header_level(child) == 0)
+		btrfs_item_key_to_cpu(child, &child_key, 0);
+	else
+		btrfs_node_key_to_cpu(child, &child_key, 0);
+
+	if (memcmp(&parent_key, &child_key, sizeof(parent_key))) {
+		ret = -EINVAL;
+		fprintf(stderr,
+			"Wrong key of child node/leaf, wanted: (%llu, %u, %llu), have: (%llu, %u, %llu)\n",
+			parent_key.objectid, parent_key.type, parent_key.offset,
+			child_key.objectid, child_key.type, child_key.offset);
+	}
+	if (btrfs_header_bytenr(child) != btrfs_node_blockptr(parent, slot)) {
+		ret = -EINVAL;
+		fprintf(stderr, "Wrong block of child node/leaf, wanted: %llu, have: %llu\n",
+			btrfs_node_blockptr(parent, slot),
+			btrfs_header_bytenr(child));
+	}
+	if (btrfs_node_ptr_generation(parent, slot) !=
+	    btrfs_header_generation(child)) {
+		ret = -EINVAL;
+		/*
+		 * As for the block pointer check above, "wanted" is what the
+		 * parent records and "have" is what the child header carries.
+		 */
+		fprintf(stderr, "Wrong generation of child node/leaf, wanted: %llu, have: %llu\n",
+			btrfs_node_ptr_generation(parent, slot),
+			btrfs_header_generation(child));
+	}
+	return ret;
+}
+
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
struct walk_control *wc, int *level)
{
+ enum btrfs_tree_block_status status;
u64 bytenr;
u64 ptr_gen;
struct extent_buffer *next;
}
}
+ ret = check_child_node(root, cur, path->slots[*level], next);
+ if (ret) {
+ err = ret;
+ goto out;
+ }
+
+ if (btrfs_is_leaf(next))
+ status = btrfs_check_leaf(root, NULL, next);
+ else
+ status = btrfs_check_node(root, NULL, next);
+ if (status != BTRFS_TREE_BLOCK_CLEAN) {
+ free_extent_buffer(next);
+ err = -EIO;
+ goto out;
+ }
+
*level = *level - 1;
free_extent_buffer(path->nodes[*level]);
path->nodes[*level] = next;
return ret;
}
+/*
+ * Insert the DIR_INDEX item that a backref claims should exist for @rec but
+ * was not found in the directory, then update the cached record of the
+ * parent directory so its size accounting matches the new item.
+ *
+ * Returns 0 on success, -ENOMEM if the path cannot be allocated, or the
+ * error from starting the transaction.  Failure to insert the empty item
+ * is treated as fatal (BUG_ON), matching the file's repair style.
+ */
+static int add_missing_dir_index(struct btrfs_root *root,
+				 struct cache_tree *inode_cache,
+				 struct inode_record *rec,
+				 struct inode_backref *backref)
+{
+	struct btrfs_path *path;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_dir_item *dir_item;
+	struct extent_buffer *leaf;
+	struct btrfs_key key;
+	struct btrfs_disk_key disk_key;
+	struct inode_record *dir_rec;
+	unsigned long name_ptr;
+	/* dir item header plus the name bytes that immediately follow it */
+	u32 data_size = sizeof(*dir_item) + backref->namelen;
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	trans = btrfs_start_transaction(root, 1);
+	if (IS_ERR(trans)) {
+		btrfs_free_path(path);
+		return PTR_ERR(trans);
+	}
+
+	fprintf(stderr, "repairing missing dir index item for inode %llu\n",
+		(unsigned long long)rec->ino);
+	key.objectid = backref->dir;
+	key.type = BTRFS_DIR_INDEX_KEY;
+	key.offset = backref->index;
+
+	ret = btrfs_insert_empty_item(trans, root, path, &key, data_size);
+	BUG_ON(ret);
+
+	leaf = path->nodes[0];
+	dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
+
+	/* The dir item's location key points back at the inode itself */
+	disk_key.objectid = cpu_to_le64(rec->ino);
+	disk_key.type = BTRFS_INODE_ITEM_KEY;
+	disk_key.offset = 0;
+
+	btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
+	btrfs_set_dir_type(leaf, dir_item, imode_to_type(rec->imode));
+	btrfs_set_dir_data_len(leaf, dir_item, 0);
+	btrfs_set_dir_name_len(leaf, dir_item, backref->namelen);
+	/* name is stored directly after the dir_item header */
+	name_ptr = (unsigned long)(dir_item + 1);
+	write_extent_buffer(leaf, backref->name, name_ptr, backref->namelen);
+	btrfs_mark_buffer_dirty(leaf);
+	btrfs_free_path(path);
+	btrfs_commit_transaction(trans, root);
+
+	backref->found_dir_index = 1;
+	/*
+	 * Re-evaluate the parent directory's isize error now that the index
+	 * item exists and its name length counts toward found_size.
+	 */
+	dir_rec = get_inode_rec(inode_cache, backref->dir, 0);
+	if (!dir_rec)
+		return 0;
+	dir_rec->found_size += backref->namelen;
+	if (dir_rec->found_size == dir_rec->isize &&
+	    (dir_rec->errors & I_ERR_DIR_ISIZE_WRONG))
+		dir_rec->errors &= ~I_ERR_DIR_ISIZE_WRONG;
+	if (dir_rec->found_size != dir_rec->isize)
+		dir_rec->errors |= I_ERR_DIR_ISIZE_WRONG;
+
+	return 0;
+}
+
+/*
+ * Delete a DIR_INDEX item that a backref shows to be bad (it either has no
+ * matching inode ref or its index disagrees with the inode ref).
+ *
+ * Returns 0 on success; -ENOENT from the lookup is also treated as success
+ * because the item is already gone.
+ */
+static int delete_dir_index(struct btrfs_root *root,
+			    struct cache_tree *inode_cache,
+			    struct inode_record *rec,
+			    struct inode_backref *backref)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_dir_item *di;
+	struct btrfs_path *path;
+	int ret = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	trans = btrfs_start_transaction(root, 1);
+	if (IS_ERR(trans)) {
+		btrfs_free_path(path);
+		return PTR_ERR(trans);
+	}
+
+
+	fprintf(stderr, "Deleting bad dir index [%llu,%u,%llu] root %llu\n",
+		(unsigned long long)backref->dir,
+		BTRFS_DIR_INDEX_KEY, (unsigned long long)backref->index,
+		(unsigned long long)root->objectid);
+
+	di = btrfs_lookup_dir_index(trans, root, path, backref->dir,
+				    backref->name, backref->namelen,
+				    backref->index, -1);
+	if (IS_ERR(di)) {
+		ret = PTR_ERR(di);
+		btrfs_free_path(path);
+		btrfs_commit_transaction(trans, root);
+		/* the item is already missing, nothing to delete */
+		if (ret == -ENOENT)
+			return 0;
+		return ret;
+	}
+
+	/*
+	 * NOTE(review): presumably a NULL di means the lookup positioned the
+	 * path on the item without matching the name, so the whole item is
+	 * removed; otherwise only the one name is deleted -- confirm against
+	 * btrfs_lookup_dir_index() semantics.
+	 */
+	if (!di)
+		ret = btrfs_del_item(trans, root, path);
+	else
+		ret = btrfs_delete_one_dir_name(trans, root, path, di);
+	BUG_ON(ret);
+	btrfs_free_path(path);
+	btrfs_commit_transaction(trans, root);
+	return ret;
+}
+
+/*
+ * Repair the backrefs of an inode record.  Runs in two passes driven by
+ * @delete: first delete bad/unmatched dir index items, then re-add the
+ * missing ones (deleting first avoids EEXIST when re-inserting).
+ *
+ * Returns a negative error, or the number of repairs performed.
+ */
+static int repair_inode_backrefs(struct btrfs_root *root,
+				 struct inode_record *rec,
+				 struct cache_tree *inode_cache,
+				 int delete)
+{
+	struct inode_backref *tmp, *backref;
+	u64 root_dirid = btrfs_root_dirid(&root->root_item);
+	int ret = 0;
+	int repaired = 0;
+
+	list_for_each_entry_safe(backref, tmp, &rec->backrefs, list) {
+		/* Index 0 for root dir's are special, don't mess with it */
+		if (rec->ino == root_dirid && backref->index == 0)
+			continue;
+
+		if (delete &&
+		    ((backref->found_dir_index && !backref->found_inode_ref) ||
+		     (backref->found_dir_index && backref->found_inode_ref &&
+		      (backref->errors & REF_ERR_INDEX_UNMATCH)))) {
+			ret = delete_dir_index(root, inode_cache, rec, backref);
+			if (ret)
+				break;
+			repaired++;
+			list_del(&backref->list);
+			free(backref);
+		}
+
+		if (!delete && !backref->found_dir_index &&
+		    backref->found_dir_item && backref->found_inode_ref) {
+			ret = add_missing_dir_index(root, inode_cache, rec,
+						    backref);
+			if (ret)
+				break;
+			repaired++;
+			/*
+			 * The backref is now fully accounted for, drop it so
+			 * we don't try to repair it again.
+			 */
+			if (backref->found_dir_item &&
+			    backref->found_dir_index &&
+			    backref->found_inode_ref && !backref->errors) {
+				list_del(&backref->list);
+				free(backref);
+			}
+		}
+	}
+	return ret ? ret : repaired;
+}
+
static int try_repair_inode(struct btrfs_root *root, struct inode_record *rec)
{
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
int ret = 0;
- /* So far we just fix dir isize wrong */
if (!(rec->errors & (I_ERR_DIR_ISIZE_WRONG | I_ERR_NO_ORPHAN_ITEM)))
- return 1;
+ return rec->errors;
path = btrfs_alloc_path();
if (!path)
struct ptr_node *node;
struct inode_record *rec;
struct inode_backref *backref;
+ int stage = 0;
int ret;
+ int err = 0;
u64 error = 0;
u64 root_dirid = btrfs_root_dirid(&root->root_item);
return 0;
}
+ /*
+ * We need to repair backrefs first because we could change some of the
+ * errors in the inode recs.
+ *
+ * We also need to go through and delete invalid backrefs first and then
+ * add the correct ones second. We do this because we may get EEXIST
+ * when adding back the correct index because we hadn't yet deleted the
+ * invalid index.
+ *
+ * For example, if we were missing a dir index then the directories
+ * isize would be wrong, so if we fixed the isize to what we thought it
+ * would be and then fixed the backref we'd still have a invalid fs, so
+ * we need to add back the dir index and then check to see if the isize
+ * is still wrong.
+ */
+ while (stage < 3) {
+ stage++;
+ if (stage == 3 && !err)
+ break;
+
+ cache = search_cache_extent(inode_cache, 0);
+ while (repair && cache) {
+ node = container_of(cache, struct ptr_node, cache);
+ rec = node->data;
+ cache = next_cache_extent(cache);
+
+ /* Need to free everything up and rescan */
+ if (stage == 3) {
+ remove_cache_extent(inode_cache, &node->cache);
+ free(node);
+ free_inode_rec(rec);
+ continue;
+ }
+
+ if (list_empty(&rec->backrefs))
+ continue;
+
+ ret = repair_inode_backrefs(root, rec, inode_cache,
+ stage == 1);
+ if (ret < 0) {
+ err = ret;
+ stage = 2;
+ break;
+ } if (ret > 0) {
+ err = -EAGAIN;
+ }
+ }
+ }
+ if (err)
+ return err;
+
rec = get_inode_rec(inode_cache, root_dirid, 0);
if (rec) {
ret = check_root_dir(rec);
struct shared_node root_node;
struct root_record *rec;
struct btrfs_root_item *root_item = &root->root_item;
+ enum btrfs_tree_block_status status;
if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
rec = get_root_rec(root_cache, root->root_key.objectid);
wc->active_node = level;
wc->root_level = level;
+ /* We may not have checked the root block, lets do that now */
+ if (btrfs_is_leaf(root->node))
+ status = btrfs_check_leaf(root, NULL, root->node);
+ else
+ status = btrfs_check_node(root, NULL, root->node);
+ if (status != BTRFS_TREE_BLOCK_CLEAN)
+ return -EIO;
+
if (btrfs_root_refs(root_item) > 0 ||
btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
path.nodes[level] = root->node;
struct btrfs_path path;
struct btrfs_key key;
struct walk_control wc;
- struct extent_buffer *leaf;
+ struct extent_buffer *leaf, *tree_node;
struct btrfs_root *tmp_root;
struct btrfs_root *tree_root = root->fs_info->tree_root;
int ret;
cache_tree_init(&wc.shared);
btrfs_init_path(&path);
+again:
key.offset = 0;
key.objectid = 0;
key.type = BTRFS_ROOT_ITEM_KEY;
err = 1;
goto out;
}
+ tree_node = tree_root->node;
while (1) {
+ if (tree_node != tree_root->node) {
+ free_root_recs_tree(root_cache);
+ btrfs_release_path(&path);
+ goto again;
+ }
leaf = path.nodes[0];
if (path.slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(tree_root, &path);
goto next;
}
ret = check_fs_root(tmp_root, root_cache, &wc);
+ if (ret == -EAGAIN) {
+ free_root_recs_tree(root_cache);
+ btrfs_release_path(&path);
+ goto again;
+ }
if (ret)
err = 1;
if (key.objectid == BTRFS_TREE_RELOC_OBJECTID)
return 0;
}
+/*
+ * Repair out-of-order keys in the node/leaf at path->lowest_level by
+ * swapping offending neighbours.
+ *
+ * Returns 0 on success (including when nothing needed fixing) or the
+ * error from swap_values().
+ */
+static int fix_key_order(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root,
+			 struct btrfs_path *path)
+{
+	struct extent_buffer *buf;
+	struct btrfs_key k1, k2;
+	int i;
+	int level = path->lowest_level;
+	int ret = 0;	/* must be initialized: the loop may never assign it */
+
+	buf = path->nodes[level];
+	for (i = 0; i < btrfs_header_nritems(buf) - 1; i++) {
+		if (level) {
+			btrfs_node_key_to_cpu(buf, &k1, i);
+			btrfs_node_key_to_cpu(buf, &k2, i + 1);
+		} else {
+			btrfs_item_key_to_cpu(buf, &k1, i);
+			btrfs_item_key_to_cpu(buf, &k2, i + 1);
+		}
+		if (btrfs_comp_cpu_keys(&k1, &k2) < 0)
+			continue;
+		ret = swap_values(root, path, buf, i);
+		if (ret)
+			break;
+		btrfs_mark_buffer_dirty(buf);
+		/*
+		 * Rescan after a swap since it can break earlier ordering.
+		 * NOTE(review): i++ makes the rescan start at pair (1,2), so
+		 * pair (0,1) is not rechecked -- confirm this is intended.
+		 */
+		i = 0;
+	}
+	return ret;
+}
+
+/*
+ * Drop the item at @slot from leaf @buf because its data offset/size is
+ * bogus.  Only key types that fsck can later rebuild or safely tolerate
+ * missing are removed; anything else returns -1 so the caller gives up.
+ */
+static int delete_bogus_item(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     struct btrfs_path *path,
+			     struct extent_buffer *buf, int slot)
+{
+	struct btrfs_key key;
+	int nritems = btrfs_header_nritems(buf);
+
+	btrfs_item_key_to_cpu(buf, &key, slot);
+
+	/* These are all the keys we can deal with missing. */
+	if (key.type != BTRFS_DIR_INDEX_KEY &&
+	    key.type != BTRFS_EXTENT_ITEM_KEY &&
+	    key.type != BTRFS_METADATA_ITEM_KEY &&
+	    key.type != BTRFS_TREE_BLOCK_REF_KEY &&
+	    key.type != BTRFS_EXTENT_DATA_REF_KEY)
+		return -1;
+
+	printf("Deleting bogus item [%llu,%u,%llu] at slot %d on block %llu\n",
+	       (unsigned long long)key.objectid, key.type,
+	       (unsigned long long)key.offset, slot, buf->start);
+	/* Shift the following item headers down over the deleted slot */
+	memmove_extent_buffer(buf, btrfs_item_nr_offset(slot),
+			      btrfs_item_nr_offset(slot + 1),
+			      sizeof(struct btrfs_item) *
+			      (nritems - slot - 1));
+	btrfs_set_header_nritems(buf, nritems - 1);
+	if (slot == 0) {
+		struct btrfs_disk_key disk_key;
+
+		/* Deleting the first item changes the key the parent holds */
+		btrfs_item_key(buf, &disk_key, 0);
+		btrfs_fixup_low_keys(root, path, &disk_key, 1);
+	}
+	btrfs_mark_buffer_dirty(buf);
+	return 0;
+}
+
+/*
+ * Repair bogus item data offsets in a leaf.
+ *
+ * In a btrfs leaf, item headers grow from the front while item data is
+ * packed from the end, so item i's data must end exactly where item i-1's
+ * data begins (or at the end of the leaf data area for item 0).  A gap is
+ * closed by shifting the item's data toward the leaf end; an item whose
+ * data overruns its bound is dropped via delete_bogus_item() when its key
+ * type allows, otherwise we give up with -EIO.
+ */
+static int fix_item_offset(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root,
+			   struct btrfs_path *path)
+{
+	struct extent_buffer *buf;
+	int i;
+	int ret = 0;
+
+	/* We should only get this for leaves */
+	BUG_ON(path->lowest_level);
+	buf = path->nodes[0];
+again:
+	for (i = 0; i < btrfs_header_nritems(buf); i++) {
+		unsigned int shift = 0, offset;
+
+		if (i == 0 && btrfs_item_end_nr(buf, i) !=
+		    BTRFS_LEAF_DATA_SIZE(root)) {
+			if (btrfs_item_end_nr(buf, i) >
+			    BTRFS_LEAF_DATA_SIZE(root)) {
+				/* data runs past the leaf, try to drop the item */
+				ret = delete_bogus_item(trans, root, path,
+							buf, i);
+				if (!ret)
+					goto again;
+				fprintf(stderr, "item is off the end of the "
+					"leaf, can't fix\n");
+				ret = -EIO;
+				break;
+			}
+			shift = BTRFS_LEAF_DATA_SIZE(root) -
+				btrfs_item_end_nr(buf, i);
+		} else if (i > 0 && btrfs_item_end_nr(buf, i) !=
+			   btrfs_item_offset_nr(buf, i - 1)) {
+			if (btrfs_item_end_nr(buf, i) >
+			    btrfs_item_offset_nr(buf, i - 1)) {
+				/* data of item i overlaps item i-1, drop it */
+				ret = delete_bogus_item(trans, root, path,
+							buf, i);
+				if (!ret)
+					goto again;
+				fprintf(stderr, "items overlap, can't fix\n");
+				ret = -EIO;
+				break;
+			}
+			shift = btrfs_item_offset_nr(buf, i - 1) -
+				btrfs_item_end_nr(buf, i);
+		}
+		if (!shift)
+			continue;
+
+		/* close the gap by moving the data toward the leaf end */
+		printf("Shifting item nr %d by %u bytes in block %llu\n",
+		       i, shift, (unsigned long long)buf->start);
+		offset = btrfs_item_offset_nr(buf, i);
+		memmove_extent_buffer(buf,
+				      btrfs_leaf_data(buf) + offset + shift,
+				      btrfs_leaf_data(buf) + offset,
+				      btrfs_item_size_nr(buf, i));
+		btrfs_set_item_offset(buf, btrfs_item_nr(i),
+				      offset + shift);
+		btrfs_mark_buffer_dirty(buf);
+	}
+
+	/*
+	 * We may have moved things, in which case we want to exit so we don't
+	 * write those changes out. Once we have proper abort functionality in
+	 * progs this can be changed to something nicer.
+	 */
+	BUG_ON(ret);
+	return ret;
+}
+
/*
- * Attempt to fix basic block failures. Currently we only handle bad key
- * orders, we will cycle through the keys and swap them if necessary.
+ * Attempt to fix basic block failures. If we can't fix it for whatever reason
+ * then just return -EIO.
*/
static int try_to_fix_bad_block(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct extent_buffer *buf,
-				struct btrfs_disk_key *parent_key,
				enum btrfs_tree_block_status status)
{
+	struct ulist *roots;
+	struct ulist_node *node;
+	struct btrfs_root *search_root;
	struct btrfs_path *path;
-	struct btrfs_key k1, k2;
-	int i;
-	int level;
+	struct ulist_iterator iter;
+	struct btrfs_key root_key, key;
	int ret;
-	if (status != BTRFS_TREE_BLOCK_BAD_KEY_ORDER)
+	if (status != BTRFS_TREE_BLOCK_BAD_KEY_ORDER &&
+	    status != BTRFS_TREE_BLOCK_INVALID_OFFSETS)
		return -EIO;
-	k1.objectid = btrfs_header_owner(buf);
-	k1.type = BTRFS_ROOT_ITEM_KEY;
-	k1.offset = (u64)-1;
-
-	root = btrfs_read_fs_root(root->fs_info, &k1);
-	if (IS_ERR(root))
-		return -EIO;
-
-	record_root_in_trans(trans, root);
-
	path = btrfs_alloc_path();
	if (!path)
		return -EIO;
-	level = btrfs_header_level(buf);
-	path->lowest_level = level;
-	path->skip_check_block = 1;
-	if (level)
-		btrfs_node_key_to_cpu(buf, &k1, 0);
-	else
-		btrfs_item_key_to_cpu(buf, &k1, 0);
-
-	ret = btrfs_search_slot(trans, root, &k1, path, 0, 1);
+	ret = btrfs_find_all_roots(trans, root->fs_info, buf->start,
+				   0, &roots);
	if (ret) {
		btrfs_free_path(path);
		return -EIO;
	}
-	buf = path->nodes[level];
-	for (i = 0; i < btrfs_header_nritems(buf) - 1; i++) {
-		if (level) {
-			btrfs_node_key_to_cpu(buf, &k1, i);
-			btrfs_node_key_to_cpu(buf, &k2, i + 1);
-		} else {
-			btrfs_item_key_to_cpu(buf, &k1, i);
-			btrfs_item_key_to_cpu(buf, &k2, i + 1);
+	/* Repair the block in every root that references it */
+	ULIST_ITER_INIT(&iter);
+	while ((node = ulist_next(roots, &iter))) {
+		root_key.objectid = node->val;
+		root_key.type = BTRFS_ROOT_ITEM_KEY;
+		root_key.offset = (u64)-1;
+
+		search_root = btrfs_read_fs_root(root->fs_info, &root_key);
+		if (IS_ERR(search_root)) {
+			ret = -EIO;
+			break;
		}
-		if (btrfs_comp_cpu_keys(&k1, &k2) < 0)
-			continue;
-		ret = swap_values(root, path, buf, i);
+
+		record_root_in_trans(trans, search_root);
+
+		path->lowest_level = btrfs_header_level(buf);
+		path->skip_check_block = 1;
+		if (path->lowest_level)
+			btrfs_node_key_to_cpu(buf, &key, 0);
+		else
+			btrfs_item_key_to_cpu(buf, &key, 0);
+		ret = btrfs_search_slot(trans, search_root, &key, path, 0, 1);
+		if (ret) {
+			ret = -EIO;
+			break;
+		}
+		if (status == BTRFS_TREE_BLOCK_BAD_KEY_ORDER)
+			ret = fix_key_order(trans, search_root, path);
+		else if (status == BTRFS_TREE_BLOCK_INVALID_OFFSETS)
+			ret = fix_item_offset(trans, search_root, path);
		if (ret)
			break;
-		btrfs_mark_buffer_dirty(buf);
-		i = 0;
+		btrfs_release_path(path);
	}
-
+	ulist_free(roots);
	btrfs_free_path(path);
	return ret;
}
if (status != BTRFS_TREE_BLOCK_CLEAN) {
if (repair)
status = try_to_fix_bad_block(trans, root, buf,
- &rec->parent_key,
status);
if (status != BTRFS_TREE_BLOCK_CLEAN) {
ret = -EIO;
struct chunk_record *rec;
rec = container_of(cache, struct chunk_record, cache);
+ list_del_init(&rec->list);
+ list_del_init(&rec->dextents);
free(rec);
}
struct block_group_record *rec;
rec = container_of(cache, struct block_group_record, cache);
+ list_del_init(&rec->list);
free(rec);
}
struct device_extent_record *rec;
rec = container_of(cache, struct device_extent_record, cache);
+ if (!list_empty(&rec->chunk_list))
+ list_del_init(&rec->chunk_list);
+ if (!list_empty(&rec->device_list))
+ list_del_init(&rec->device_list);
free(rec);
}
return 0;
list_for_each_entry(back, &rec->backrefs, list) {
+ if (back->full_backref || !back->is_data)
+ continue;
+
dback = (struct data_backref *)back;
+
/*
* We only pay attention to backrefs that we found a real
* backref for.
*/
if (dback->found_ref == 0)
continue;
- if (back->full_backref)
- continue;
/*
* For now we only catch when the bytes don't match, not the
* references and fix up the ones that don't match.
*/
list_for_each_entry(back, &rec->backrefs, list) {
+ if (back->full_backref || !back->is_data)
+ continue;
+
dback = (struct data_backref *)back;
/*
*/
if (dback->found_ref == 0)
continue;
- if (back->full_backref)
- continue;
if (dback->bytes == best->bytes &&
dback->disk_bytenr == best->bytenr)
int ret;
list_for_each_entry(back, &rec->backrefs, list) {
+ /* Don't care about full backrefs (poor unloved backrefs) */
+ if (back->full_backref || !back->is_data)
+ continue;
+
dback = (struct data_backref *)back;
/* We found this one, we don't need to do a lookup */
if (dback->found_ref)
continue;
- /* Don't care about full backrefs (poor unloved backrefs) */
- if (back->full_backref)
- continue;
+
key.objectid = dback->root;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
if (dev_extent_rec->objectid != dev_rec->devid)
break;
- list_del(&dev_extent_rec->device_list);
+ list_del_init(&dev_extent_rec->device_list);
total_byte += dev_extent_rec->length;
cache = next_cache_extent(cache);
}
free_extent_cache_tree(&pending);
free_extent_cache_tree(&reada);
free_extent_cache_tree(&nodes);
+ free_chunk_cache_tree(&chunk_cache);
+ free_block_group_tree(&block_group_cache);
+ free_device_cache_tree(&dev_cache);
+ free_device_extent_tree(&dev_extent_cache);
free_extent_record_cache(root->fs_info, &extent_cache);
goto again;
}
int option_index = 0;
int init_csum_tree = 0;
int qgroup_report = 0;
- enum btrfs_open_ctree_flags ctree_flags =
- OPEN_CTREE_PARTIAL | OPEN_CTREE_EXCLUSIVE;
+ enum btrfs_open_ctree_flags ctree_flags = OPEN_CTREE_EXCLUSIVE;
while(1) {
int c;
goto err_out;
}
+ /* only allow partial opening under repair mode */
+ if (repair)
+ ctree_flags |= OPEN_CTREE_PARTIAL;
+
info = open_ctree_fs_info(argv[optind], bytenr, 0, ctree_flags);
if (!info) {
fprintf(stderr, "Couldn't open file system\n");
ret = -EIO;
goto close_out;
}
+ if (!extent_buffer_uptodate(info->csum_root->node)) {
+ fprintf(stderr, "Checksum root corrupted, rerun with --init-csum-tree option\n");
+ ret = -EIO;
+ goto close_out;
+ }
fprintf(stderr, "checking extents\n");
ret = check_chunks_and_extents(root);
eb = list_first_entry(&root->fs_info->recow_ebs,
struct extent_buffer, recow);
+ list_del_init(&eb->recow);
ret = recow_extent_buffer(root, eb);
if (ret)
break;