static int no_holes = 0;
static int init_extent_tree = 0;
static int check_data_csum = 0;
-static int low_memory = 0;
static struct btrfs_fs_info *global_info;
static struct task_ctx ctx = { 0 };
static struct cache_tree *roots_info_cache = NULL;
+/*
+ * Which check implementation to run: the original in-memory mode or the
+ * low-memory mode.  CHECK_MODE_UNKNOWN marks an unrecognized --mode value.
+ */
+enum btrfs_check_mode {
+ CHECK_MODE_ORIGINAL,
+ CHECK_MODE_LOWMEM,
+ CHECK_MODE_UNKNOWN,
+ CHECK_MODE_DEFAULT = CHECK_MODE_ORIGINAL
+};
+
+/* Selected mode; set from the command line via parse_check_mode() */
+static enum btrfs_check_mode check_mode = CHECK_MODE_DEFAULT;
+
struct extent_backref {
- struct rb_node node;
+ struct list_head list;
unsigned int is_data:1;
unsigned int found_extent_tree:1;
unsigned int full_backref:1;
unsigned int broken:1;
};
-static inline struct extent_backref* rb_node_to_extent_backref(struct rb_node *node)
+static inline struct extent_backref* to_extent_backref(struct list_head *entry)
{
- return rb_entry(node, struct extent_backref, node);
+ return list_entry(entry, struct extent_backref, list);
}
struct data_backref {
return container_of(back, struct data_backref, node);
}
-static int compare_data_backref(struct rb_node *node1, struct rb_node *node2)
-{
- struct extent_backref *ext1 = rb_node_to_extent_backref(node1);
- struct extent_backref *ext2 = rb_node_to_extent_backref(node2);
- struct data_backref *back1 = to_data_backref(ext1);
- struct data_backref *back2 = to_data_backref(ext2);
-
- WARN_ON(!ext1->is_data);
- WARN_ON(!ext2->is_data);
-
- /* parent and root are a union, so this covers both */
- if (back1->parent > back2->parent)
- return 1;
- if (back1->parent < back2->parent)
- return -1;
-
- /* This is a full backref and the parents match. */
- if (back1->node.full_backref)
- return 0;
-
- if (back1->owner > back2->owner)
- return 1;
- if (back1->owner < back2->owner)
- return -1;
-
- if (back1->offset > back2->offset)
- return 1;
- if (back1->offset < back2->offset)
- return -1;
-
- if (back1->bytes > back2->bytes)
- return 1;
- if (back1->bytes < back2->bytes)
- return -1;
-
- if (back1->found_ref && back2->found_ref) {
- if (back1->disk_bytenr > back2->disk_bytenr)
- return 1;
- if (back1->disk_bytenr < back2->disk_bytenr)
- return -1;
-
- if (back1->found_ref > back2->found_ref)
- return 1;
- if (back1->found_ref < back2->found_ref)
- return -1;
- }
-
- return 0;
-}
-
/*
* Much like data_backref, just removed the undetermined members
* and change it to use list_head.
return container_of(back, struct tree_backref, node);
}
-static int compare_tree_backref(struct rb_node *node1, struct rb_node *node2)
-{
- struct extent_backref *ext1 = rb_node_to_extent_backref(node1);
- struct extent_backref *ext2 = rb_node_to_extent_backref(node2);
- struct tree_backref *back1 = to_tree_backref(ext1);
- struct tree_backref *back2 = to_tree_backref(ext2);
-
- WARN_ON(ext1->is_data);
- WARN_ON(ext2->is_data);
-
- /* parent and root are a union, so this covers both */
- if (back1->parent > back2->parent)
- return 1;
- if (back1->parent < back2->parent)
- return -1;
-
- return 0;
-}
-
-static int compare_extent_backref(struct rb_node *node1, struct rb_node *node2)
-{
- struct extent_backref *ext1 = rb_node_to_extent_backref(node1);
- struct extent_backref *ext2 = rb_node_to_extent_backref(node2);
-
- if (ext1->is_data > ext2->is_data)
- return 1;
-
- if (ext1->is_data < ext2->is_data)
- return -1;
-
- if (ext1->full_backref > ext2->full_backref)
- return 1;
- if (ext1->full_backref < ext2->full_backref)
- return -1;
-
- if (ext1->is_data)
- return compare_data_backref(node1, node2);
- else
- return compare_tree_backref(node1, node2);
-}
-
/* Explicit initialization for extent_record::flag_block_full_backref */
enum { FLAG_UNSET = 2 };
struct extent_record {
struct list_head backrefs;
struct list_head dups;
- struct rb_root backref_tree;
struct list_head list;
struct cache_extent cache;
struct btrfs_disk_key parent_key;
unsigned int found_dir_item:1;
unsigned int found_dir_index:1;
unsigned int found_inode_ref:1;
- unsigned int filetype:8;
+ u8 filetype;
+ u8 ref_type;
int errors;
- unsigned int ref_type;
u64 dir;
u64 index;
u16 namelen;
return 0;
}
+/*
+ * Map a mode option string to the matching btrfs_check_mode value.
+ * Accepted strings: "lowmem", "orig", "original"; anything else yields
+ * CHECK_MODE_UNKNOWN so the caller can reject the option.
+ */
+static enum btrfs_check_mode parse_check_mode(const char *str)
+{
+ if (strcmp(str, "lowmem") == 0)
+ return CHECK_MODE_LOWMEM;
+ if (strcmp(str, "orig") == 0)
+ return CHECK_MODE_ORIGINAL;
+ if (strcmp(str, "original") == 0)
+ return CHECK_MODE_ORIGINAL;
+
+ return CHECK_MODE_UNKNOWN;
+}
+
/* Compatible function to allow reuse of old codes */
static u64 first_extent_gap(struct rb_root *holes)
{
struct inode_backref *tmp;
struct orphan_data_extent *src_orphan;
struct orphan_data_extent *dst_orphan;
+ struct rb_node *rb;
size_t size;
int ret;
list_add_tail(&dst_orphan->list, &rec->orphan_extents);
}
ret = copy_file_extent_holes(&rec->holes, &orig_rec->holes);
- BUG_ON(ret < 0);
+ if (ret < 0)
+ goto cleanup_rb;
return rec;
+cleanup_rb:
+ rb = rb_first(&rec->holes);
+ while (rb) {
+ struct file_extent_hole *hole;
+
+ hole = rb_entry(rb, struct file_extent_hole, node);
+ rb = rb_next(rb);
+ free(hole);
+ }
+
cleanup:
if (!list_empty(&rec->backrefs))
list_for_each_entry_safe(orig, tmp, &rec->backrefs, list) {
struct cache_extent *cache;
struct inode_backref *tmp, *backref;
struct ptr_node *node;
- unsigned char filetype;
+ u8 filetype;
if (!rec->found_inode_item)
return;
static int add_inode_backref(struct cache_tree *inode_cache,
u64 ino, u64 dir, u64 index,
const char *name, int namelen,
- int filetype, int itemtype, int errors)
+ u8 filetype, u8 itemtype, int errors)
{
struct inode_record *rec;
struct inode_backref *backref;
u32 data_len;
int error;
int nritems = 0;
- int filetype;
+ u8 filetype;
struct btrfs_dir_item *di;
struct inode_record *rec;
struct cache_tree *root_cache;
struct inode_record *rec,
struct inode_backref *backref)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_trans_handle *trans;
struct btrfs_dir_item *dir_item;
struct extent_buffer *leaf;
u32 data_size = sizeof(*dir_item) + backref->namelen;
int ret;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- btrfs_free_path(path);
+ if (IS_ERR(trans))
return PTR_ERR(trans);
- }
fprintf(stderr, "repairing missing dir index item for inode %llu\n",
(unsigned long long)rec->ino);
+
+ btrfs_init_path(&path);
key.objectid = backref->dir;
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = backref->index;
-
- ret = btrfs_insert_empty_item(trans, root, path, &key, data_size);
+ ret = btrfs_insert_empty_item(trans, root, &path, &key, data_size);
BUG_ON(ret);
- leaf = path->nodes[0];
- dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
+ leaf = path.nodes[0];
+ dir_item = btrfs_item_ptr(leaf, path.slots[0], struct btrfs_dir_item);
disk_key.objectid = cpu_to_le64(rec->ino);
disk_key.type = BTRFS_INODE_ITEM_KEY;
name_ptr = (unsigned long)(dir_item + 1);
write_extent_buffer(leaf, backref->name, name_ptr, backref->namelen);
btrfs_mark_buffer_dirty(leaf);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
btrfs_commit_transaction(trans, root);
backref->found_dir_index = 1;
{
struct btrfs_trans_handle *trans;
struct btrfs_dir_item *di;
- struct btrfs_path *path;
+ struct btrfs_path path;
int ret = 0;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- btrfs_free_path(path);
+ if (IS_ERR(trans))
return PTR_ERR(trans);
- }
-
fprintf(stderr, "Deleting bad dir index [%llu,%u,%llu] root %llu\n",
(unsigned long long)backref->dir,
BTRFS_DIR_INDEX_KEY, (unsigned long long)backref->index,
(unsigned long long)root->objectid);
- di = btrfs_lookup_dir_index(trans, root, path, backref->dir,
+ btrfs_init_path(&path);
+ di = btrfs_lookup_dir_index(trans, root, &path, backref->dir,
backref->name, backref->namelen,
backref->index, -1);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
btrfs_commit_transaction(trans, root);
if (ret == -ENOENT)
return 0;
}
if (!di)
- ret = btrfs_del_item(trans, root, path);
+ ret = btrfs_del_item(trans, root, &path);
else
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ ret = btrfs_delete_one_dir_name(trans, root, &path, di);
BUG_ON(ret);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
btrfs_commit_transaction(trans, root);
return ret;
}
*/
static int find_normal_file_extent(struct btrfs_root *root, u64 ino)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_file_extent_item *fi;
u8 type;
int ret = 0;
- path = btrfs_alloc_path();
- if (!path)
- goto out;
+ btrfs_init_path(&path);
key.objectid = ino;
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
if (ret < 0) {
ret = 0;
goto out;
}
- if (ret && path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(root, path);
+ if (ret && path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+ ret = btrfs_next_leaf(root, &path);
if (ret) {
ret = 0;
goto out;
}
}
while (1) {
- btrfs_item_key_to_cpu(path->nodes[0], &found_key,
- path->slots[0]);
+ btrfs_item_key_to_cpu(path.nodes[0], &found_key,
+ path.slots[0]);
if (found_key.objectid != ino ||
found_key.type != BTRFS_EXTENT_DATA_KEY)
break;
- fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
struct btrfs_file_extent_item);
- type = btrfs_file_extent_type(path->nodes[0], fi);
+ type = btrfs_file_extent_type(path.nodes[0], fi);
if (type != BTRFS_FILE_EXTENT_INLINE) {
ret = 1;
goto out;
}
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
static int try_repair_inode(struct btrfs_root *root, struct inode_record *rec)
{
struct btrfs_trans_handle *trans;
- struct btrfs_path *path;
+ struct btrfs_path path;
int ret = 0;
if (!(rec->errors & (I_ERR_DIR_ISIZE_WRONG |
I_ERR_FILE_NBYTES_WRONG)))
return rec->errors;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
/*
* For nlink repair, it may create a dir and add link, so
* 2 for parent(256)'s dir_index and dir_item
* 2 for lost+found dir's dir_index and dir_item for the file
*/
trans = btrfs_start_transaction(root, 7);
- if (IS_ERR(trans)) {
- btrfs_free_path(path);
+ if (IS_ERR(trans))
return PTR_ERR(trans);
- }
+ btrfs_init_path(&path);
if (rec->errors & I_ERR_NO_INODE_ITEM)
- ret = repair_inode_no_item(trans, root, path, rec);
+ ret = repair_inode_no_item(trans, root, &path, rec);
if (!ret && rec->errors & I_ERR_FILE_EXTENT_ORPHAN)
- ret = repair_inode_orphan_extent(trans, root, path, rec);
+ ret = repair_inode_orphan_extent(trans, root, &path, rec);
if (!ret && rec->errors & I_ERR_FILE_EXTENT_DISCOUNT)
- ret = repair_inode_discount_extent(trans, root, path, rec);
+ ret = repair_inode_discount_extent(trans, root, &path, rec);
if (!ret && rec->errors & I_ERR_DIR_ISIZE_WRONG)
- ret = repair_inode_isize(trans, root, path, rec);
+ ret = repair_inode_isize(trans, root, &path, rec);
if (!ret && rec->errors & I_ERR_NO_ORPHAN_ITEM)
- ret = repair_inode_orphan_item(trans, root, path, rec);
+ ret = repair_inode_orphan_item(trans, root, &path, rec);
if (!ret && rec->errors & I_ERR_LINK_COUNT_WRONG)
- ret = repair_inode_nlinks(trans, root, path, rec);
+ ret = repair_inode_nlinks(trans, root, &path, rec);
if (!ret && rec->errors & I_ERR_FILE_NBYTES_WRONG)
- ret = repair_inode_nbytes(trans, root, path, rec);
+ ret = repair_inode_nbytes(trans, root, &path, rec);
btrfs_commit_transaction(trans, root);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
free(backref);
}
- kfree(rec);
+ free(rec);
}
FREE_EXTENT_CACHE_BASED_TREE(root_recs, free_root_record);
struct cache_tree *corrupt_blocks)
{
struct btrfs_trans_handle *trans;
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_corrupt_block *corrupt;
struct cache_extent *cache;
struct btrfs_key key;
if (cache_tree_empty(corrupt_blocks))
return 0;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
fprintf(stderr, "Error starting transaction: %s\n",
strerror(-ret));
- goto out_free_path;
+ return ret;
}
+ btrfs_init_path(&path);
cache = first_cache_extent(corrupt_blocks);
while (cache) {
corrupt = container_of(cache, struct btrfs_corrupt_block,
cache);
level = corrupt->level;
- path->lowest_level = level;
+ path.lowest_level = level;
key.objectid = corrupt->key.objectid;
key.type = corrupt->key.type;
key.offset = corrupt->key.offset;
* so ins_len set to 0 here.
* Balance will be done after all corrupt node/leaf is deleted.
*/
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
if (ret < 0)
goto out;
- offset = btrfs_node_blockptr(path->nodes[level],
- path->slots[level]);
+ offset = btrfs_node_blockptr(path.nodes[level],
+ path.slots[level]);
/* Remove the ptr */
- ret = btrfs_del_ptr(trans, root, path, level,
- path->slots[level]);
+ ret = btrfs_del_ptr(trans, root, &path, level,
+ path.slots[level]);
if (ret < 0)
goto out;
/*
* Remove the corresponding extent
* return value is not concerned.
*/
- btrfs_release_path(path);
+ btrfs_release_path(&path);
ret = btrfs_free_extent(trans, root, offset, root->nodesize,
0, root->root_key.objectid,
level - 1, 0);
corrupt = container_of(cache, struct btrfs_corrupt_block,
cache);
memcpy(&key, &corrupt->key, sizeof(key));
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
if (ret < 0)
goto out;
/* return will always >0 since it won't find the item */
ret = 0;
- btrfs_release_path(path);
+ btrfs_release_path(&path);
cache = next_cache_extent(cache);
}
out:
btrfs_commit_transaction(trans, root);
-out_free_path:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
level = root_item->drop_level;
path.lowest_level = level;
+ if (level > btrfs_header_level(root->node) ||
+ level >= BTRFS_MAX_LEVEL) {
+ error("ignoring invalid drop level: %u", level);
+ goto skip_walking;
+ }
wret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
if (wret < 0)
goto skip_walking;
static int all_backpointers_checked(struct extent_record *rec, int print_errs)
{
- struct rb_node *n;
+ struct list_head *cur = rec->backrefs.next;
struct extent_backref *back;
struct tree_backref *tback;
struct data_backref *dback;
u64 found = 0;
int err = 0;
- for (n = rb_first(&rec->backref_tree); n; n = rb_next(n)) {
- back = rb_node_to_extent_backref(n);
+ while(cur != &rec->backrefs) {
+ back = to_extent_backref(cur);
+ cur = cur->next;
if (!back->found_extent_tree) {
err = 1;
if (!print_errs)
return err;
}
-static void __free_one_backref(struct rb_node *node)
-{
- struct extent_backref *back = rb_node_to_extent_backref(node);
-
- free(back);
-}
-
-static void free_all_extent_backrefs(struct extent_record *rec)
+static int free_all_extent_backrefs(struct extent_record *rec)
{
- rb_free_nodes(&rec->backref_tree, __free_one_backref);
+ struct extent_backref *back;
+ struct list_head *cur;
+ while (!list_empty(&rec->backrefs)) {
+ cur = rec->backrefs.next;
+ back = to_extent_backref(cur);
+ list_del(cur);
+ free(back);
+ }
+ return 0;
}
static void free_extent_record_cache(struct btrfs_fs_info *fs_info,
struct extent_record *rec,
struct extent_buffer *buf)
{
- struct extent_backref *node, *tmp;
+ struct extent_backref *node;
struct tree_backref *back;
struct btrfs_root *ref_root;
struct btrfs_key key;
int found = 0;
int ret;
- rbtree_postorder_for_each_entry_safe(node, tmp,
- &rec->backref_tree, node) {
+ list_for_each_entry(node, &rec->backrefs, list) {
if (node->is_data)
continue;
if (!node->found_ref)
static int is_extent_tree_record(struct extent_record *rec)
{
- struct extent_backref *ref, *tmp;
+ struct list_head *cur = rec->backrefs.next;
+ struct extent_backref *node;
struct tree_backref *back;
int is_extent = 0;
- rbtree_postorder_for_each_entry_safe(ref, tmp,
- &rec->backref_tree, node) {
- if (ref->is_data)
+ while(cur != &rec->backrefs) {
+ node = to_extent_backref(cur);
+ cur = cur->next;
+ if (node->is_data)
return 0;
- back = to_tree_backref(ref);
- if (ref->full_backref)
+ back = to_tree_backref(node);
+ if (node->full_backref)
return 0;
if (back->root == BTRFS_EXTENT_TREE_OBJECTID)
is_extent = 1;
struct ulist *roots;
struct ulist_node *node;
struct btrfs_root *search_root;
- struct btrfs_path *path;
+ struct btrfs_path path;
struct ulist_iterator iter;
struct btrfs_key root_key, key;
int ret;
status != BTRFS_TREE_BLOCK_INVALID_OFFSETS)
return -EIO;
- path = btrfs_alloc_path();
- if (!path)
- return -EIO;
-
- ret = btrfs_find_all_roots(NULL, root->fs_info, buf->start,
- 0, &roots);
- if (ret) {
- btrfs_free_path(path);
+ ret = btrfs_find_all_roots(NULL, root->fs_info, buf->start, 0, &roots);
+ if (ret)
return -EIO;
- }
+ btrfs_init_path(&path);
ULIST_ITER_INIT(&iter);
while ((node = ulist_next(roots, &iter))) {
root_key.objectid = node->val;
break;
}
- path->lowest_level = btrfs_header_level(buf);
- path->skip_check_block = 1;
- if (path->lowest_level)
+ path.lowest_level = btrfs_header_level(buf);
+ path.skip_check_block = 1;
+ if (path.lowest_level)
btrfs_node_key_to_cpu(buf, &key, 0);
else
btrfs_item_key_to_cpu(buf, &key, 0);
- ret = btrfs_search_slot(trans, search_root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, search_root, &key, &path, 0, 1);
if (ret) {
ret = -EIO;
btrfs_commit_transaction(trans, search_root);
break;
}
if (status == BTRFS_TREE_BLOCK_BAD_KEY_ORDER)
- ret = fix_key_order(trans, search_root, path);
+ ret = fix_key_order(trans, search_root, &path);
else if (status == BTRFS_TREE_BLOCK_INVALID_OFFSETS)
- ret = fix_item_offset(trans, search_root, path);
+ ret = fix_item_offset(trans, search_root, &path);
if (ret) {
btrfs_commit_transaction(trans, search_root);
break;
}
- btrfs_release_path(path);
+ btrfs_release_path(&path);
btrfs_commit_transaction(trans, search_root);
}
ulist_free(roots);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
return ret;
}
-
static struct tree_backref *find_tree_backref(struct extent_record *rec,
u64 parent, u64 root)
{
- struct rb_node *node;
- struct tree_backref *back = NULL;
- struct tree_backref match = {
- .node = {
- .is_data = 0,
- },
- };
+ struct list_head *cur = rec->backrefs.next;
+ struct extent_backref *node;
+ struct tree_backref *back;
- if (parent) {
- match.parent = parent;
- match.node.full_backref = 1;
- } else {
- match.root = root;
+ while(cur != &rec->backrefs) {
+ node = to_extent_backref(cur);
+ cur = cur->next;
+ if (node->is_data)
+ continue;
+ back = to_tree_backref(node);
+ if (parent > 0) {
+ if (!node->full_backref)
+ continue;
+ if (parent == back->parent)
+ return back;
+ } else {
+ if (node->full_backref)
+ continue;
+ if (back->root == root)
+ return back;
+ }
}
-
- node = rb_search(&rec->backref_tree, &match.node.node,
- (rb_compare_keys)compare_extent_backref, NULL);
- if (node)
- back = to_tree_backref(rb_node_to_extent_backref(node));
-
- return back;
+ return NULL;
}
static struct tree_backref *alloc_tree_backref(struct extent_record *rec,
ref->root = root;
ref->node.full_backref = 0;
}
- rb_insert(&rec->backref_tree, &ref->node.node, compare_extent_backref);
+ list_add_tail(&ref->node.list, &rec->backrefs);
return ref;
}
int found_ref,
u64 disk_bytenr, u64 bytes)
{
- struct rb_node *node;
- struct data_backref *back = NULL;
- struct data_backref match = {
- .node = {
- .is_data = 1,
- },
- .owner = owner,
- .offset = offset,
- .bytes = bytes,
- .found_ref = found_ref,
- .disk_bytenr = disk_bytenr,
- };
+ struct list_head *cur = rec->backrefs.next;
+ struct extent_backref *node;
+ struct data_backref *back;
- if (parent) {
- match.parent = parent;
- match.node.full_backref = 1;
- } else {
- match.root = root;
+ while(cur != &rec->backrefs) {
+ node = to_extent_backref(cur);
+ cur = cur->next;
+ if (!node->is_data)
+ continue;
+ back = to_data_backref(node);
+ if (parent > 0) {
+ if (!node->full_backref)
+ continue;
+ if (parent == back->parent)
+ return back;
+ } else {
+ if (node->full_backref)
+ continue;
+ if (back->root == root && back->owner == owner &&
+ back->offset == offset) {
+ if (found_ref && node->found_ref &&
+ (back->bytes != bytes ||
+ back->disk_bytenr != disk_bytenr))
+ continue;
+ return back;
+ }
+ }
}
-
- node = rb_search(&rec->backref_tree, &match.node.node,
- (rb_compare_keys)compare_extent_backref, NULL);
- if (node)
- back = to_data_backref(rb_node_to_extent_backref(node));
-
- return back;
+ return NULL;
}
static struct data_backref *alloc_data_backref(struct extent_record *rec,
ref->bytes = max_size;
ref->found_ref = 0;
ref->num_refs = 0;
- rb_insert(&rec->backref_tree, &ref->node.node, compare_extent_backref);
+ list_add_tail(&ref->node.list, &rec->backrefs);
if (max_size > rec->max_size)
rec->max_size = max_size;
return ref;
* Check SYSTEM extent, as it's also marked as metadata, we can only
* make sure it's a SYSTEM extent by its backref
*/
- if (!RB_EMPTY_ROOT(&rec->backref_tree)) {
+ if (!list_empty(&rec->backrefs)) {
struct extent_backref *node;
struct tree_backref *tback;
u64 bg_type;
- node = rb_node_to_extent_backref(rb_first(&rec->backref_tree));
+ node = to_extent_backref(rec->backrefs.next);
if (node->is_data) {
/* tree block shouldn't have data backref */
rec->wrong_chunk_type = 1;
INIT_LIST_HEAD(&rec->backrefs);
INIT_LIST_HEAD(&rec->dups);
INIT_LIST_HEAD(&rec->list);
- rec->backref_tree = RB_ROOT;
memcpy(&rec->parent_key, &tmpl->parent_key, sizeof(tmpl->parent_key));
rec->cache.start = tmpl->start;
rec->cache.size = tmpl->nr;
ret = insert_cache_extent(extent_cache, &rec->cache);
- BUG_ON(ret);
+ if (ret) {
+ free(rec);
+ return ret;
+ }
bytes_used += rec->nr;
if (tmpl->metadata)
- rec->crossing_stripes = check_crossing_stripes(rec->start,
- global_info->tree_root->nodesize);
+ rec->crossing_stripes = check_crossing_stripes(global_info,
+ rec->start, global_info->tree_root->nodesize);
check_extent_type(rec);
return ret;
}
*/
if (tmpl->metadata)
rec->crossing_stripes = check_crossing_stripes(
- rec->start, global_info->tree_root->nodesize);
+ global_info, rec->start,
+ global_info->tree_root->nodesize);
check_extent_type(rec);
maybe_free_extent_rec(extent_cache, rec);
return ret;
struct extent_record *rec;
struct tree_backref *back;
struct cache_extent *cache;
+ int ret;
cache = lookup_cache_extent(extent_cache, bytenr, 1);
if (!cache) {
tmpl.nr = 1;
tmpl.metadata = 1;
- add_extent_rec_nolookup(extent_cache, &tmpl);
+ ret = add_extent_rec_nolookup(extent_cache, &tmpl);
+ if (ret)
+ return ret;
+ /* really a bug in the cache_extent implementation if this fails */
cache = lookup_cache_extent(extent_cache, bytenr, 1);
if (!cache)
- abort();
+ return -ENOENT;
}
rec = container_of(cache, struct extent_record, cache);
if (rec->start != bytenr) {
- abort();
+ /*
+ * Several causes, from an unaligned bytenr to overlapping extents
+ */
+ return -EEXIST;
}
back = find_tree_backref(rec, parent, root);
if (!back) {
back = alloc_tree_backref(rec, parent, root);
- BUG_ON(!back);
+ if (!back)
+ return -ENOMEM;
}
if (found_ref) {
struct extent_record *rec;
struct data_backref *back;
struct cache_extent *cache;
+ int ret;
cache = lookup_cache_extent(extent_cache, bytenr, 1);
if (!cache) {
tmpl.nr = 1;
tmpl.max_size = max_size;
- add_extent_rec_nolookup(extent_cache, &tmpl);
+ ret = add_extent_rec_nolookup(extent_cache, &tmpl);
+ if (ret)
+ return ret;
cache = lookup_cache_extent(extent_cache, bytenr, 1);
if (!cache)
{
struct btrfs_extent_ref_v0 *ref0;
struct btrfs_key key;
+ int ret;
btrfs_item_key_to_cpu(leaf, &key, slot);
ref0 = btrfs_item_ptr(leaf, slot, struct btrfs_extent_ref_v0);
if (btrfs_ref_objectid_v0(leaf, ref0) < BTRFS_FIRST_FREE_OBJECTID) {
- add_tree_backref(extent_cache, key.objectid, key.offset, 0, 0);
+ ret = add_tree_backref(extent_cache, key.objectid, key.offset,
+ 0, 0);
} else {
- add_data_backref(extent_cache, key.objectid, key.offset, 0,
- 0, 0, btrfs_ref_count_v0(leaf, ref0), 0, 0);
+ ret = add_data_backref(extent_cache, key.objectid, key.offset,
+ 0, 0, 0, btrfs_ref_count_v0(leaf, ref0), 0, 0);
}
- return 0;
+ return ret;
}
#endif
int slot)
{
struct chunk_record *rec;
+ struct btrfs_chunk *chunk;
int ret = 0;
+ chunk = btrfs_item_ptr(eb, slot, struct btrfs_chunk);
+ /*
+ * Do extra check for this chunk item,
+ *
+ * It's still possible one can craft a leaf with CHUNK_ITEM, with
+ * wrong owner(3) out of chunk tree, to pass both chunk tree check
+ * and owner<->key_type check.
+ */
+ ret = btrfs_check_chunk_valid(global_info->tree_root, eb, chunk, slot,
+ key->offset);
+ if (ret < 0) {
+ error("chunk(%llu, %llu) is not valid, ignore it",
+ key->offset, btrfs_chunk_length(eb, chunk));
+ return 0;
+ }
rec = btrfs_new_chunk_record(eb, key, slot);
ret = insert_cache_extent(chunk_cache, &rec->cache);
if (ret) {
struct extent_record tmpl;
unsigned long end;
unsigned long ptr;
+ int ret;
int type;
u32 item_size = btrfs_item_size_nr(eb, slot);
u64 refs = 0;
num_bytes = key.offset;
}
+ if (!IS_ALIGNED(key.objectid, root->sectorsize)) {
+ error("ignoring invalid extent, bytenr %llu is not aligned to %u",
+ key.objectid, root->sectorsize);
+ return -EIO;
+ }
if (item_size < sizeof(*ei)) {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
struct btrfs_extent_item_v0 *ei0;
metadata = 1;
else
metadata = 0;
+ if (metadata && num_bytes != root->nodesize) {
+ error("ignore invalid metadata extent, length %llu does not equal to %u",
+ num_bytes, root->nodesize);
+ return -EIO;
+ }
+ if (!metadata && !IS_ALIGNED(num_bytes, root->sectorsize)) {
+ error("ignore invalid data extent, length %llu is not aligned to %u",
+ num_bytes, root->sectorsize);
+ return -EIO;
+ }
memset(&tmpl, 0, sizeof(tmpl));
tmpl.start = key.objectid;
offset = btrfs_extent_inline_ref_offset(eb, iref);
switch (type) {
case BTRFS_TREE_BLOCK_REF_KEY:
- add_tree_backref(extent_cache, key.objectid,
- 0, offset, 0);
+ ret = add_tree_backref(extent_cache, key.objectid,
+ 0, offset, 0);
+ if (ret < 0)
+ error("add_tree_backref failed: %s",
+ strerror(-ret));
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
- add_tree_backref(extent_cache, key.objectid,
- offset, 0, 0);
+ ret = add_tree_backref(extent_cache, key.objectid,
+ offset, 0, 0);
+ if (ret < 0)
+ error("add_tree_backref failed: %s",
+ strerror(-ret));
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
continue;
if (logical[nr] == offset) {
if (stripe_len >= bytes) {
- kfree(logical);
+ free(logical);
return 0;
}
bytes -= stripe_len;
} else if (logical[nr] < offset) {
if (logical[nr] + stripe_len >=
offset + bytes) {
- kfree(logical);
+ free(logical);
return 0;
}
bytes = (offset + bytes) -
offset,
logical[nr] - offset);
if (ret) {
- kfree(logical);
+ free(logical);
return ret;
}
}
}
- kfree(logical);
+ free(logical);
}
entry = btrfs_find_free_space(cache->free_space_ctl, offset, bytes);
static int verify_space_cache(struct btrfs_root *root,
struct btrfs_block_group_cache *cache)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct extent_buffer *leaf;
struct btrfs_key key;
u64 last;
int ret = 0;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
root = root->fs_info->extent_root;
last = max_t(u64, cache->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+ btrfs_init_path(&path);
key.objectid = last;
key.offset = 0;
key.type = BTRFS_EXTENT_ITEM_KEY;
-
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
if (ret < 0)
goto out;
ret = 0;
while (1) {
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(root, path);
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+ ret = btrfs_next_leaf(root, &path);
if (ret < 0)
goto out;
if (ret > 0) {
break;
}
}
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ leaf = path.nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
if (key.objectid >= cache->key.offset + cache->key.objectid)
break;
if (key.type != BTRFS_EXTENT_ITEM_KEY &&
key.type != BTRFS_METADATA_ITEM_KEY) {
- path->slots[0]++;
+ path.slots[0]++;
continue;
}
last = key.objectid + key.offset;
else
last = key.objectid + root->nodesize;
- path->slots[0]++;
+ path.slots[0]++;
continue;
}
last = key.objectid + key.offset;
else
last = key.objectid + root->nodesize;
- path->slots[0]++;
+ path.slots[0]++;
}
if (last < cache->key.objectid + cache->key.offset)
cache->key.offset - last);
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
if (!ret &&
!RB_EMPTY_ROOT(&cache->free_space_ctl->free_space_offset)) {
csum = btrfs_csum_data(NULL, (char *)data + tmp,
csum, root->sectorsize);
- btrfs_csum_final(csum, (char *)&csum);
+ btrfs_csum_final(csum, (u8 *)&csum);
csum_offset = leaf_offset +
tmp / root->sectorsize * csum_size;
static int check_extent_exists(struct btrfs_root *root, u64 bytenr,
u64 num_bytes)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct extent_buffer *leaf;
struct btrfs_key key;
int ret;
- path = btrfs_alloc_path();
- if (!path) {
- fprintf(stderr, "Error allocating path\n");
- return -ENOMEM;
- }
-
+ btrfs_init_path(&path);
key.objectid = bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = (u64)-1;
again:
- ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
+ ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, &path,
0, 0);
if (ret < 0) {
fprintf(stderr, "Error looking up extent record %d\n", ret);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
} else if (ret) {
- if (path->slots[0] > 0) {
- path->slots[0]--;
+ if (path.slots[0] > 0) {
+ path.slots[0]--;
} else {
- ret = btrfs_prev_leaf(root, path);
+ ret = btrfs_prev_leaf(root, &path);
if (ret < 0) {
goto out;
} else if (ret > 0) {
}
}
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
/*
* Block group items come before extent items if they have the same
* EXTENT_ITEM_KEY please?
*/
while (key.type > BTRFS_EXTENT_ITEM_KEY) {
- if (path->slots[0] > 0) {
- path->slots[0]--;
+ if (path.slots[0] > 0) {
+ path.slots[0]--;
} else {
- ret = btrfs_prev_leaf(root, path);
+ ret = btrfs_prev_leaf(root, &path);
if (ret < 0) {
goto out;
} else if (ret > 0) {
goto out;
}
}
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
}
while (num_bytes) {
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(root, path);
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+ ret = btrfs_next_leaf(root, &path);
if (ret < 0) {
fprintf(stderr, "Error going to next leaf "
"%d\n", ret);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
} else if (ret) {
break;
}
}
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ leaf = path.nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
if (key.type != BTRFS_EXTENT_ITEM_KEY) {
- path->slots[0]++;
+ path.slots[0]++;
continue;
}
if (key.objectid + key.offset < bytenr) {
- path->slots[0]++;
+ path.slots[0]++;
continue;
}
if (key.objectid > bytenr + num_bytes)
* in real life, but no harm in coding it up
* anyway just in case.
*/
- btrfs_release_path(path);
+ btrfs_release_path(&path);
ret = check_extent_exists(root, new_start,
new_bytes);
if (ret) {
}
num_bytes = key.objectid - bytenr;
}
- path->slots[0]++;
+ path.slots[0]++;
}
ret = 0;
ret = 1;
}
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
static int check_csums(struct btrfs_root *root)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct extent_buffer *leaf;
struct btrfs_key key;
u64 offset = 0, num_bytes = 0;
return -ENOENT;
}
+ btrfs_init_path(&path);
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
key.type = BTRFS_EXTENT_CSUM_KEY;
key.offset = 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
if (ret < 0) {
fprintf(stderr, "Error searching csum tree %d\n", ret);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
- if (ret > 0 && path->slots[0])
- path->slots[0]--;
+ if (ret > 0 && path.slots[0])
+ path.slots[0]--;
ret = 0;
while (1) {
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(root, path);
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+ ret = btrfs_next_leaf(root, &path);
if (ret < 0) {
fprintf(stderr, "Error going to next leaf "
"%d\n", ret);
if (ret)
break;
}
- leaf = path->nodes[0];
+ leaf = path.nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
if (key.type != BTRFS_EXTENT_CSUM_KEY) {
- path->slots[0]++;
+ path.slots[0]++;
continue;
}
- data_len = (btrfs_item_size_nr(leaf, path->slots[0]) /
+ data_len = (btrfs_item_size_nr(leaf, path.slots[0]) /
csum_size) * root->sectorsize;
if (!check_data_csum)
goto skip_csum_check;
- leaf_offset = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ leaf_offset = btrfs_item_ptr_offset(leaf, path.slots[0]);
ret = check_extent_csums(root, key.offset, data_len,
leaf_offset, leaf);
if (ret)
num_bytes = 0;
}
num_bytes += data_len;
- path->slots[0]++;
+ path.slots[0]++;
}
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return errors;
}
cache = lookup_cache_extent(extent_cache, buf->start, 1);
/* we have added this extent before */
- BUG_ON(!cache);
+ if (!cache)
+ return -ENOENT;
+
rec = container_of(cache, struct extent_record, cache);
/*
return 0;
}
+/*
+ * Print a diagnostic for a key whose type does not belong in the tree
+ * (root objectid) it was found in, in the form:
+ *   Invalid key type(<type>) found in root(<root>)
+ */
+static void report_mismatch_key_root(u8 key_type, u64 rootid)
+{
+ fprintf(stderr, "Invalid key type(");
+ print_key_type(stderr, 0, key_type);
+ fprintf(stderr, ") found in root(");
+ print_objectid(stderr, rootid, 0);
+ fprintf(stderr, ")\n");
+}
+
+/*
+ * Check that a key type is valid for the tree (root objectid) that owns
+ * the extent buffer it was found in.
+ *
+ * This is an early check to catch invalid keys before the item content
+ * is processed further.  It is not comprehensive: key types not listed
+ * in the switch below are accepted for any root.
+ *
+ * Returns 0 if the key type is acceptable, -EINVAL (after reporting the
+ * mismatch) otherwise.
+ */
+static int check_type_with_root(u64 rootid, u8 key_type)
+{
+	switch (key_type) {
+	/* Only valid in chunk tree */
+	case BTRFS_DEV_ITEM_KEY:
+	case BTRFS_CHUNK_ITEM_KEY:
+		if (rootid != BTRFS_CHUNK_TREE_OBJECTID)
+			goto err;
+		break;
+	/*
+	 * Only valid in csum and log trees.  NOTE(review): this case was
+	 * previously labelled with the tree objectid
+	 * BTRFS_CSUM_TREE_OBJECTID, which can never match a key type and
+	 * made the check dead code; BTRFS_EXTENT_CSUM_KEY is the intended
+	 * key type.
+	 */
+	case BTRFS_EXTENT_CSUM_KEY:
+		if (!(rootid == BTRFS_CSUM_TREE_OBJECTID ||
+		      rootid == BTRFS_TREE_LOG_OBJECTID))
+			goto err;
+		break;
+	case BTRFS_EXTENT_ITEM_KEY:
+	case BTRFS_METADATA_ITEM_KEY:
+	case BTRFS_BLOCK_GROUP_ITEM_KEY:
+		if (rootid != BTRFS_EXTENT_TREE_OBJECTID)
+			goto err;
+		break;
+	case BTRFS_ROOT_ITEM_KEY:
+		if (rootid != BTRFS_ROOT_TREE_OBJECTID)
+			goto err;
+		break;
+	case BTRFS_DEV_EXTENT_KEY:
+		if (rootid != BTRFS_DEV_TREE_OBJECTID)
+			goto err;
+		break;
+	}
+	return 0;
+err:
+	report_mismatch_key_root(key_type, rootid);
+	return -EINVAL;
+}
+
static int run_next_block(struct btrfs_root *root,
struct block_info *bits,
int bits_nr,
for (i = 0; i < nritems; i++) {
struct btrfs_file_extent_item *fi;
btrfs_item_key_to_cpu(buf, &key, i);
+ /*
+ * Check key type against the leaf owner.
+ * Could filter quite a lot of early error if
+ * owner is correct
+ */
+ if (check_type_with_root(btrfs_header_owner(buf),
+ key.type)) {
+ fprintf(stderr, "ignoring invalid key\n");
+ continue;
+ }
if (key.type == BTRFS_EXTENT_ITEM_KEY) {
process_extent_item(root, extent_cache, buf,
i);
}
if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
- add_tree_backref(extent_cache, key.objectid, 0,
- key.offset, 0);
+ ret = add_tree_backref(extent_cache,
+ key.objectid, 0, key.offset, 0);
+ if (ret < 0)
+ error("add_tree_backref failed: %s",
+ strerror(-ret));
continue;
}
if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
- add_tree_backref(extent_cache, key.objectid,
- key.offset, 0, 0);
+ ret = add_tree_backref(extent_cache,
+ key.objectid, key.offset, 0, 0);
+ if (ret < 0)
+ error("add_tree_backref failed: %s",
+ strerror(-ret));
continue;
}
if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
tmpl.metadata = 1;
tmpl.max_size = size;
ret = add_extent_rec(extent_cache, &tmpl);
- BUG_ON(ret);
+ if (ret < 0)
+ goto out;
- add_tree_backref(extent_cache, ptr, parent, owner, 1);
+ ret = add_tree_backref(extent_cache, ptr, parent,
+ owner, 1);
+ if (ret < 0) {
+ error("add_tree_backref failed: %s",
+ strerror(-ret));
+ continue;
+ }
if (level > 1) {
add_pending(nodes, seen, ptr, size);
u64 objectid)
{
struct extent_record tmpl;
+ int ret;
if (btrfs_header_level(buf) > 0)
add_pending(nodes, seen, buf->start, buf->len);
if (objectid == BTRFS_TREE_RELOC_OBJECTID ||
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
- add_tree_backref(extent_cache, buf->start, buf->start,
- 0, 1);
+ ret = add_tree_backref(extent_cache, buf->start, buf->start,
+ 0, 1);
else
- add_tree_backref(extent_cache, buf->start, 0, objectid, 1);
- return 0;
+ ret = add_tree_backref(extent_cache, buf->start, 0, objectid,
+ 1);
+ return ret;
}
/* as we fix the tree, we might be deleting blocks that
back->node.found_extent_tree = 0;
if (!back->node.found_extent_tree && back->node.found_ref) {
- rb_erase(&back->node.node, &rec->backref_tree);
+ list_del(&back->node.list);
free(back);
}
} else {
back->node.found_extent_tree = 0;
}
if (!back->node.found_extent_tree && back->node.found_ref) {
- rb_erase(&back->node.node, &rec->backref_tree);
+ list_del(&back->node.list);
free(back);
}
}
struct extent_buffer *leaf;
struct btrfs_key ins_key;
struct btrfs_extent_item *ei;
- struct tree_backref *tback;
struct data_backref *dback;
struct btrfs_tree_block_info *bi;
} else {
struct btrfs_disk_key copy_key;;
- tback = to_tree_backref(back);
bi = (struct btrfs_tree_block_info *)(ei + 1);
memset_extent_buffer(leaf, 0, (unsigned long)bi,
sizeof(*bi));
dback->found_ref);
} else {
u64 parent;
+ struct tree_backref *tback;
tback = to_tree_backref(back);
if (back->full_backref)
struct extent_entry *entry, *best = NULL, *prev = NULL;
list_for_each_entry(entry, entries, list) {
- if (!prev) {
- prev = entry;
- continue;
- }
-
/*
* If there are as many broken entries as entries then we know
* not to trust this particular entry.
continue;
/*
+ * Special case, when there are only two entries and 'best' is
+ * the first one
+ */
+ if (!prev) {
+ best = entry;
+ prev = entry;
+ continue;
+ }
+
+ /*
* If our current entry == best then we can't be sure our best
* is really the best, so we need to keep searching.
*/
static int verify_backrefs(struct btrfs_fs_info *info, struct btrfs_path *path,
struct extent_record *rec)
{
- struct extent_backref *back, *tmp;
+ struct extent_backref *back;
struct data_backref *dback;
struct extent_entry *entry, *best = NULL;
LIST_HEAD(entries);
if (rec->metadata)
return 0;
- rbtree_postorder_for_each_entry_safe(back, tmp,
- &rec->backref_tree, node) {
+ list_for_each_entry(back, &rec->backrefs, list) {
if (back->full_backref || !back->is_data)
continue;
* Ok great we all agreed on an extent record, let's go find the real
* references and fix up the ones that don't match.
*/
- rbtree_postorder_for_each_entry_safe(back, tmp,
- &rec->backref_tree, node) {
+ list_for_each_entry(back, &rec->backrefs, list) {
if (back->full_backref || !back->is_data)
continue;
{
struct btrfs_trans_handle *trans;
LIST_HEAD(delete_list);
- struct btrfs_path *path;
+ struct btrfs_path path;
struct extent_record *tmp, *good, *n;
int nr_del = 0;
int ret = 0, err;
struct btrfs_key key;
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ btrfs_init_path(&path);
good = rec;
/* Find the record that covers all of the duplicates. */
abort();
}
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
if (ret) {
if (ret > 0)
ret = -EINVAL;
break;
}
- ret = btrfs_del_item(trans, root, path);
+ ret = btrfs_del_item(trans, root, &path);
if (ret)
break;
- btrfs_release_path(path);
+ btrfs_release_path(&path);
nr_del++;
}
err = btrfs_commit_transaction(trans, root);
free(tmp);
}
- btrfs_free_path(path);
+ btrfs_release_path(&path);
if (!ret && !nr_del)
rec->num_duplicates = 0;
struct extent_record *rec)
{
struct btrfs_root *root;
- struct extent_backref *back, *tmp;
+ struct extent_backref *back;
struct data_backref *dback;
struct cache_extent *cache;
struct btrfs_file_extent_item *fi;
u64 bytenr, bytes;
int ret;
- rbtree_postorder_for_each_entry_safe(back, tmp,
- &rec->backref_tree, node) {
+ list_for_each_entry(back, &rec->backrefs, list) {
/* Don't care about full backrefs (poor unloved backrefs) */
if (back->full_backref || !back->is_data)
continue;
{
struct btrfs_key key;
struct btrfs_root *dest_root;
- struct extent_backref *back, *tmp;
+ struct extent_backref *back;
struct data_backref *dback;
struct orphan_data_extent *orphan;
- struct btrfs_path *path;
+ struct btrfs_path path;
int recorded_data_ref = 0;
int ret = 0;
if (rec->metadata)
return 1;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- rbtree_postorder_for_each_entry_safe(back, tmp,
- &rec->backref_tree, node) {
+ btrfs_init_path(&path);
+ list_for_each_entry(back, &rec->backrefs, list) {
if (back->full_backref || !back->is_data ||
!back->found_extent_tree)
continue;
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = dback->offset;
- ret = btrfs_search_slot(NULL, dest_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, dest_root, &key, &path, 0, 0);
+ btrfs_release_path(&path);
/*
* For ret < 0, it's OK since the fs-tree may be corrupted,
* we need to record it for inode/file extent rebuild.
recorded_data_ref = 1;
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
if (!ret)
return !recorded_data_ref;
else
{
struct btrfs_trans_handle *trans = NULL;
int ret;
- struct btrfs_path *path;
+ struct btrfs_path path;
+ struct list_head *cur = rec->backrefs.next;
struct cache_extent *cache;
- struct extent_backref *back, *tmp;
+ struct extent_backref *back;
int allocated = 0;
u64 flags = 0;
if (rec->flag_block_full_backref)
flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
+ btrfs_init_path(&path);
if (rec->refs != rec->extent_item_refs && !rec->metadata) {
/*
* Sometimes the backrefs themselves are so broken they don't
* them into the list if we find the backref so that
* verify_backrefs can figure out what to do.
*/
- ret = find_possible_backrefs(info, path, extent_cache, rec);
+ ret = find_possible_backrefs(info, &path, extent_cache, rec);
if (ret < 0)
goto out;
}
/* step one, make sure all of the backrefs agree */
- ret = verify_backrefs(info, path, rec);
+ ret = verify_backrefs(info, &path, rec);
if (ret < 0)
goto out;
}
/* step two, delete all the existing records */
- ret = delete_extent_records(trans, info->extent_root, path,
+ ret = delete_extent_records(trans, info->extent_root, &path,
rec->start, rec->max_size);
if (ret < 0)
}
/* step three, recreate all the refs we did find */
- rbtree_postorder_for_each_entry_safe(back, tmp,
- &rec->backref_tree, node) {
+ while(cur != &rec->backrefs) {
+ back = to_extent_backref(cur);
+ cur = cur->next;
+
/*
* if we didn't find any references, don't create a
* new extent record
continue;
rec->bad_full_backref = 0;
- ret = record_extent(trans, info, path, rec, back, allocated, flags);
+ ret = record_extent(trans, info, &path, rec, back, allocated, flags);
allocated = 1;
if (ret)
ret = err;
}
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = fs_info->extent_root;
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_extent_item *ei;
struct btrfs_key key;
u64 flags;
key.offset = rec->max_size;
}
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- btrfs_free_path(path);
+ if (IS_ERR(trans))
return PTR_ERR(trans);
- }
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ btrfs_init_path(&path);
+ ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
if (ret < 0) {
- btrfs_free_path(path);
+ btrfs_release_path(&path);
btrfs_commit_transaction(trans, root);
return ret;
} else if (ret) {
fprintf(stderr, "Didn't find extent for %llu\n",
(unsigned long long)rec->start);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
btrfs_commit_transaction(trans, root);
return -ENOENT;
}
- ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ ei = btrfs_item_ptr(path.nodes[0], path.slots[0],
struct btrfs_extent_item);
- flags = btrfs_extent_flags(path->nodes[0], ei);
+ flags = btrfs_extent_flags(path.nodes[0], ei);
if (rec->flag_block_full_backref) {
fprintf(stderr, "setting full backref on %llu\n",
(unsigned long long)key.objectid);
(unsigned long long)key.objectid);
flags &= ~BTRFS_BLOCK_FLAG_FULL_BACKREF;
}
- btrfs_set_extent_flags(path->nodes[0], ei, flags);
- btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_free_path(path);
+ btrfs_set_extent_flags(path.nodes[0], ei, flags);
+ btrfs_mark_buffer_dirty(path.nodes[0]);
+ btrfs_release_path(&path);
return btrfs_commit_transaction(trans, root);
}
ret = -EIO;
break;
}
- add_root_to_pending(buf, extent_cache, pending,
+ ret = add_root_to_pending(buf, extent_cache, pending,
seen, nodes, rec->objectid);
+ if (ret < 0)
+ break;
/*
* To rebuild extent tree, we need deal with snapshot
* one by one, otherwise we deal with node firstly which
btrfs_init_path(&path);
key.offset = 0;
key.objectid = 0;
- btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+ key.type = BTRFS_ROOT_ITEM_KEY;
ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
&key, &path, 0, 0);
if (ret < 0)
slot = path.slots[0];
}
btrfs_item_key_to_cpu(leaf, &found_key, path.slots[0]);
- if (btrfs_key_type(&found_key) == BTRFS_ROOT_ITEM_KEY) {
+ if (found_key.type == BTRFS_ROOT_ITEM_KEY) {
unsigned long offset;
u64 last_snapshot;
free_extent_buffer(eb);
btrfs_init_path(&path);
+ path.lowest_level = level;
/* Search with the first key, to ensure we can reach it */
ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
- if (ret) {
+ if (ret < 0) {
err |= REFERENCER_MISSING;
goto release_out;
}
btrfs_release_path(&path);
}
key.objectid = root_id;
- btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+ key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
btrfs_init_path(&path);
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
metadata = 1;
- if (metadata && check_crossing_stripes(key.objectid, eb->len)) {
+ if (metadata && check_crossing_stripes(global_info, key.objectid,
+ eb->len)) {
error("bad metadata [%llu, %llu) crossing stripe boundary",
key.objectid, key.objectid + nodesize);
err |= CROSSING_STRIPE_BOUNDARY;
next:
btrfs_item_key_to_cpu(eb, &key, slot);
- type = btrfs_key_type(&key);
+ type = key.type;
switch (type) {
case BTRFS_EXTENT_DATA_KEY:
struct extent_buffer *node)
{
struct extent_buffer *eb;
+ struct btrfs_key key;
+ struct btrfs_key drop_key;
int level;
u64 nr;
int i;
}
nr = btrfs_header_nritems(node);
+ btrfs_disk_key_to_cpu(&drop_key, &root->root_item.drop_progress);
btree_space_waste += (BTRFS_NODEPTRS_PER_BLOCK(root) - nr) *
sizeof(struct btrfs_key_ptr);
for (i = 0; i < nr; i++) {
u64 blocknr = btrfs_node_blockptr(node, i);
+ btrfs_node_key_to_cpu(node, &key, i);
+ if (level == root->root_item.drop_level &&
+ is_dropped_key(&key, &drop_key))
+ continue;
+
/*
* As a btrfs tree has most 8 levels (0..7), so it's quite safe
* to call the function itself.
static int reset_block_groups(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_group_cache *cache;
- struct btrfs_path *path;
+ struct btrfs_path path;
struct extent_buffer *leaf;
struct btrfs_chunk *chunk;
struct btrfs_key key;
int ret;
u64 start;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
+ btrfs_init_path(&path);
key.objectid = 0;
key.type = BTRFS_CHUNK_ITEM_KEY;
key.offset = 0;
-
- ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, &path, 0, 0);
if (ret < 0) {
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
/* First we need to create the in-memory block groups */
while (1) {
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(fs_info->chunk_root, path);
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+ ret = btrfs_next_leaf(fs_info->chunk_root, &path);
if (ret < 0) {
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
if (ret) {
break;
}
}
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ leaf = path.nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
if (key.type != BTRFS_CHUNK_ITEM_KEY) {
- path->slots[0]++;
+ path.slots[0]++;
continue;
}
- chunk = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_chunk);
+ chunk = btrfs_item_ptr(leaf, path.slots[0], struct btrfs_chunk);
btrfs_add_block_group(fs_info, 0,
btrfs_chunk_type(leaf, chunk),
key.objectid, key.offset,
set_extent_dirty(&fs_info->free_space_cache, key.offset,
key.offset + btrfs_chunk_length(leaf, chunk),
GFP_NOFS);
- path->slots[0]++;
+ path.slots[0]++;
}
start = 0;
while (1) {
start = cache->key.objectid + cache->key.offset;
}
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return 0;
}
struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root = fs_info->tree_root;
- struct btrfs_path *path;
+ struct btrfs_path path;
struct extent_buffer *leaf;
struct btrfs_key key;
int del_slot, del_nr = 0;
int ret;
int found = 0;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
+ btrfs_init_path(&path);
key.objectid = BTRFS_BALANCE_OBJECTID;
key.type = BTRFS_BALANCE_ITEM_KEY;
key.offset = 0;
-
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
if (ret) {
if (ret > 0)
ret = 0;
goto out;
}
- ret = btrfs_del_item(trans, root, path);
+ ret = btrfs_del_item(trans, root, &path);
if (ret)
goto out;
- btrfs_release_path(path);
+ btrfs_release_path(&path);
key.objectid = BTRFS_TREE_RELOC_OBJECTID;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = 0;
-
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
if (ret < 0)
goto out;
while (1) {
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
if (!found)
break;
if (del_nr) {
- ret = btrfs_del_items(trans, root, path,
+ ret = btrfs_del_items(trans, root, &path,
del_slot, del_nr);
del_nr = 0;
if (ret)
goto out;
}
key.offset++;
- btrfs_release_path(path);
+ btrfs_release_path(&path);
found = 0;
- ret = btrfs_search_slot(trans, root, &key, path,
+ ret = btrfs_search_slot(trans, root, &key, &path,
-1, 1);
if (ret < 0)
goto out;
continue;
}
found = 1;
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ leaf = path.nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
if (key.objectid > BTRFS_TREE_RELOC_OBJECTID)
break;
if (key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
- path->slots[0]++;
+ path.slots[0]++;
continue;
}
if (!del_nr) {
- del_slot = path->slots[0];
+ del_slot = path.slots[0];
del_nr = 1;
} else {
del_nr++;
}
- path->slots[0]++;
+ path.slots[0]++;
}
if (del_nr) {
- ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ ret = btrfs_del_items(trans, root, &path, del_slot, del_nr);
if (ret)
goto out;
}
- btrfs_release_path(path);
+ btrfs_release_path(&path);
reinit_data_reloc:
key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
goto out;
ret = btrfs_make_root_dir(trans, root, BTRFS_FIRST_FREE_OBJECTID);
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
static int recow_extent_buffer(struct btrfs_root *root, struct extent_buffer *eb)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_trans_handle *trans;
struct btrfs_key key;
int ret;
return PTR_ERR(root);
}
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- btrfs_free_path(path);
+ if (IS_ERR(trans))
return PTR_ERR(trans);
- }
- path->lowest_level = btrfs_header_level(eb);
- if (path->lowest_level)
+ btrfs_init_path(&path);
+ path.lowest_level = btrfs_header_level(eb);
+ if (path.lowest_level)
btrfs_node_key_to_cpu(eb, &key, 0);
else
btrfs_item_key_to_cpu(eb, &key, 0);
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
btrfs_commit_transaction(trans, root);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
static int delete_bad_item(struct btrfs_root *root, struct bad_item *bad)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_trans_handle *trans;
struct btrfs_key key;
int ret;
return PTR_ERR(root);
}
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- btrfs_free_path(path);
+ if (IS_ERR(trans))
return PTR_ERR(trans);
- }
- ret = btrfs_search_slot(trans, root, &bad->key, path, -1, 1);
+ btrfs_init_path(&path);
+ ret = btrfs_search_slot(trans, root, &bad->key, &path, -1, 1);
if (ret) {
if (ret > 0)
ret = 0;
goto out;
}
- ret = btrfs_del_item(trans, root, path);
+ ret = btrfs_del_item(trans, root, &path);
out:
btrfs_commit_transaction(trans, root);
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
struct btrfs_root *csum_root,
struct btrfs_root *cur_root)
{
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_key key;
struct extent_buffer *node;
struct btrfs_file_extent_item *fi;
int slot = 0;
int ret = 0;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
buf = malloc(cur_root->fs_info->csum_root->sectorsize);
- if (!buf) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!buf)
+ return -ENOMEM;
+ btrfs_init_path(&path);
key.objectid = 0;
key.offset = 0;
key.type = 0;
-
- ret = btrfs_search_slot(NULL, cur_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, cur_root, &key, &path, 0, 0);
if (ret < 0)
goto out;
/* Iterate all regular file extents and fill its csum */
while (1) {
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
if (key.type != BTRFS_EXTENT_DATA_KEY)
goto next;
- node = path->nodes[0];
- slot = path->slots[0];
+ node = path.nodes[0];
+ slot = path.slots[0];
fi = btrfs_item_ptr(node, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(node, fi) != BTRFS_FILE_EXTENT_REG)
goto next;
* TODO: if next leaf is corrupted, jump to nearest next valid
* leaf.
*/
- ret = btrfs_next_item(cur_root, path);
+ ret = btrfs_next_item(cur_root, &path);
if (ret < 0)
goto out;
if (ret > 0) {
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
free(buf);
return ret;
}
struct btrfs_root *csum_root)
{
struct btrfs_fs_info *fs_info = csum_root->fs_info;
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *cur_root;
struct extent_buffer *node;
int slot = 0;
int ret = 0;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
+ btrfs_init_path(&path);
key.objectid = BTRFS_FS_TREE_OBJECTID;
key.offset = 0;
key.type = BTRFS_ROOT_ITEM_KEY;
-
- ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, tree_root, &key, &path, 0, 0);
if (ret < 0)
goto out;
if (ret > 0) {
}
while (1) {
- node = path->nodes[0];
- slot = path->slots[0];
+ node = path.nodes[0];
+ slot = path.slots[0];
btrfs_item_key_to_cpu(node, &key, slot);
if (key.objectid > BTRFS_LAST_FREE_OBJECTID)
goto out;
if (ret < 0)
goto out;
next:
- ret = btrfs_next_item(tree_root, path);
+ ret = btrfs_next_item(tree_root, &path);
if (ret > 0) {
ret = 0;
goto out;
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
struct btrfs_root *csum_root)
{
struct btrfs_root *extent_root = csum_root->fs_info->extent_root;
- struct btrfs_path *path;
+ struct btrfs_path path;
struct btrfs_extent_item *ei;
struct extent_buffer *leaf;
char *buf;
struct btrfs_key key;
int ret;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
+ btrfs_init_path(&path);
key.objectid = 0;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = 0;
-
- ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
if (ret < 0) {
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
buf = malloc(csum_root->sectorsize);
if (!buf) {
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return -ENOMEM;
}
while (1) {
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(extent_root, path);
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+ ret = btrfs_next_leaf(extent_root, &path);
if (ret < 0)
break;
if (ret) {
break;
}
}
- leaf = path->nodes[0];
+ leaf = path.nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
if (key.type != BTRFS_EXTENT_ITEM_KEY) {
- path->slots[0]++;
+ path.slots[0]++;
continue;
}
- ei = btrfs_item_ptr(leaf, path->slots[0],
+ ei = btrfs_item_ptr(leaf, path.slots[0],
struct btrfs_extent_item);
if (!(btrfs_extent_flags(leaf, ei) &
BTRFS_EXTENT_FLAG_DATA)) {
- path->slots[0]++;
+ path.slots[0]++;
continue;
}
key.offset);
if (ret)
break;
- path->slots[0]++;
+ path.slots[0]++;
}
- btrfs_free_path(path);
+ btrfs_release_path(&path);
free(buf);
return ret;
}
int ret = 0;
struct btrfs_key key;
struct extent_buffer *leaf;
- struct btrfs_path *path;
+ struct btrfs_path path;
if (!roots_info_cache) {
roots_info_cache = malloc(sizeof(*roots_info_cache));
cache_tree_init(roots_info_cache);
}
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
+ btrfs_init_path(&path);
key.objectid = 0;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = 0;
-
- ret = btrfs_search_slot(NULL, info->extent_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, info->extent_root, &key, &path, 0, 0);
if (ret < 0)
goto out;
- leaf = path->nodes[0];
+ leaf = path.nodes[0];
while (1) {
struct btrfs_key found_key;
struct btrfs_extent_item *ei;
struct btrfs_extent_inline_ref *iref;
- int slot = path->slots[0];
+ int slot = path.slots[0];
int type;
u64 flags;
u64 root_id;
struct root_item_info *rii;
if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(info->extent_root, path);
+ ret = btrfs_next_leaf(info->extent_root, &path);
if (ret < 0) {
break;
} else if (ret) {
ret = 0;
break;
}
- leaf = path->nodes[0];
- slot = path->slots[0];
+ leaf = path.nodes[0];
+ slot = path.slots[0];
}
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ btrfs_item_key_to_cpu(leaf, &found_key, path.slots[0]);
if (found_key.type != BTRFS_EXTENT_ITEM_KEY &&
found_key.type != BTRFS_METADATA_ITEM_KEY)
rii->node_count++;
}
next:
- path->slots[0]++;
+ path.slots[0]++;
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(&path);
return ret;
}
*/
static int repair_root_items(struct btrfs_fs_info *info)
{
- struct btrfs_path *path = NULL;
+ struct btrfs_path path;
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_trans_handle *trans = NULL;
int bad_roots = 0;
int need_trans = 0;
+ btrfs_init_path(&path);
+
ret = build_roots_info_cache(info);
if (ret)
goto out;
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
key.objectid = BTRFS_FIRST_FREE_OBJECTID;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = 0;
}
}
- ret = btrfs_search_slot(trans, info->tree_root, &key, path,
+ ret = btrfs_search_slot(trans, info->tree_root, &key, &path,
0, trans ? 1 : 0);
if (ret < 0)
goto out;
- leaf = path->nodes[0];
+ leaf = path.nodes[0];
while (1) {
struct btrfs_key found_key;
- if (path->slots[0] >= btrfs_header_nritems(leaf)) {
- int no_more_keys = find_next_key(path, &key);
+ if (path.slots[0] >= btrfs_header_nritems(leaf)) {
+ int no_more_keys = find_next_key(&path, &key);
- btrfs_release_path(path);
+ btrfs_release_path(&path);
if (trans) {
ret = btrfs_commit_transaction(trans,
info->tree_root);
goto again;
}
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ btrfs_item_key_to_cpu(leaf, &found_key, path.slots[0]);
if (found_key.type != BTRFS_ROOT_ITEM_KEY)
goto next;
if (found_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
goto next;
- ret = maybe_repair_root_item(info, path, &found_key,
+ ret = maybe_repair_root_item(info, &path, &found_key,
trans ? 0 : 1);
if (ret < 0)
goto out;
if (!trans && repair) {
need_trans = 1;
key = found_key;
- btrfs_release_path(path);
+ btrfs_release_path(&path);
goto again;
}
bad_roots++;
}
next:
- path->slots[0]++;
+ path.slots[0]++;
}
ret = 0;
out:
free_roots_info_cache();
- btrfs_free_path(path);
+ btrfs_release_path(&path);
if (trans)
btrfs_commit_transaction(trans, info->tree_root);
if (ret < 0)
return bad_roots;
}
+/*
+ * Remove the v1 free space cache of every block group and invalidate the
+ * cache generation stored in the super block, so a later mount rebuilds
+ * the cache from scratch.
+ *
+ * Returns 0 on success, a negative errno value on failure.
+ */
+static int clear_free_space_cache(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_block_group_cache *bg_cache;
+	u64 current = 0;
+	int ret = 0;
+
+	/* Clear the free space cache inode and its extent data per block group */
+	while (1) {
+		bg_cache = btrfs_lookup_first_block_group(fs_info, current);
+		if (!bg_cache)
+			break;
+		ret = btrfs_clear_free_space_cache(fs_info, bg_cache);
+		if (ret < 0)
+			return ret;
+		current = bg_cache->key.objectid + bg_cache->key.offset;
+	}
+
+	/* Mark the cache invalid by setting cache_generation to -1 */
+	trans = btrfs_start_transaction(fs_info->tree_root, 0);
+	if (IS_ERR(trans)) {
+		error("failed to update super block cache generation");
+		return PTR_ERR(trans);
+	}
+	btrfs_set_super_cache_generation(fs_info->super_copy, (u64)-1);
+	/* Propagate a commit failure instead of silently reporting success */
+	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
+
+	return ret;
+}
+
const char * const cmd_check_usage[] = {
"btrfs check [options] <device>",
"Check structural integrity of a filesystem (unmounted).",
"Check structural integrity of an unmounted filesystem. Verify internal",
"trees' consistency and item connectivity. In the repair mode try to",
- "fix the problems found.",
+ "fix the problems found. ",
"WARNING: the repair mode is considered dangerous",
"",
"-s|--super <superblock> use this superblock copy",
"--readonly run in read-only mode (default)",
"--init-csum-tree create a new CRC tree",
"--init-extent-tree create a new extent tree",
- "--low-memory check in low memory usage mode(experimental)",
+ "--mode <MODE> allows choice of memory/IO trade-offs",
+ " where MODE is one of:",
+ " original - read inodes and extents to memory (requires",
+ " more memory, does less IO)",
+ " lowmem - try to use less memory but read blocks again",
+ " when needed",
"--check-data-csum verify checksums of data blocks",
- "-Q|--qgroup-report print a report on qgroup consistency",
+ "-Q|--qgroup-report print a report on qgroup consistency",
"-E|--subvol-extents <subvolid>",
" print subvolume extents and sharing state",
"-r|--tree-root <bytenr> use the given bytenr for the tree root",
"--chunk-root <bytenr> use the given bytenr for the chunk tree root",
"-p|--progress indicate progress",
+ "--clear-space-cache v1|v2 clear space cache for v1 or v2",
+ " NOTE: v1 support implemented",
NULL
};
u64 num;
int init_csum_tree = 0;
int readonly = 0;
+ int clear_space_cache = 0;
int qgroup_report = 0;
int qgroups_repaired = 0;
- enum btrfs_open_ctree_flags ctree_flags = OPEN_CTREE_EXCLUSIVE;
+ unsigned ctree_flags = OPEN_CTREE_EXCLUSIVE;
while(1) {
int c;
enum { GETOPT_VAL_REPAIR = 257, GETOPT_VAL_INIT_CSUM,
GETOPT_VAL_INIT_EXTENT, GETOPT_VAL_CHECK_CSUM,
GETOPT_VAL_READONLY, GETOPT_VAL_CHUNK_TREE,
- GETOPT_VAL_LOW_MEMORY };
+ GETOPT_VAL_MODE, GETOPT_VAL_CLEAR_SPACE_CACHE };
static const struct option long_options[] = {
{ "super", required_argument, NULL, 's' },
{ "repair", no_argument, NULL, GETOPT_VAL_REPAIR },
{ "chunk-root", required_argument, NULL,
GETOPT_VAL_CHUNK_TREE },
{ "progress", no_argument, NULL, 'p' },
- { "low-memory", no_argument, NULL,
- GETOPT_VAL_LOW_MEMORY },
+ { "mode", required_argument, NULL,
+ GETOPT_VAL_MODE },
+ { "clear-space-cache", required_argument, NULL,
+				GETOPT_VAL_CLEAR_SPACE_CACHE },
{ NULL, 0, NULL, 0}
};
case 's':
num = arg_strtou64(optarg);
if (num >= BTRFS_SUPER_MIRROR_MAX) {
- fprintf(stderr,
- "ERROR: super mirror should be less than: %d\n",
+ error(
+ "super mirror should be less than %d",
BTRFS_SUPER_MIRROR_MAX);
exit(1);
}
case GETOPT_VAL_CHECK_CSUM:
check_data_csum = 1;
break;
- case GETOPT_VAL_LOW_MEMORY:
- low_memory = 1;
+ case GETOPT_VAL_MODE:
+ check_mode = parse_check_mode(optarg);
+ if (check_mode == CHECK_MODE_UNKNOWN) {
+ error("unknown mode: %s", optarg);
+ exit(1);
+ }
+ break;
+ case GETOPT_VAL_CLEAR_SPACE_CACHE:
+ if (strcmp(optarg, "v1") != 0) {
+ error(
+"only v1 support implemented, unrecognized value %s",
+ optarg);
+ exit(1);
+ }
+ clear_space_cache = 1;
+ ctree_flags |= OPEN_CTREE_WRITES;
break;
}
}
/* This check is the only reason for --readonly to exist */
if (readonly && repair) {
- fprintf(stderr, "Repair options are not compatible with --readonly\n");
+ error("repair options are not compatible with --readonly");
exit(1);
}
/*
* Not supported yet
*/
- if (repair && low_memory) {
- error("Low memory mode doesn't support repair yet");
+ if (repair && check_mode == CHECK_MODE_LOWMEM) {
+ error("low memory mode doesn't support repair yet");
exit(1);
}
cache_tree_init(&root_cache);
if((ret = check_mounted(argv[optind])) < 0) {
- fprintf(stderr, "Could not check mount status: %s\n", strerror(-ret));
+ error("could not check mount status: %s", strerror(-ret));
goto err_out;
} else if(ret) {
- fprintf(stderr, "%s is currently mounted. Aborting.\n", argv[optind]);
+ error("%s is currently mounted, aborting", argv[optind]);
ret = -EBUSY;
goto err_out;
}
info = open_ctree_fs_info(argv[optind], bytenr, tree_root_bytenr,
chunk_root_bytenr, ctree_flags);
if (!info) {
- fprintf(stderr, "Couldn't open file system\n");
+ error("cannot open file system");
ret = -EIO;
goto err_out;
}
global_info = info;
root = info->fs_root;
+ if (clear_space_cache) {
+ if (btrfs_fs_compat_ro(info,
+ BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)) {
+ error(
+ "free space cache v2 detected, clearing not implemented");
+ ret = 1;
+ goto close_out;
+ }
+ printf("Clearing free space cache\n");
+ ret = clear_free_space_cache(info);
+ if (ret) {
+ error("failed to clear free space cache");
+ ret = 1;
+ } else {
+ printf("Free space cache cleared\n");
+ }
+ goto close_out;
+ }
/*
* repair mode will force us to commit transaction which
* will make us fail to load log tree when mounting.
*/
if (repair && btrfs_super_log_root(info->super_copy)) {
- ret = ask_user("repair mode will force to clear out log tree, Are you sure?");
+ ret = ask_user("repair mode will force to clear out log tree, are you sure?");
if (!ret) {
ret = 1;
goto close_out;
}
ret = zero_log_tree(root);
if (ret) {
- fprintf(stderr, "fail to zero log tree\n");
+ error("failed to zero log tree: %d", ret);
goto close_out;
}
}
if (!extent_buffer_uptodate(info->tree_root->node) ||
!extent_buffer_uptodate(info->dev_root->node) ||
!extent_buffer_uptodate(info->chunk_root->node)) {
- fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
+ error("critical roots corrupted, unable to check the filesystem");
ret = -EIO;
goto close_out;
}
trans = btrfs_start_transaction(info->extent_root, 0);
if (IS_ERR(trans)) {
- fprintf(stderr, "Error starting transaction\n");
+ error("error starting transaction");
ret = PTR_ERR(trans);
goto close_out;
}
}
if (init_csum_tree) {
- fprintf(stderr, "Reinit crc root\n");
+ printf("Reinitialize checksum tree\n");
ret = btrfs_fsck_reinit_root(trans, info->csum_root, 0);
if (ret) {
- fprintf(stderr, "crc root initialization failed\n");
+ error("checksum tree initialization failed: %d",
+ ret);
ret = -EIO;
goto close_out;
}
ret = fill_csum_tree(trans, info->csum_root,
init_extent_tree);
if (ret) {
- fprintf(stderr, "crc refilling failed\n");
+ error("checksum tree refilling failed: %d", ret);
return -EIO;
}
}
goto close_out;
}
if (!extent_buffer_uptodate(info->extent_root->node)) {
- fprintf(stderr, "Critical roots corrupted, unable to fsck the FS\n");
+ error("critical: extent_root, unable to check the filesystem");
ret = -EIO;
goto close_out;
}
if (!extent_buffer_uptodate(info->csum_root->node)) {
- fprintf(stderr, "Checksum root corrupted, rerun with --init-csum-tree option\n");
+ error("critical: csum_root, unable to check the filesystem");
ret = -EIO;
goto close_out;
}
if (!ctx.progress_enabled)
- fprintf(stderr, "checking extents\n");
- if (low_memory)
+		printf("checking extents\n");
+ if (check_mode == CHECK_MODE_LOWMEM)
ret = check_chunks_and_extents_v2(root);
else
ret = check_chunks_and_extents(root);
if (ret)
- fprintf(stderr, "Errors found in extent allocation tree or chunk allocation\n");
+		printf("Errors found in extent allocation tree or chunk allocation\n");
ret = repair_root_items(info);
if (ret < 0)
}
if (!list_empty(&root->fs_info->recow_ebs)) {
- fprintf(stderr, "Transid errors in file system\n");
+ error("transid errors in file system");
ret = 1;
}
out: