static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
struct ulist *parents, struct __prelim_ref *ref,
- int level, u64 time_seq, const u64 *extent_item_pos)
+ int level, u64 time_seq, const u64 *extent_item_pos,
+ u64 total_refs)
{
int ret = 0;
int slot;
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
ret = btrfs_next_old_leaf(root, path, time_seq);
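+ /* keep walking file extent items until every known reference is accounted for */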
- while (!ret && count < ref->count) {
+ while (!ret && count < total_refs) {
eb = path->nodes[0];
slot = path->slots[0];
struct btrfs_path *path, u64 time_seq,
struct __prelim_ref *ref,
struct ulist *parents,
- const u64 *extent_item_pos)
+ const u64 *extent_item_pos, u64 total_refs)
{
struct btrfs_root *root;
struct btrfs_key root_key;
}
ret = add_all_parents(root, path, parents, ref, level, time_seq,
- extent_item_pos);
+ extent_item_pos, total_refs);
out:
path->lowest_level = 0;
btrfs_release_path(path);
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 time_seq,
struct list_head *head,
- const u64 *extent_item_pos)
+ const u64 *extent_item_pos, u64 total_refs)
{
int err;
int ret = 0;
if (ref->count == 0)
continue;
err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
- parents, extent_item_pos);
+ parents, extent_item_pos,
+ total_refs);
/*
* we can only tolerate ENOENT; otherwise we should catch the error
* and return directly.
* smaller than or equal to that seq to the list
*/
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
- struct list_head *prefs)
+ struct list_head *prefs, u64 *total_refs)
{
struct btrfs_delayed_extent_op *extent_op = head->extent_op;
struct rb_node *n = &head->node.rb_node;
default:
BUG_ON(1);
}
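+ /* fold this delayed ref's signed ref count change into the running total */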
+ *total_refs += (node->ref_mod * sgn);
switch (node->type) {
case BTRFS_TREE_BLOCK_REF_KEY: {
struct btrfs_delayed_tree_ref *ref;
*/
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 bytenr,
- int *info_level, struct list_head *prefs)
+ int *info_level, struct list_head *prefs,
+ u64 *total_refs)
{
int ret = 0;
int slot;
ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
flags = btrfs_extent_flags(leaf, ei);
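+ /* the extent item records the total number of on-disk references */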
+ *total_refs += btrfs_extent_refs(leaf, ei);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
ptr = (unsigned long)(ei + 1);
struct list_head prefs;
struct __prelim_ref *ref;
struct extent_inode_elem *eie = NULL;
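+ /* sum of on-disk and pending delayed references to the extent */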
+ u64 total_refs = 0;
INIT_LIST_HEAD(&prefs);
INIT_LIST_HEAD(&prefs_delayed);
}
spin_unlock(&delayed_refs->lock);
ret = __add_delayed_refs(head, time_seq,
- &prefs_delayed);
+ &prefs_delayed, &total_refs);
mutex_unlock(&head->mutex);
if (ret)
goto out;
(key.type == BTRFS_EXTENT_ITEM_KEY ||
key.type == BTRFS_METADATA_ITEM_KEY)) {
ret = __add_inline_refs(fs_info, path, bytenr,
- &info_level, &prefs);
+ &info_level, &prefs,
+ &total_refs);
if (ret)
goto out;
ret = __add_keyed_refs(fs_info, path, bytenr,
__merge_refs(&prefs, 1);
ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
- extent_item_pos);
+ extent_item_pos, total_refs);
if (ret)
goto out;