* fs_info->commit_root_sem semaphore, so no need to worry about the root's last
* snapshot field changing while updating or checking the cache.
*/
-static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
+static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
struct btrfs_root *root,
u64 bytenr, int level, bool *is_shared)
{
struct btrfs_backref_shared_cache_entry *entry;
- if (!cache->use_cache)
+ if (!ctx->use_path_cache)
return false;
if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
*/
ASSERT(level >= 0);
- entry = &cache->entries[level];
+ entry = &ctx->path_cache_entries[level];
/* Unused cache entry or being used for some other extent buffer. */
if (entry->bytenr != bytenr)
*/
if (*is_shared) {
for (int i = 0; i < level; i++) {
- cache->entries[i].is_shared = true;
- cache->entries[i].gen = entry->gen;
+ ctx->path_cache_entries[i].is_shared = true;
+ ctx->path_cache_entries[i].gen = entry->gen;
}
}
* fs_info->commit_root_sem semaphore, so no need to worry about the root's last
* snapshot field changing while updating or checking the cache.
*/
-static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
+static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
struct btrfs_root *root,
u64 bytenr, int level, bool is_shared)
{
struct btrfs_backref_shared_cache_entry *entry;
u64 gen;
- if (!cache->use_cache)
+ if (!ctx->use_path_cache)
return;
if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
else
gen = btrfs_root_last_snapshot(&root->root_item);
- entry = &cache->entries[level];
+ entry = &ctx->path_cache_entries[level];
entry->bytenr = bytenr;
entry->is_shared = is_shared;
entry->gen = gen;
*/
if (is_shared) {
for (int i = 0; i < level; i++) {
- entry = &cache->entries[i];
+ entry = &ctx->path_cache_entries[i];
entry->is_shared = is_shared;
entry->gen = gen;
}
* not known.
* @roots: List of roots this extent is shared among.
* @tmp: Temporary list used for iteration.
- * @cache: A backref lookup result cache.
+ * @ctx: A backref sharedness check context.
*
* btrfs_is_data_extent_shared uses the backref walking code but will short
* circuit as soon as it finds a root or inode that doesn't match the
int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
u64 extent_gen,
struct ulist *roots, struct ulist *tmp,
- struct btrfs_backref_shared_cache *cache)
+ struct btrfs_backref_share_check_ctx *ctx)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
/* -1 means we are in the bytenr of the data extent. */
level = -1;
ULIST_ITER_INIT(&uiter);
- cache->use_cache = true;
+ ctx->use_path_cache = true;
while (1) {
bool is_shared;
bool cached;
/* this is the only condition under which we return 1 */
ret = 1;
if (level >= 0)
- store_backref_shared_cache(cache, root, bytenr,
+ store_backref_shared_cache(ctx, root, bytenr,
level, true);
break;
}
* (which implies multiple paths).
*/
if (level == -1 && tmp->nnodes > 1)
- cache->use_cache = false;
+ ctx->use_path_cache = false;
if (level >= 0)
- store_backref_shared_cache(cache, root, bytenr,
+ store_backref_shared_cache(ctx, root, bytenr,
level, false);
node = ulist_next(tmp, &uiter);
if (!node)
break;
bytenr = node->val;
level++;
- cached = lookup_backref_shared_cache(cache, root, bytenr, level,
+ cached = lookup_backref_shared_cache(ctx, root, bytenr, level,
&is_shared);
if (cached) {
ret = (is_shared ? 1 : 0);
bool is_shared;
};
-struct btrfs_backref_shared_cache {
+struct btrfs_backref_share_check_ctx {
/*
* A path from a root to a leaf that has a file extent item pointing to
* a given data extent should never exceed the maximum b+tree height.
*/
- struct btrfs_backref_shared_cache_entry entries[BTRFS_MAX_LEVEL];
- bool use_cache;
+ struct btrfs_backref_shared_cache_entry path_cache_entries[BTRFS_MAX_LEVEL];
+ bool use_path_cache;
};
typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
u64 extent_gen,
struct ulist *roots, struct ulist *tmp,
- struct btrfs_backref_shared_cache *cache);
+ struct btrfs_backref_share_check_ctx *ctx);
int __init btrfs_prelim_ref_init(void);
void __cold btrfs_prelim_ref_exit(void);
static int fiemap_process_hole(struct btrfs_inode *inode,
struct fiemap_extent_info *fieinfo,
struct fiemap_cache *cache,
- struct btrfs_backref_shared_cache *backref_cache,
+ struct btrfs_backref_share_check_ctx *backref_ctx,
u64 disk_bytenr, u64 extent_offset,
u64 extent_gen,
struct ulist *roots, struct ulist *tmp_ulist,
disk_bytenr,
extent_gen, roots,
tmp_ulist,
- backref_cache);
+ backref_ctx);
if (ret < 0)
return ret;
else if (ret > 0)
disk_bytenr,
extent_gen, roots,
tmp_ulist,
- backref_cache);
+ backref_ctx);
if (ret < 0)
return ret;
else if (ret > 0)
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
struct fiemap_cache cache = { 0 };
- struct btrfs_backref_shared_cache *backref_cache;
+ struct btrfs_backref_share_check_ctx *backref_ctx;
struct ulist *roots;
struct ulist *tmp_ulist;
u64 last_extent_end;
bool stopped = false;
int ret;
- backref_cache = kzalloc(sizeof(*backref_cache), GFP_KERNEL);
+ backref_ctx = kzalloc(sizeof(*backref_ctx), GFP_KERNEL);
path = btrfs_alloc_path();
roots = ulist_alloc(GFP_KERNEL);
tmp_ulist = ulist_alloc(GFP_KERNEL);
- if (!backref_cache || !path || !roots || !tmp_ulist) {
+ if (!backref_ctx || !path || !roots || !tmp_ulist) {
ret = -ENOMEM;
goto out;
}
const u64 range_end = min(key.offset, lockend) - 1;
ret = fiemap_process_hole(inode, fieinfo, &cache,
- backref_cache, 0, 0, 0,
+ backref_ctx, 0, 0, 0,
roots, tmp_ulist,
prev_extent_end, range_end);
if (ret < 0) {
extent_len, flags);
} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
ret = fiemap_process_hole(inode, fieinfo, &cache,
- backref_cache,
+ backref_ctx,
disk_bytenr, extent_offset,
extent_gen, roots, tmp_ulist,
key.offset, extent_end - 1);
} else if (disk_bytenr == 0) {
/* We have an explicit hole. */
ret = fiemap_process_hole(inode, fieinfo, &cache,
- backref_cache, 0, 0, 0,
+ backref_ctx, 0, 0, 0,
roots, tmp_ulist,
key.offset, extent_end - 1);
} else {
extent_gen,
roots,
tmp_ulist,
- backref_cache);
+ backref_ctx);
if (ret < 0)
goto out_unlock;
else if (ret > 0)
path = NULL;
if (!stopped && prev_extent_end < lockend) {
- ret = fiemap_process_hole(inode, fieinfo, &cache, backref_cache,
+ ret = fiemap_process_hole(inode, fieinfo, &cache, backref_ctx,
0, 0, 0, roots, tmp_ulist,
prev_extent_end, lockend - 1);
if (ret < 0)
out_unlock:
unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
out:
- kfree(backref_cache);
+ kfree(backref_ctx);
btrfs_free_path(path);
ulist_free(roots);
ulist_free(tmp_ulist);
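
For illustration only, not part of the patch: a minimal sketch of how a caller would use the renamed context, following the same allocate/reuse/free pattern the fiemap code above adopts. The helper name check_extent_sharedness() is hypothetical; the allocation calls and the btrfs_is_data_extent_shared() signature are the ones visible in the diff, and error handling is reduced to the minimum.

/* Hypothetical caller sketch (assumes btrfs-internal headers are in scope). */
static int check_extent_sharedness(struct btrfs_inode *inode, u64 bytenr,
				   u64 extent_gen)
{
	struct btrfs_backref_share_check_ctx *ctx;
	struct ulist *roots;
	struct ulist *tmp;
	int ret;

	/* One context per batch of checks; zeroed so the path cache starts empty. */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	roots = ulist_alloc(GFP_KERNEL);
	tmp = ulist_alloc(GFP_KERNEL);
	if (!ctx || !roots || !tmp) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Returns 1 if the extent is shared, 0 if not, negative errno on error.
	 * The function itself sets ctx->use_path_cache before walking backrefs.
	 */
	ret = btrfs_is_data_extent_shared(inode, bytenr, extent_gen,
					  roots, tmp, ctx);
out:
	kfree(ctx);
	ulist_free(roots);
	ulist_free(tmp);
	return ret;
}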