--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ ... @@
 static int fiemap_process_hole(struct btrfs_inode *inode,
 			       struct fiemap_extent_info *fieinfo,
 			       struct fiemap_cache *cache,
+			       struct extent_state **delalloc_cached_state,
 			       struct btrfs_backref_share_check_ctx *backref_ctx,
 			       u64 disk_bytenr, u64 extent_offset,
 			       u64 extent_gen,
@@ ... @@ static int fiemap_process_hole(struct btrfs_inode *inode,
 		bool delalloc;
 
 		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
+							delalloc_cached_state,
 							&delalloc_start,
 							&delalloc_end);
 		if (!delalloc)
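The cached state is owned by the top-level caller rather than allocated here: fiemap_process_hole() can run many times during a single fiemap call, and each run may do several delalloc searches, so threading one extent_state record through all of them lets each search resume near the previous hit instead of descending the io_tree's rbtree from the root every time. A minimal sketch of the pattern, using only the two helpers touched by this patch (the scan_one_range() consumer and the surrounding variables are hypothetical):

	struct extent_state *cached = NULL;	/* caller-owned, starts empty */
	u64 cur = start;

	while (cur < end) {
		u64 dstart, dend;

		/*
		 * Each call may point 'cached' at the extent state it found,
		 * so the next search can start from that node instead of
		 * walking the rbtree from the root.
		 */
		if (!btrfs_find_delalloc_in_range(inode, cur, end, &cached,
						  &dstart, &dend))
			break;
		scan_one_range(dstart, dend);	/* hypothetical consumer */
		cur = dend + 1;
	}
	free_extent_state(cached);	/* NULL-safe; drops our reference */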
@@ ... @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 {
 	const u64 ino = btrfs_ino(inode);
 	struct extent_state *cached_state = NULL;
+	struct extent_state *delalloc_cached_state = NULL;
 	struct btrfs_path *path;
 	struct fiemap_cache cache = { 0 };
 	struct btrfs_backref_share_check_ctx *backref_ctx;
@@ ... @@
 			const u64 range_end = min(key.offset, lockend) - 1;
 
 			ret = fiemap_process_hole(inode, fieinfo, &cache,
+						  &delalloc_cached_state,
 						  backref_ctx, 0, 0, 0,
 						  prev_extent_end, range_end);
 			if (ret < 0) {
@@ ... @@
 					 extent_len, flags);
 		} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
 			ret = fiemap_process_hole(inode, fieinfo, &cache,
+						  &delalloc_cached_state,
 						  backref_ctx,
 						  disk_bytenr, extent_offset,
 						  extent_gen, key.offset,
 						  extent_end - 1);
 		} else if (disk_bytenr == 0) {
 			/* We have an explicit hole. */
 			ret = fiemap_process_hole(inode, fieinfo, &cache,
+						  &delalloc_cached_state,
 						  backref_ctx, 0, 0, 0,
 						  key.offset, extent_end - 1);
 		} else {
@@ ... @@
 	path = NULL;
 
 	if (!stopped && prev_extent_end < lockend) {
-		ret = fiemap_process_hole(inode, fieinfo, &cache, backref_ctx,
+		ret = fiemap_process_hole(inode, fieinfo, &cache,
+					  &delalloc_cached_state, backref_ctx,
 					  0, 0, 0, prev_extent_end, lockend - 1);
 		if (ret < 0)
 			goto out_unlock;
@@ ... @@
 			delalloc = btrfs_find_delalloc_in_range(inode,
 								prev_extent_end,
 								i_size - 1,
+								&delalloc_cached_state,
 								&delalloc_start,
 								&delalloc_end);
 			if (!delalloc)
@@ ... @@
 out_unlock:
 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 out:
+	free_extent_state(delalloc_cached_state);
 	btrfs_free_backref_share_ctx(backref_ctx);
 	btrfs_free_path(path);
 	return ret;
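Note where the cleanup sits: free_extent_state() is placed under the out: label, after the unlock, so it runs on every return path, including errors taken before any delalloc search happened. This relies on free_extent_state() being a no-op for NULL and, otherwise, only dropping the reference held on the cached record (extent states are reference counted). A sketch of that lifetime rule, with walk_file_extents() as a hypothetical stand-in for the fiemap loop:

	struct extent_state *delalloc_cached_state = NULL;
	int ret;

	ret = walk_file_extents(inode, &delalloc_cached_state);
	/* No-op if nothing was ever cached, otherwise drops the
	 * reference taken when the search code cached the state. */
	free_extent_state(delalloc_cached_state);
	return ret;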
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ ... @@
  * looping while it gets adjacent subranges, and merging them together.
  */
 static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
+				   struct extent_state **cached_state,
 				   bool *search_io_tree,
 				   u64 *delalloc_start_ret, u64 *delalloc_end_ret)
 {
@@ ... @@ static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
 		delalloc_len = count_range_bits(&inode->io_tree,
 						delalloc_start_ret, end,
 						len, EXTENT_DELALLOC, 1,
-						NULL);
+						cached_state);
 	} else {
 		spin_unlock(&inode->lock);
 	}
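Forwarding the record to count_range_bits() is where the saving happens: with a still-valid cached state the search can begin at the node found by the previous call instead of doing an O(log n) rbtree descent each time. The sketch below is only the assumed shape of that shortcut; first_state() is a hypothetical name and tree_search() stands in for the internal lookup, so neither should be read as the actual btrfs implementation:

	static struct extent_state *first_state(struct extent_io_tree *tree,
						struct extent_state **cached,
						u64 offset)
	{
		/* Resume from the cached node when it is still usable... */
		if (cached && *cached && (*cached)->start <= offset)
			return *cached;
		/* ...otherwise fall back to a full rbtree descent. */
		return tree_search(tree, offset);
	}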
@@ ... @@
  *               sector size aligned.
  * @end:         The end offset (inclusive value) of the search range.
  *               It does not need to be sector size aligned.
+ * @cached_state: Extent state record used for speeding up delalloc
+ *                searches in the inode's io_tree. Can be NULL.
  * @delalloc_start_ret: Output argument, set to the start offset of the
  *                      subrange found with delalloc (may not be sector size
  *                      aligned).
@@ ... @@
  *               end offsets of the subrange.
  */
 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
+				  struct extent_state **cached_state,
 				  u64 *delalloc_start_ret, u64 *delalloc_end_ret)
 {
 	u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
@@ ... @@ bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
 		bool delalloc;
 
 		delalloc = find_delalloc_subrange(inode, cur_offset, end,
-						  &search_io_tree,
+						  cached_state, &search_io_tree,
 						  &delalloc_start,
 						  &delalloc_end);
 		if (!delalloc)
@@ ... @@
 	u64 delalloc_end;
 	bool delalloc;
 
-	delalloc = btrfs_find_delalloc_in_range(inode, start, end,
+	delalloc = btrfs_find_delalloc_in_range(inode, start, end, NULL,
 						&delalloc_start, &delalloc_end);
 	if (delalloc && whence == SEEK_DATA) {
 		*start_ret = delalloc_start;
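The lseek helper above keeps passing NULL, which the updated comment documents as the opt-out for callers that do not maintain a cache; its behaviour is unchanged by this patch. A hypothetical wrapper showing that one-shot style:

	static bool range_has_delalloc(struct btrfs_inode *inode, u64 start, u64 end)
	{
		u64 dstart, dend;

		/* No cache: a single query, nothing to free afterwards. */
		return btrfs_find_delalloc_in_range(inode, start, end, NULL,
						    &dstart, &dend);
	}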