From f0792b792dbe862847b1d590beed372a01b99af0 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 28 Mar 2023 14:19:53 +0900
Subject: [PATCH] btrfs: fold btrfs_clone_ordered_extent into
 btrfs_split_ordered_extent

The function btrfs_clone_ordered_extent is very specific to the usage in
btrfs_split_ordered_extent. Now that only a single call to
btrfs_clone_ordered_extent is left, just fold it into
btrfs_split_ordered_extent to make the operation more clear.

Reviewed-by: Josef Bacik
Tested-by: Johannes Thumshirn
Signed-off-by: Christoph Hellwig
Signed-off-by: David Sterba
---
 fs/btrfs/ordered-data.c | 37 ++++++++++++++-----------------------
 1 file changed, 14 insertions(+), 23 deletions(-)

diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index c638b1b..a9778a9 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -1116,38 +1116,21 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
 
 	return false;
 }
-
-static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
-				u64 len)
-{
-	struct inode *inode = ordered->inode;
-	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
-	u64 file_offset = ordered->file_offset + pos;
-	u64 disk_bytenr = ordered->disk_bytenr + pos;
-	unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;
-
-	/*
-	 * The splitting extent is already counted and will be added again in
-	 * btrfs_add_ordered_extent_*(). Subtract len to avoid double counting.
-	 */
-	percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
-				 fs_info->delalloc_batch);
-	WARN_ON_ONCE(flags & (1 << BTRFS_ORDERED_COMPRESSED));
-	return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
-					disk_bytenr, len, 0, flags,
-					ordered->compress_type);
-}
-
 /* Split out a new ordered extent for this first @len bytes of @ordered. */
 int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 len)
 {
 	struct inode *inode = ordered->inode;
 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	u64 file_offset = ordered->file_offset;
+	u64 disk_bytenr = ordered->disk_bytenr;
+	unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;
 	struct rb_node *node;
 
 	trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);
 
+	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));
+
 	/*
 	 * The entire bio must be covered by the ordered extent, but we can't
 	 * reduce the original extent to a zero length either.
@@ -1187,7 +1170,15 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 len)
 
 	spin_unlock_irq(&tree->lock);
 
-	return clone_ordered_extent(ordered, 0, len);
+	/*
+	 * The splitting extent is already counted and will be added again in
+	 * btrfs_add_ordered_extent(). Subtract len to avoid double counting.
+	 */
+	percpu_counter_add_batch(&fs_info->ordered_bytes, -len, fs_info->delalloc_batch);
+
+	return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
+					disk_bytenr, len, 0, flags,
+					ordered->compress_type);
 }
 
 int __init ordered_data_init(void)
-- 
2.7.4