btrfs: move extent locking outside of btrfs_truncate_inode_items
author Josef Bacik <josef@toxicpanda.com>
Fri, 3 Dec 2021 22:18:05 +0000 (17:18 -0500)
committer David Sterba <dsterba@suse.com>
Fri, 7 Jan 2022 13:18:24 +0000 (14:18 +0100)
Currently btrfs_truncate_inode_items() locks the extent range and drops the
extent cache for any inode it truncates, unless it is operating on the tree
log.  We call this helper from:

- truncate
- evict
- tree log
- free space cache truncation

For evict we have already dropped all of the extent cache for this inode by
the time we get here, and we are the only one accessing this inode, so this
step is unnecessary.

For the tree log code we already skip this part.

Pull this work out into the two remaining callers that need it: the truncate
path and the free space cache truncation path.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/free-space-cache.c
fs/btrfs/inode-item.c
fs/btrfs/inode.c
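
For reference, the caller-side pattern this change consolidates looks roughly
like the minimal sketch below. The helper name truncate_with_extent_lock is
hypothetical and the sketch assumes it is built inside fs/btrfs with the usual
btrfs headers; it mirrors the hunks that follow rather than being part of the
patch:

    /*
     * Minimal sketch (hypothetical helper, not part of the patch) of the
     * locking that callers now do around btrfs_truncate_inode_items().
     */
    static int truncate_with_extent_lock(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root,
                                         struct btrfs_inode *inode,
                                         u64 new_size)
    {
            struct btrfs_fs_info *fs_info = root->fs_info;
            struct extent_state *cached_state = NULL;
            /* Lock from the start of the block that contains the new EOF. */
            const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
            int ret;

            lock_extent_bits(&inode->io_tree, lock_start, (u64)-1,
                             &cached_state);
            /*
             * Drop the extent cache only from the next block forward, since
             * the last (possibly partial) block is kept just the way it is.
             */
            btrfs_drop_extent_cache(inode, ALIGN(new_size, fs_info->sectorsize),
                                    (u64)-1, 0);

            ret = btrfs_truncate_inode_items(trans, root, inode, new_size,
                                             BTRFS_EXTENT_DATA_KEY, NULL);

            unlock_extent_cached(&inode->io_tree, lock_start, (u64)-1,
                                 &cached_state);
            return ret;
    }

The free space cache caller below truncates to size 0 and therefore locks and
drops the whole [0, (u64)-1] range, while btrfs_truncate() uses the
block-aligned start shown here and passes &extents_found as the last argument.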

diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 55e1be7..28b9c63 100644
@@ -289,9 +289,11 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
 
 int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
                                    struct btrfs_block_group *block_group,
-                                   struct inode *inode)
+                                   struct inode *vfs_inode)
 {
-       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_inode *inode = BTRFS_I(vfs_inode);
+       struct btrfs_root *root = inode->root;
+       struct extent_state *cached_state = NULL;
        int ret = 0;
        bool locked = false;
 
@@ -321,19 +323,23 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
                btrfs_free_path(path);
        }
 
-       btrfs_i_size_write(BTRFS_I(inode), 0);
-       truncate_pagecache(inode, 0);
+       btrfs_i_size_write(inode, 0);
+       truncate_pagecache(vfs_inode, 0);
+
+       lock_extent_bits(&inode->io_tree, 0, (u64)-1, &cached_state);
+       btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
 
        /*
         * We skip the throttling logic for free space cache inodes, so we don't
         * need to check for -EAGAIN.
         */
-       ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
-                                        0, BTRFS_EXTENT_DATA_KEY, NULL);
+       ret = btrfs_truncate_inode_items(trans, root, inode, 0,
+                                        BTRFS_EXTENT_DATA_KEY, NULL);
+       unlock_extent_cached(&inode->io_tree, 0, (u64)-1, &cached_state);
        if (ret)
                goto fail;
 
-       ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+       ret = btrfs_update_inode(trans, root, inode);
 
 fail:
        if (locked)
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index c43a361..604ace6 100644
@@ -473,8 +473,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
        u64 bytes_deleted = 0;
        bool be_nice = false;
        bool should_throttle = false;
-       const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
-       struct extent_state *cached_state = NULL;
 
        BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
 
@@ -492,20 +490,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        path->reada = READA_BACK;
 
-       if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
-               lock_extent_bits(&inode->io_tree, lock_start, (u64)-1,
-                                &cached_state);
-
-               /*
-                * We want to drop from the next block forward in case this
-                * new size is not block aligned since we will be keeping the
-                * last block of the extent just the way it is.
-                */
-               btrfs_drop_extent_cache(inode, ALIGN(new_size,
-                                       fs_info->sectorsize),
-                                       (u64)-1, 0);
-       }
-
        /*
         * This function is also used to drop the items in the log tree before
         * we relog the inode, so if root != BTRFS_I(inode)->root, it means
@@ -783,8 +767,6 @@ out:
                if (!ret && last_size > new_size)
                        last_size = new_size;
                btrfs_inode_safe_disk_i_size_write(inode, last_size);
-               unlock_extent_cached(&inode->io_tree, lock_start, (u64)-1,
-                                    &cached_state);
        }
 
        btrfs_free_path(path);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c29e7c8..9f9cd6c 100644
@@ -8583,10 +8583,28 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
        trans->block_rsv = rsv;
 
        while (1) {
+               struct extent_state *cached_state = NULL;
+               const u64 new_size = inode->i_size;
+               const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
+
+               lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
+                                &cached_state);
+               /*
+                * We want to drop from the next block forward in case this new
+                * size is not block aligned since we will be keeping the last
+                * block of the extent just the way it is.
+                */
+               btrfs_drop_extent_cache(BTRFS_I(inode),
+                                       ALIGN(new_size, fs_info->sectorsize),
+                                       (u64)-1, 0);
+
                ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
                                                 inode->i_size,
                                                 BTRFS_EXTENT_DATA_KEY,
                                                 &extents_found);
+               unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
+                                    (u64)-1, &cached_state);
+
                trans->block_rsv = &fs_info->trans_block_rsv;
                if (ret != -ENOSPC && ret != -EAGAIN)
                        break;