From 82187d2ecdfb22ab7ee05f388402a39236d31428 Mon Sep 17 00:00:00 2001
From: Naohiro Aota
Date: Wed, 8 Dec 2021 00:35:49 +0900
Subject: [PATCH] btrfs: zoned: fix chunk allocation condition for zoned
 allocator

The ZNS specification defines a limit on the number of "active" zones.
That limit forces us to limit the number of block groups that can be
used for an allocation at the same time. To stay within the limit,
commit a85f05e59bc1 ("btrfs: zoned: avoid chunk allocation if active
block group has enough space") made the allocator reuse the existing
active block groups as much as possible when it cannot activate any
other zone without sacrificing an already activated block group.

However, the check is wrong in two ways. First, it is evaluated for
every raid index (ffe_ctl->index). Even if the condition is reached and
"ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size" is met, there can
be other block groups with enough space to hold ffe_ctl->num_bytes.
(This cannot happen in the current zoned code, as it only supports the
SINGLE profile, but it can once other RAID types are enabled.)

Second, it checks the active zone availability depending on the raid
index. The raid index is just an index into space_info->block_groups,
so it has nothing to do with chunk allocation.

These mistakes cause a faulty allocation in the following situation.
Consider zoned btrfs running on a device whose max_active_zones == 0
(no limit), and suppose no block group has room to fit
ffe_ctl->num_bytes but some have room to meet ffe_ctl->min_alloc_size
(i.e. max_extent_size > num_bytes >= min_alloc_size). Then, the
following occurs:

- With the SINGLE raid_index, it reaches the chunk allocation checking
  code
- The check returns true because we can activate a new zone (no limit)
- But, before allocating the chunk, it iterates to the next raid index
  (RAID5)
- Since there are no RAID5 block groups on zoned mode, it again reaches
  the check code
- The check returns false because of btrfs_can_activate_zone()'s
  "if (raid_index != BTRFS_RAID_SINGLE)" part
- That results in returning -ENOSPC without allocating a new chunk

As a result, we end up hitting -ENOSPC too early. Move the check to the
right place in the can_allocate_chunk() hook, and do the active zone
check depending on the allocation flags, not on the raid index.

CC: stable@vger.kernel.org # 5.16
Signed-off-by: Naohiro Aota
Signed-off-by: David Sterba
---
 fs/btrfs/extent-tree.c | 21 +++++++++------------
 fs/btrfs/zoned.c       |  5 ++---
 fs/btrfs/zoned.h       |  5 ++---
 3 files changed, 13 insertions(+), 18 deletions(-)

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 910bb08..d89273c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3981,6 +3981,15 @@ static bool can_allocate_chunk(struct btrfs_fs_info *fs_info,
 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
 		return true;
 	case BTRFS_EXTENT_ALLOC_ZONED:
+		/*
+		 * If we have enough free space left in an already
+		 * active block group and we can't activate any other
+		 * zone now, do not allow allocating a new chunk and
+		 * let find_free_extent() retry with a smaller size.
+		 */
+		if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size &&
+		    !btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
+			return false;
 		return true;
 	default:
 		BUG();
@@ -4027,18 +4036,6 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
 		return 0;
 	}
 
-	if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size &&
-	    !btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->index)) {
-		/*
-		 * If we have enough free space left in an already active block
-		 * group and we can't activate any other zone now, retry the
-		 * active ones with a smaller allocation size.  Returning early
-		 * from here will tell btrfs_reserve_extent() to haven the
-		 * size.
-		 */
-		return -ENOSPC;
-	}
-
 	if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
 		return 1;
 
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 767ff6c..f559d51 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1925,7 +1925,7 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
 	return ret;
 }
 
-bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, int raid_index)
+bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
 {
 	struct btrfs_device *device;
 	bool ret = false;
@@ -1934,8 +1934,7 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, int raid_index
 		return true;
 
 	/* Non-single profiles are not supported yet */
-	if (raid_index != BTRFS_RAID_SINGLE)
-		return false;
+	ASSERT((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0);
 
 	/* Check if there is a device with active zones left */
 	mutex_lock(&fs_devices->device_list_mutex);
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index a7b4cd6..cbf016a 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -73,8 +73,7 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
 					    u64 logical, u64 length);
 bool btrfs_zone_activate(struct btrfs_block_group *block_group);
 int btrfs_zone_finish(struct btrfs_block_group *block_group);
-bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
-			     int raid_index);
+bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags);
 void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
 			     u64 length);
 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
@@ -226,7 +225,7 @@ static inline int btrfs_zone_finish(struct btrfs_block_group *block_group)
 }
 
 static inline bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
					   int raid_index)
+					   u64 flags)
 {
 	return true;
 }
-- 
2.7.4
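
For readers following along outside the kernel tree, below is a minimal
standalone userspace sketch of the failure mode described in the commit
message. It is not the btrfs code: the names (can_activate_zone_old/new,
find_free_extent_sim) and the flattened control flow are invented for
illustration, assuming a zoned device with max_active_zones == 0 (no
limit) and no existing block group large enough for num_bytes.

/*
 * Standalone illustration only -- NOT btrfs code. All names here are
 * invented; the control flow is a simplification of the walk-through
 * in the commit message above.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum { RAID_SINGLE, RAID5, NR_RAID_TYPES };

/* The device under consideration: max_active_zones == 0 means no limit. */
static const unsigned int max_active_zones = 0;

/* Pre-patch btrfs_can_activate_zone(): keyed on the raid index. */
static bool can_activate_zone_old(int raid_index)
{
	/* Bails out for any non-SINGLE index before looking at the limit. */
	if (raid_index != RAID_SINGLE)
		return false;
	return max_active_zones == 0;	/* device scan elided */
}

/*
 * Post-patch: keyed on the allocation flags; the profile is only
 * asserted, so the "no limit" case is always honored.
 */
static bool can_activate_zone_new(void)
{
	return max_active_zones == 0;	/* device scan elided */
}

/*
 * Mimics the allocator when max_extent_size > num_bytes >=
 * min_alloc_size: no existing block group fits, so allocating a new
 * chunk is the only way to satisfy the allocation.
 */
static int find_free_extent_sim(bool pre_patch)
{
	for (int index = 0; index < NR_RAID_TYPES; index++) {
		/*
		 * Pre-patch, the check ran per raid index in
		 * find_free_extent_update_loop(). It passes for SINGLE
		 * but fails for RAID5, returning -ENOSPC before the
		 * chunk-allocation stage is ever reached.
		 */
		if (pre_patch && !can_activate_zone_old(index))
			return -ENOSPC;
	}
	/* Post-patch, the check runs once, in can_allocate_chunk(). */
	if (!pre_patch && !can_activate_zone_new())
		return -ENOSPC;
	return 0;	/* new chunk allocated */
}

int main(void)
{
	printf("pre-patch:  %d\n", find_free_extent_sim(true));		/* -28 */
	printf("post-patch: %d\n", find_free_extent_sim(false));	/*  0  */
	return 0;
}

Compiled with e.g. "cc -std=c99", the sketch prints -28 (-ENOSPC) for
the pre-patch flow and 0 for the post-patch flow, matching the bullet
walk-through in the commit message.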