btrfs: make the bg_reclaim_threshold per-space info
author: Josef Bacik <josef@toxicpanda.com>
Tue, 29 Mar 2022 08:56:06 +0000 (01:56 -0700)
committer: David Sterba <dsterba@suse.com>
Mon, 16 May 2022 15:03:11 +0000 (17:03 +0200)
For non-zoned file systems it's useful to have the auto reclaim feature,
however there are different use cases for non-zoned, for example we may
not want to reclaim metadata chunks ever, only data chunks.  Move this
sysfs flag to per-space_info.  This won't affect current users because
this tunable only ever did anything for zoned, and that is currently
hidden behind BTRFS_CONFIG_DEBUG.

Tested-by: Pankaj Raghav <p.raghav@samsung.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
[ jth restore global bg_reclaim_threshold ]
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/free-space-cache.c
fs/btrfs/space-info.c
fs/btrfs/space-info.h
fs/btrfs/sysfs.c
fs/btrfs/zoned.h

index 01a408d..ef84bc5 100644 (file)
@@ -2630,16 +2630,19 @@ out:
 static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
                                        u64 bytenr, u64 size, bool used)
 {
-       struct btrfs_fs_info *fs_info = block_group->fs_info;
+       struct btrfs_space_info *sinfo = block_group->space_info;
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        u64 offset = bytenr - block_group->start;
        u64 to_free, to_unusable;
-       const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
+       int bg_reclaim_threshold = 0;
        bool initial = (size == block_group->length);
        u64 reclaimable_unusable;
 
        WARN_ON(!initial && offset + size > block_group->zone_capacity);
 
+       if (!initial)
+               bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
+
        spin_lock(&ctl->tree_lock);
        if (!used)
                to_free = size;
index 4de2c82..2dd8754 100644 (file)
@@ -181,6 +181,12 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
                found->full = 0;
 }
 
+/*
+ * Block groups with more than this value (percents) of unusable space will be
+ * scheduled for background reclaim.
+ */
+#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH                     (75)
+
 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
 {
 
@@ -203,6 +209,9 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
        INIT_LIST_HEAD(&space_info->priority_tickets);
        space_info->clamp = 1;
 
+       if (btrfs_is_zoned(info))
+               space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
+
        ret = btrfs_sysfs_add_space_info_type(info, space_info);
        if (ret)
                return ret;
index d841fed..a803e29 100644 (file)
@@ -24,6 +24,12 @@ struct btrfs_space_info {
                                   the space info if we had an ENOSPC in the
                                   allocator. */
 
+       /*
+        * Once a block group drops below this threshold (percents) we'll
+        * schedule it for reclaim.
+        */
+       int bg_reclaim_threshold;
+
        int clamp;              /* Used to scale our threshold for preemptive
                                   flushing. The value is >> clamp, so turns
                                   out to be a 2^clamp divisor. */
index d742330..b9fdb22 100644 (file)
@@ -720,6 +720,42 @@ SPACE_INFO_ATTR(bytes_zone_unusable);
 SPACE_INFO_ATTR(disk_used);
 SPACE_INFO_ATTR(disk_total);
 
+static ssize_t btrfs_sinfo_bg_reclaim_threshold_show(struct kobject *kobj,
+                                                    struct kobj_attribute *a,
+                                                    char *buf)
+{
+       struct btrfs_space_info *space_info = to_space_info(kobj);
+       ssize_t ret;
+
+       ret = sysfs_emit(buf, "%d\n", READ_ONCE(space_info->bg_reclaim_threshold));
+
+       return ret;
+}
+
+static ssize_t btrfs_sinfo_bg_reclaim_threshold_store(struct kobject *kobj,
+                                                     struct kobj_attribute *a,
+                                                     const char *buf, size_t len)
+{
+       struct btrfs_space_info *space_info = to_space_info(kobj);
+       int thresh;
+       int ret;
+
+       ret = kstrtoint(buf, 10, &thresh);
+       if (ret)
+               return ret;
+
+       if (thresh != 0 && (thresh <= 50 || thresh > 100))
+               return -EINVAL;
+
+       WRITE_ONCE(space_info->bg_reclaim_threshold, thresh);
+
+       return len;
+}
+
+BTRFS_ATTR_RW(space_info, bg_reclaim_threshold,
+             btrfs_sinfo_bg_reclaim_threshold_show,
+             btrfs_sinfo_bg_reclaim_threshold_store);
+
 /*
  * Allocation information about block group types.
  *
@@ -736,6 +772,7 @@ static struct attribute *space_info_attrs[] = {
        BTRFS_ATTR_PTR(space_info, bytes_zone_unusable),
        BTRFS_ATTR_PTR(space_info, disk_used),
        BTRFS_ATTR_PTR(space_info, disk_total),
+       BTRFS_ATTR_PTR(space_info, bg_reclaim_threshold),
        NULL,
 };
 ATTRIBUTE_GROUPS(space_info);
index 6dee762..12aaacc 100644 (file)
 #include "block-group.h"
 #include "btrfs_inode.h"
 
-/*
- * Block groups with more than this value (percents) of unusable space will be
- * scheduled for background reclaim.
- */
-#define BTRFS_DEFAULT_RECLAIM_THRESH           75
+#define BTRFS_DEFAULT_RECLAIM_THRESH                                   (75)
 
 struct btrfs_zoned_device_info {
        /*