btrfs: scrub: remove btrfs_fs_info::scrub_wr_completion_workers
authorQu Wenruo <wqu@suse.com>
Mon, 12 Jun 2023 07:23:29 +0000 (15:23 +0800)
committerDavid Sterba <dsterba@suse.com>
Mon, 19 Jun 2023 11:59:40 +0000 (13:59 +0200)
Since the scrub rework introduced by commit 2af2aaf98205 ("btrfs:
scrub: introduce structure for new BTRFS_STRIPE_LEN based interface")
and later commits, scrub only needs a single workqueue,
fs_info::scrub_workers.

The scrub_wr_completion_workers workqueue was initially used to handle
the delayed work after write bios finished.  But the new scrub code uses
submit-and-wait for write bios, so all the work is done inside the
scrub_worker.

The last user of fs_info::scrub_wr_completion_workers is removed in
commit 16f93993498b ("btrfs: scrub: remove the old writeback
infrastructure"), so we can safely remove the workqueue.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/fs.h
fs/btrfs/scrub.c

index 5dd24c2..396e2a4 100644 (file)
@@ -642,7 +642,6 @@ struct btrfs_fs_info {
         */
        refcount_t scrub_workers_refcnt;
        struct workqueue_struct *scrub_workers;
-       struct workqueue_struct *scrub_wr_completion_workers;
        struct btrfs_subpage_info *subpage_info;
 
        struct btrfs_discard_ctl discard_ctl;
index 297beae..2c7fdbb 100644 (file)
@@ -2698,17 +2698,12 @@ static void scrub_workers_put(struct btrfs_fs_info *fs_info)
        if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
                                        &fs_info->scrub_lock)) {
                struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
-               struct workqueue_struct *scrub_wr_comp =
-                                               fs_info->scrub_wr_completion_workers;
 
                fs_info->scrub_workers = NULL;
-               fs_info->scrub_wr_completion_workers = NULL;
                mutex_unlock(&fs_info->scrub_lock);
 
                if (scrub_workers)
                        destroy_workqueue(scrub_workers);
-               if (scrub_wr_comp)
-                       destroy_workqueue(scrub_wr_comp);
        }
 }
 
@@ -2719,7 +2714,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
                                                int is_dev_replace)
 {
        struct workqueue_struct *scrub_workers = NULL;
-       struct workqueue_struct *scrub_wr_comp = NULL;
        unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
        int max_active = fs_info->thread_pool_size;
        int ret = -ENOMEM;
@@ -2732,18 +2726,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
        else
                scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
        if (!scrub_workers)
-               goto fail_scrub_workers;
-
-       scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active);
-       if (!scrub_wr_comp)
-               goto fail_scrub_wr_completion_workers;
+               return -ENOMEM;
 
        mutex_lock(&fs_info->scrub_lock);
        if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
-               ASSERT(fs_info->scrub_workers == NULL &&
-                      fs_info->scrub_wr_completion_workers == NULL);
+               ASSERT(fs_info->scrub_workers == NULL);
                fs_info->scrub_workers = scrub_workers;
-               fs_info->scrub_wr_completion_workers = scrub_wr_comp;
                refcount_set(&fs_info->scrub_workers_refcnt, 1);
                mutex_unlock(&fs_info->scrub_lock);
                return 0;
@@ -2754,10 +2742,7 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 
        ret = 0;
 
-       destroy_workqueue(scrub_wr_comp);
-fail_scrub_wr_completion_workers:
        destroy_workqueue(scrub_workers);
-fail_scrub_workers:
        return ret;
 }