bcache: improve multithreaded bch_btree_check()
authorColy Li <colyli@suse.de>
Tue, 24 May 2022 10:23:33 +0000 (18:23 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 9 Jun 2022 08:23:28 +0000 (10:23 +0200)
commit 622536443b6731ec82c563aae7807165adbe9178 upstream.

Commit 8e7102273f59 ("bcache: make bch_btree_check() to be
multithreaded") makes bch_btree_check() to be much faster when checking
all btree nodes during cache device registration. But it isn't in ideal
shape yet; it can still be improved.

This patch does the following things to improve the current parallel
btree node check by multiple threads in bch_btree_check(),
- Add read lock to root node while checking all the btree nodes with
  multiple threads. Although it is not currently mandatory, it is
  good to have a read lock in the code logic.
- Remove local variable 'char name[32]', and generate kernel thread name
  string directly when calling kthread_run().
- Allocate local variable "struct btree_check_state check_state" on the
  stack and avoid unnecessary dynamic memory allocation for it.
- Reduce BCH_BTR_CHKTHREAD_MAX from 64 to 12 which is enough indeed.
- Increase check_state->started to count created kernel thread after it
  succeeds to create.
- When wait for all checking kernel threads to finish, use wait_event()
  to replace wait_event_interruptible().

With this change, the code is clearer, and some potential error
conditions are avoided.

Fixes: 8e7102273f59 ("bcache: make bch_btree_check() to be multithreaded")
Signed-off-by: Coly Li <colyli@suse.de>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20220524102336.10684-2-colyli@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h

index 8eecc9df319b56f14a8038dfba1d9fa2f1ce564b..7b6f8bfef92708624ea83708084f69f985362fb7 100644 (file)
@@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c)
        int i;
        struct bkey *k = NULL;
        struct btree_iter iter;
-       struct btree_check_state *check_state;
-       char name[32];
+       struct btree_check_state check_state;
 
        /* check and mark root node keys */
        for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
@@ -2018,63 +2017,58 @@ int bch_btree_check(struct cache_set *c)
        if (c->root->level == 0)
                return 0;
 
-       check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
-       if (!check_state)
-               return -ENOMEM;
-
-       check_state->c = c;
-       check_state->total_threads = bch_btree_chkthread_nr();
-       check_state->key_idx = 0;
-       spin_lock_init(&check_state->idx_lock);
-       atomic_set(&check_state->started, 0);
-       atomic_set(&check_state->enough, 0);
-       init_waitqueue_head(&check_state->wait);
+       check_state.c = c;
+       check_state.total_threads = bch_btree_chkthread_nr();
+       check_state.key_idx = 0;
+       spin_lock_init(&check_state.idx_lock);
+       atomic_set(&check_state.started, 0);
+       atomic_set(&check_state.enough, 0);
+       init_waitqueue_head(&check_state.wait);
 
+       rw_lock(0, c->root, c->root->level);
        /*
         * Run multiple threads to check btree nodes in parallel,
-        * if check_state->enough is non-zero, it means current
+        * if check_state.enough is non-zero, it means current
         * running check threads are enough, unncessary to create
         * more.
         */
-       for (i = 0; i < check_state->total_threads; i++) {
-               /* fetch latest check_state->enough earlier */
+       for (i = 0; i < check_state.total_threads; i++) {
+               /* fetch latest check_state.enough earlier */
                smp_mb__before_atomic();
-               if (atomic_read(&check_state->enough))
+               if (atomic_read(&check_state.enough))
                        break;
 
-               check_state->infos[i].result = 0;
-               check_state->infos[i].state = check_state;
-               snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
-               atomic_inc(&check_state->started);
+               check_state.infos[i].result = 0;
+               check_state.infos[i].state = &check_state;
 
-               check_state->infos[i].thread =
+               check_state.infos[i].thread =
                        kthread_run(bch_btree_check_thread,
-                                   &check_state->infos[i],
-                                   name);
-               if (IS_ERR(check_state->infos[i].thread)) {
+                                   &check_state.infos[i],
+                                   "bch_btrchk[%d]", i);
+               if (IS_ERR(check_state.infos[i].thread)) {
                        pr_err("fails to run thread bch_btrchk[%d]\n", i);
                        for (--i; i >= 0; i--)
-                               kthread_stop(check_state->infos[i].thread);
+                               kthread_stop(check_state.infos[i].thread);
                        ret = -ENOMEM;
                        goto out;
                }
+               atomic_inc(&check_state.started);
        }
 
        /*
         * Must wait for all threads to stop.
         */
-       wait_event_interruptible(check_state->wait,
-                                atomic_read(&check_state->started) == 0);
+       wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
 
-       for (i = 0; i < check_state->total_threads; i++) {
-               if (check_state->infos[i].result) {
-                       ret = check_state->infos[i].result;
+       for (i = 0; i < check_state.total_threads; i++) {
+               if (check_state.infos[i].result) {
+                       ret = check_state.infos[i].result;
                        goto out;
                }
        }
 
 out:
-       kfree(check_state);
+       rw_unlock(0, c->root);
        return ret;
 }
 
index 50482107134f12745066dfab31750693c6bba04a..1b5fdbc0d83eba863d4915ee47b3a8c27643694e 100644 (file)
@@ -226,7 +226,7 @@ struct btree_check_info {
        int                             result;
 };
 
-#define BCH_BTR_CHKTHREAD_MAX  64
+#define BCH_BTR_CHKTHREAD_MAX  12
 struct btree_check_state {
        struct cache_set                *c;
        int                             total_threads;