bcache: improve multithreaded bch_sectors_dirty_init()
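
In short, as the hunks below show, bch_sectors_dirty_init() now:

- keeps struct bch_dirty_init_state on the caller's stack instead of
  kzalloc()'ing it, removing the allocation-failure path and the final
  kfree();
- holds a read lock on the btree root (rw_lock(0, c->root,
  c->root->level)) across the whole dirty-sector walk, dropping it on
  every exit path;
- drops the local name[32] buffer and passes the thread-name format
  string straight to kthread_run();
- bumps state.started only after kthread_run() succeeds, so the
  error-unwind loop stays balanced;
- waits with wait_event() instead of wait_event_interruptible(), since
  returning early would leave worker threads pointing at the now-dead
  on-stack state.

A userspace sketch of this on-stack-state/wait handshake follows the
diff.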
[platform/kernel/linux-starfive.git] / drivers/md/bcache/writeback.c
index 9ee0005..d24c094 100644
@@ -948,10 +948,10 @@ void bch_sectors_dirty_init(struct bcache_device *d)
        struct btree_iter iter;
        struct sectors_dirty_init op;
        struct cache_set *c = d->c;
-       struct bch_dirty_init_state *state;
-       char name[32];
+       struct bch_dirty_init_state state;
 
        /* Just count root keys if no leaf node */
+       rw_lock(0, c->root, c->root->level);
        if (c->root->level == 0) {
                bch_btree_op_init(&op.op, -1);
                op.inode = d->id;
@@ -961,54 +961,42 @@ void bch_sectors_dirty_init(struct bcache_device *d)
                for_each_key_filter(&c->root->keys,
                                    k, &iter, bch_ptr_invalid)
                        sectors_dirty_init_fn(&op.op, c->root, k);
+               rw_unlock(0, c->root);
                return;
        }
 
-       state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
-       if (!state) {
-               pr_warn("sectors dirty init failed: cannot allocate memory\n");
-               return;
-       }
-
-       state->c = c;
-       state->d = d;
-       state->total_threads = bch_btre_dirty_init_thread_nr();
-       state->key_idx = 0;
-       spin_lock_init(&state->idx_lock);
-       atomic_set(&state->started, 0);
-       atomic_set(&state->enough, 0);
-       init_waitqueue_head(&state->wait);
-
-       for (i = 0; i < state->total_threads; i++) {
-               /* Fetch latest state->enough earlier */
+       state.c = c;
+       state.d = d;
+       state.total_threads = bch_btre_dirty_init_thread_nr();
+       state.key_idx = 0;
+       spin_lock_init(&state.idx_lock);
+       atomic_set(&state.started, 0);
+       atomic_set(&state.enough, 0);
+       init_waitqueue_head(&state.wait);
+
+       for (i = 0; i < state.total_threads; i++) {
+               /* Fetch latest state.enough earlier */
                smp_mb__before_atomic();
-               if (atomic_read(&state->enough))
+               if (atomic_read(&state.enough))
                        break;
 
-               state->infos[i].state = state;
-               atomic_inc(&state->started);
-               snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
-
-               state->infos[i].thread =
-                       kthread_run(bch_dirty_init_thread,
-                                   &state->infos[i],
-                                   name);
-               if (IS_ERR(state->infos[i].thread)) {
+               state.infos[i].state = &state;
+               state.infos[i].thread =
+                       kthread_run(bch_dirty_init_thread, &state.infos[i],
+                                   "bch_dirtcnt[%d]", i);
+               if (IS_ERR(state.infos[i].thread)) {
                        pr_err("fails to run thread bch_dirty_init[%d]\n", i);
                        for (--i; i >= 0; i--)
-                               kthread_stop(state->infos[i].thread);
+                               kthread_stop(state.infos[i].thread);
                        goto out;
                }
+               atomic_inc(&state.started);
        }
 
-       /*
-        * Must wait for all threads to stop.
-        */
-       wait_event_interruptible(state->wait,
-                atomic_read(&state->started) == 0);
-
 out:
-       kfree(state);
+       /* Must wait for all threads to stop. */
+       wait_event(state.wait, atomic_read(&state.started) == 0);
+       rw_unlock(0, c->root);
 }
 
 void bch_cached_dev_writeback_init(struct cached_dev *dc)
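
The pattern worth noting above: because state lives on
bch_sectors_dirty_init()'s stack, the function must not return until the
last worker thread has stopped touching it, which is what the started
counter and the uninterruptible wait_event() enforce. (The worker side,
bch_dirty_init_thread(), decrements the counter and wakes state.wait
when it finishes; that half is not shown in these hunks.) Below is a
minimal userspace analogue of the handshake using POSIX threads; it is
a sketch, not kernel code, and every name in it (init_state, worker,
NR_WORKERS) is illustrative rather than a bcache symbol.

	#include <pthread.h>

	#define NR_WORKERS 4		/* stand-in for bch_btre_dirty_init_thread_nr() */

	struct init_state {		/* stand-in for struct bch_dirty_init_state */
		pthread_mutex_t lock;
		pthread_cond_t wait;	/* plays the role of state.wait */
		int started;		/* plays the role of state.started */
	};

	static void *worker(void *arg)
	{
		struct init_state *state = arg;

		/* ... walk a share of the keys, as bch_dirty_init_thread() does ... */

		pthread_mutex_lock(&state->lock);
		/* Last worker out wakes the creator, like wake_up(&state->wait). */
		if (--state->started == 0)
			pthread_cond_signal(&state->wait);
		pthread_mutex_unlock(&state->lock);
		return NULL;
	}

	int main(void)
	{
		struct init_state state = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.wait = PTHREAD_COND_INITIALIZER,
			.started = 0,
		};
		pthread_t tid;
		int i;

		for (i = 0; i < NR_WORKERS; i++) {
			pthread_mutex_lock(&state.lock);
			state.started++;
			pthread_mutex_unlock(&state.lock);

			if (pthread_create(&tid, NULL, worker, &state)) {
				/* Creation failed: take back the count we just added. */
				pthread_mutex_lock(&state.lock);
				state.started--;
				pthread_mutex_unlock(&state.lock);
				break;
			}
			pthread_detach(tid);
		}

		/*
		 * Equivalent of wait_event(state.wait, atomic_read(&started) == 0):
		 * 'state' must not go out of scope while any worker can touch it.
		 */
		pthread_mutex_lock(&state.lock);
		while (state.started != 0)
			pthread_cond_wait(&state.wait, &state.lock);
		pthread_mutex_unlock(&state.lock);

		return 0;
	}

One liberty taken versus the patch: the sketch increments started
before spawning each worker, so a worker that finishes instantly can
never drive the counter negative, whereas the patch increments it only
after a successful kthread_run().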