Revert "bcache: Kill btree_io_wq"
authorKai Krakow <kai@kaishome.de>
Wed, 10 Feb 2021 05:07:25 +0000 (13:07 +0800)
committerJens Axboe <axboe@kernel.dk>
Wed, 10 Feb 2021 15:06:00 +0000 (08:06 -0700)
This reverts commit 56b30770b27d54d68ad51eccc6d888282b568cee.

With the btree using the `system_wq`, I seem to see a lot more desktop
latency than I should.

After some more investigation, it looks like the original assumption
of commit 56b3077 no longer holds, and bcache has a very high potential of
congesting the `system_wq`. In turn, this introduces laggy desktop
performance, IO stalls (at least with btrfs), and input events may be
delayed.

So let's revert this. It's important to note that the semantics of
using `system_wq` previously meant that `btree_io_wq` should be created
before and destroyed after the other bcache wqs to keep the same
assumptions.

Cc: Coly Li <colyli@suse.de>
Cc: stable@vger.kernel.org # 5.4+
Signed-off-by: Kai Krakow <kai@kaishome.de>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/super.c

index d7a8432..2b8c7dd 100644 (file)
@@ -1046,5 +1046,7 @@ void bch_debug_exit(void);
 void bch_debug_init(void);
 void bch_request_exit(void);
 int bch_request_init(void);
+void bch_btree_exit(void);
+int bch_btree_init(void);
 
 #endif /* _BCACHE_H */
index 910df24..952f022 100644 (file)
@@ -99,6 +99,8 @@
 #define PTR_HASH(c, k)                                                 \
        (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
 
+static struct workqueue_struct *btree_io_wq;
+
 #define insert_lock(s, b)      ((b)->level <= (s)->lock)
 
 
@@ -308,7 +310,7 @@ static void __btree_node_write_done(struct closure *cl)
        btree_complete_write(b, w);
 
        if (btree_node_dirty(b))
-               schedule_delayed_work(&b->work, 30 * HZ);
+               queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
 
        closure_return_with_destructor(cl, btree_node_write_unlock);
 }
@@ -481,7 +483,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
        BUG_ON(!i->keys);
 
        if (!btree_node_dirty(b))
-               schedule_delayed_work(&b->work, 30 * HZ);
+               queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
 
        set_btree_node_dirty(b);
 
@@ -2764,3 +2766,18 @@ void bch_keybuf_init(struct keybuf *buf)
        spin_lock_init(&buf->lock);
        array_allocator_init(&buf->freelist);
 }
+
+void bch_btree_exit(void)
+{
+       if (btree_io_wq)
+               destroy_workqueue(btree_io_wq);
+}
+
+int __init bch_btree_init(void)
+{
+       btree_io_wq = create_singlethread_workqueue("bch_btree_io");
+       if (!btree_io_wq)
+               return -ENOMEM;
+
+       return 0;
+}
index dfbaf6a..97405ae 100644 (file)
@@ -2821,6 +2821,7 @@ static void bcache_exit(void)
                destroy_workqueue(bcache_wq);
        if (bch_journal_wq)
                destroy_workqueue(bch_journal_wq);
+       bch_btree_exit();
 
        if (bcache_major)
                unregister_blkdev(bcache_major, "bcache");
@@ -2876,6 +2877,9 @@ static int __init bcache_init(void)
                return bcache_major;
        }
 
+       if (bch_btree_init())
+               goto err;
+
        bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
        if (!bcache_wq)
                goto err;