bcache: fixup btree_cache_wait list damage
authorMingzhe Zou <mingzhe.zou@easystack.cn>
Thu, 15 Jun 2023 12:12:23 +0000 (20:12 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 19 Jul 2023 14:22:10 +0000 (16:22 +0200)
commit f0854489fc07d2456f7cc71a63f4faf9c716ffbe upstream.

We get a kernel crash about "list_add corruption. next->prev should be
prev (ffff9c801bc01210), but was ffff9c77b688237c.
(next=ffffae586d8afe68)."

crash> struct list_head 0xffff9c801bc01210
struct list_head {
  next = 0xffffae586d8afe68,
  prev = 0xffffae586d8afe68
}
crash> struct list_head 0xffff9c77b688237c
struct list_head {
  next = 0x0,
  prev = 0x0
}
crash> struct list_head 0xffffae586d8afe68
struct list_head struct: invalid kernel virtual address: ffffae586d8afe68  type: "gdb_readmem_callback"
Cannot access memory at address 0xffffae586d8afe68

[230469.019492] Call Trace:
[230469.032041]  prepare_to_wait+0x8a/0xb0
[230469.044363]  ? bch_btree_keys_free+0x6c/0xc0 [escache]
[230469.056533]  mca_cannibalize_lock+0x72/0x90 [escache]
[230469.068788]  mca_alloc+0x2ae/0x450 [escache]
[230469.080790]  bch_btree_node_get+0x136/0x2d0 [escache]
[230469.092681]  bch_btree_check_thread+0x1e1/0x260 [escache]
[230469.104382]  ? finish_wait+0x80/0x80
[230469.115884]  ? bch_btree_check_recurse+0x1a0/0x1a0 [escache]
[230469.127259]  kthread+0x112/0x130
[230469.138448]  ? kthread_flush_work_fn+0x10/0x10
[230469.149477]  ret_from_fork+0x35/0x40

bch_btree_check_thread() and bch_dirty_init_thread() may call
mca_cannibalize() to cannibalize other cached btree nodes. Only one thread
can do this at a time, so the ops of the other threads are added to the
btree_cache_wait list.

We must call finish_wait() to remove the op from btree_cache_wait before
freeing its memory. Otherwise, the list will be damaged. We should also
call bch_cannibalize_unlock() to release btree_cache_alloc_lock and wake
up the other waiters.

Fixes: 8e7102273f59 ("bcache: make bch_btree_check() to be multithreaded")
Fixes: b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be multithreaded")
Cc: stable@vger.kernel.org
Signed-off-by: Mingzhe Zou <mingzhe.zou@easystack.cn>
Signed-off-by: Coly Li <colyli@suse.de>
Link: https://lore.kernel.org/r/20230615121223.22502-7-colyli@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/writeback.c

index 147c493..df9986a 100644 (file)
@@ -885,7 +885,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-static void bch_cannibalize_unlock(struct cache_set *c)
+void bch_cannibalize_unlock(struct cache_set *c)
 {
        spin_lock(&c->btree_cannibalize_lock);
        if (c->btree_cache_alloc_lock == current) {
@@ -1968,6 +1968,15 @@ static int bch_btree_check_thread(void *arg)
                        c->gc_stats.nodes++;
                        bch_btree_op_init(&op, 0);
                        ret = bcache_btree(check_recurse, p, c->root, &op);
+                       /*
+                        * The op may be added to cache_set's btree_cache_wait
+                        * in mca_cannibalize(), must ensure it is removed from
+                        * the list and release btree_cache_alloc_lock before
+                        * free op memory.
+                        * Otherwise, the btree_cache_wait will be damaged.
+                        */
+                       bch_cannibalize_unlock(c);
+                       finish_wait(&c->btree_cache_wait, &(&op)->wait);
                        if (ret)
                                goto out;
                }
index 1b5fdbc..a2920bb 100644 (file)
@@ -282,6 +282,7 @@ void bch_initial_gc_finish(struct cache_set *c);
 void bch_moving_gc(struct cache_set *c);
 int bch_btree_check(struct cache_set *c);
 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
+void bch_cannibalize_unlock(struct cache_set *c);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
index 0285b67..7bac2a8 100644 (file)
@@ -890,6 +890,16 @@ static int bch_root_node_dirty_init(struct cache_set *c,
        if (ret < 0)
                pr_warn("sectors dirty init failed, ret=%d!\n", ret);
 
+       /*
+        * The op may be added to cache_set's btree_cache_wait
+        * in mca_cannibalize(), must ensure it is removed from
+        * the list and release btree_cache_alloc_lock before
+        * free op memory.
+        * Otherwise, the btree_cache_wait will be damaged.
+        */
+       bch_cannibalize_unlock(c);
+       finish_wait(&c->btree_cache_wait, &(&op.op)->wait);
+
        return ret;
 }