Merge tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 569f489..fd121a6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -906,7 +906,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-static void bch_cannibalize_unlock(struct cache_set *c)
+void bch_cannibalize_unlock(struct cache_set *c)
 {
        spin_lock(&c->btree_cannibalize_lock);
        if (c->btree_cache_alloc_lock == current) {
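
A note on the hunk above: making bch_cannibalize_unlock() non-static (presumably so call sites outside this function, including the one added later in this patch, can release the cannibalize lock too) relies on the rule the comment describes: whichever thread may hold the lock must be able to drop it, and dropping it must be a harmless no-op for any thread that does not own it. The following is a minimal userspace sketch of that ownership-checked unlock pattern, assuming a pthread mutex in place of the kernel spinlock; cache_set_model, cannibalize_trylock() and cannibalize_unlock() are illustrative names, not bcache API.

/*
 * Userspace model of an ownership-checked trylock/unlock pair: only the
 * thread recorded as owner actually releases the lock, so unconditional
 * unlock calls from non-owners are harmless.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_set_model {
        pthread_mutex_t lock;   /* models c->btree_cannibalize_lock */
        pthread_t owner;        /* models c->btree_cache_alloc_lock */
        bool owned;
};

static bool cannibalize_trylock(struct cache_set_model *c)
{
        bool got = false;

        pthread_mutex_lock(&c->lock);
        if (!c->owned) {
                /* free: record the calling thread as owner */
                c->owned = true;
                c->owner = pthread_self();
                got = true;
        } else if (pthread_equal(c->owner, pthread_self())) {
                /* repeated attempt by the current owner succeeds */
                got = true;
        }
        pthread_mutex_unlock(&c->lock);
        return got;
}

static void cannibalize_unlock(struct cache_set_model *c)
{
        pthread_mutex_lock(&c->lock);
        /* release only if the calling thread is the recorded owner */
        if (c->owned && pthread_equal(c->owner, pthread_self()))
                c->owned = false;
        pthread_mutex_unlock(&c->lock);
}

int main(void)
{
        struct cache_set_model c = { .lock = PTHREAD_MUTEX_INITIALIZER };

        if (cannibalize_trylock(&c))
                printf("lock taken\n");
        cannibalize_unlock(&c);         /* owner drops it */
        cannibalize_unlock(&c);         /* second call is a harmless no-op */
        return 0;
}
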
@@ -1111,10 +1111,12 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
                                     struct btree *parent)
 {
        BKEY_PADDED(key) k;
-       struct btree *b = ERR_PTR(-EAGAIN);
+       struct btree *b;
 
        mutex_lock(&c->bucket_lock);
 retry:
+       /* return ERR_PTR(-EAGAIN) when it fails */
+       b = ERR_PTR(-EAGAIN);
        if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
                goto err;
 
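
The key change in this hunk is resetting b to ERR_PTR(-EAGAIN) at the retry label, so every pass through the allocator starts from a known error value and the err path cannot hand back whatever an earlier iteration left in b. Below is a self-contained userspace sketch of that pattern; ERR_PTR()/PTR_ERR()/IS_ERR() are local re-implementations modelled on include/linux/err.h, and bucket_alloc()/node_alloc() are made-up stand-ins rather than bcache functions.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)     { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static int attempts;

/* made-up stand-in for __bch_bucket_alloc_set(): fail the first two calls */
static int bucket_alloc(void)
{
        return attempts++ < 2 ? -1 : 0;
}

static char node;       /* dummy object handed out on success */

static void *node_alloc(int max_retries)
{
        void *b;

retry:
        /*
         * Reset the return value on every pass, as the patch does, so a
         * failure always reports -EAGAIN rather than stale contents of b.
         */
        b = ERR_PTR(-EAGAIN);
        if (bucket_alloc()) {
                if (max_retries-- > 0)
                        goto retry;
                goto err;
        }

        b = &node;      /* success: hand out a real pointer */
err:
        return b;
}

int main(void)
{
        void *b = node_alloc(1);        /* two failures, only one retry allowed */

        if (IS_ERR(b))
                printf("allocation failed: %ld\n", PTR_ERR(b));
        else
                printf("allocation succeeded\n");
        return 0;
}
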
@@ -1159,7 +1161,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
 {
        struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
 
-       if (!IS_ERR_OR_NULL(n)) {
+       if (!IS_ERR(n)) {
                mutex_lock(&n->write_lock);
                bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
                bkey_copy_key(&n->key, &b->key);
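
This hunk, and the matching IS_ERR_OR_NULL() to IS_ERR() conversions in the GC paths further down, lean on the allocator contract established above: on failure the node allocation returns an ERR_PTR(), never NULL, so the NULL half of the old check was dead. A small self-contained illustration of why IS_ERR() alone is then enough; the err.h helpers are re-implemented locally for the sketch and node_alloc() is a made-up stand-in.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *p)
{
        return !p || IS_ERR(p);
}

/*
 * Stand-in allocator with the post-patch contract: a valid pointer or an
 * ERR_PTR(), never NULL.
 */
static void *node_alloc(int fail)
{
        return fail ? ERR_PTR(-EAGAIN) : malloc(64);
}

int main(void)
{
        void *ok  = node_alloc(0);
        void *bad = node_alloc(1);

        /* IS_ERR() alone classifies both outcomes correctly */
        printf("ok:  IS_ERR=%d IS_ERR_OR_NULL=%d\n", IS_ERR(ok), IS_ERR_OR_NULL(ok));
        printf("bad: IS_ERR=%d IS_ERR_OR_NULL=%d\n", IS_ERR(bad), IS_ERR_OR_NULL(bad));
        free(ok);
        return 0;
}
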
@@ -1361,7 +1363,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        memset(new_nodes, 0, sizeof(new_nodes));
        closure_init_stack(&cl);
 
-       while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
+       while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
                keys += r[nodes++].keys;
 
        blocks = btree_default_blocks(b->c) * 2 / 3;
@@ -1373,7 +1375,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 
        for (i = 0; i < nodes; i++) {
                new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
-               if (IS_ERR_OR_NULL(new_nodes[i]))
+               if (IS_ERR(new_nodes[i]))
                        goto out_nocoalesce;
        }
 
@@ -1508,7 +1510,7 @@ out_nocoalesce:
        bch_keylist_free(&keylist);
 
        for (i = 0; i < nodes; i++)
-               if (!IS_ERR_OR_NULL(new_nodes[i])) {
+               if (!IS_ERR(new_nodes[i])) {
                        btree_node_free(new_nodes[i]);
                        rw_unlock(true, new_nodes[i]);
                }
@@ -1690,7 +1692,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
        if (should_rewrite) {
                n = btree_node_alloc_replacement(b, NULL);
 
-               if (!IS_ERR_OR_NULL(n)) {
+               if (!IS_ERR(n)) {
                        bch_btree_node_write_sync(n);
 
                        bch_btree_set_root(n);
@@ -1989,6 +1991,15 @@ static int bch_btree_check_thread(void *arg)
                        c->gc_stats.nodes++;
                        bch_btree_op_init(&op, 0);
                        ret = bcache_btree(check_recurse, p, c->root, &op);
+                       /*
+                        * The op may have been added to cache_set's
+                        * btree_cache_wait in mca_cannibalize(), so it must be
+                        * removed from that list and btree_cache_alloc_lock
+                        * must be released before the op memory is freed.
+                        * Otherwise the btree_cache_wait list will be damaged.
+                        */
+                       bch_cannibalize_unlock(c);
+                       finish_wait(&c->btree_cache_wait, &op.wait);
                        if (ret)
                                goto out;
                }
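
The point of the new unlock/finish_wait pair is object lifetime: op, together with the wait entry embedded in it, is a stack variable of the checker thread, so an entry still linked on c->btree_cache_wait when the function returns would leave the list pointing into a dead stack frame. The sketch below models that failure mode in userspace with a plain singly linked list; prepare_to_wait() and finish_wait() here are simplified stand-ins, not the kernel wait-queue API.

#include <stdio.h>

struct wait_entry {
        struct wait_entry *next;
        const char *owner;
};

struct wait_list {
        struct wait_entry *head;
};

static void prepare_to_wait(struct wait_list *wl, struct wait_entry *e,
                            const char *owner)
{
        e->owner = owner;
        e->next = wl->head;     /* link the caller's (stack) entry */
        wl->head = e;
}

static void finish_wait(struct wait_list *wl, struct wait_entry *e)
{
        struct wait_entry **pp = &wl->head;

        while (*pp) {           /* unlink e if it is still on the list */
                if (*pp == e) {
                        *pp = e->next;
                        return;
                }
                pp = &(*pp)->next;
        }
}

/* models bch_btree_check_thread(): the wait entry is a stack variable */
static void checker(struct wait_list *wl, int buggy)
{
        struct wait_entry op_wait;      /* dies when this function returns */

        prepare_to_wait(wl, &op_wait, "checker");
        /* ... blocked in mca_cannibalize(), then woken and finished ... */
        if (!buggy)
                finish_wait(wl, &op_wait);      /* the fix: always unlink */
}

int main(void)
{
        struct wait_list wl = { 0 };

        checker(&wl, 0);
        printf("fixed path: head=%p (empty, safe)\n", (void *)wl.head);

        checker(&wl, 1);
        printf("buggy path: head=%p (dangles into a dead stack frame)\n",
               (void *)wl.head);
        return 0;
}
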