Revert "blk-cgroup: synchronize pd_free_fn() from blkg_free_workfn() and blkcg_deacti...
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 11 Mar 2023 09:34:32 +0000 (10:34 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 11 Mar 2023 10:03:45 +0000 (11:03 +0100)
This reverts commit 81c1188905f88b77743d1fdeeedfc8cb7b67787d which is
commit f1c006f1c6850c14040f8337753a63119bba39b9 upstream.

It is reported to cause problems, as only 2 of the 3 patches in the
series were applied to the stable branches.
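
For reference, the change being reverted serialized pd_free_fn()
between blkg_free_workfn() and blkcg_deactivate_policy() with a
per-queue blkcg_mutex and delayed removing blkg->q_node from the queue
list into the work function; the hunks below undo exactly that. A rough
user-space sketch of that locking pattern, for illustration only
(simplified, hypothetical names; a pthread mutex stands in for
q->blkcg_mutex):

  /* Illustration only: a user-space model of the reverted locking pattern.
   * Names here (free_workfn, deactivate_policy, pd_present) are simplified
   * stand-ins, and a pthread mutex stands in for q->blkcg_mutex.
   */
  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t blkcg_mutex = PTHREAD_MUTEX_INITIALIZER;
  static int pd_present = 1;            /* models blkg->pd[i] being allocated */

  static void pd_free_fn(void)          /* models blkcg_policy[i]->pd_free_fn() */
  {
          printf("policy data freed\n");
          pd_present = 0;
  }

  /* Models blkg_free_workfn(): free policy data and drop the queue-list
   * entry while holding the mutex, so it cannot race with deactivation. */
  static void *free_workfn(void *arg)
  {
          (void)arg;
          pthread_mutex_lock(&blkcg_mutex);
          if (pd_present)
                  pd_free_fn();
          /* list_del_init(&blkg->q_node) happened here in the reverted code */
          pthread_mutex_unlock(&blkcg_mutex);
          return NULL;
  }

  /* Models blkcg_deactivate_policy(): free policy data under the same mutex. */
  static void *deactivate_policy(void *arg)
  {
          (void)arg;
          pthread_mutex_lock(&blkcg_mutex);
          if (pd_present)
                  pd_free_fn();
          pthread_mutex_unlock(&blkcg_mutex);
          return NULL;
  }

  int main(void)
  {
          pthread_t a, b;

          pthread_create(&a, NULL, free_workfn, NULL);
          pthread_create(&b, NULL, deactivate_policy, NULL);
          pthread_join(a, NULL);
          pthread_join(b, NULL);
          return 0;
  }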

Reported-by: Mike Cloaked <mike.cloaked@gmail.com>
Reported-by: Eric Biggers <ebiggers@kernel.org>
Cc: Yu Kuai <yukuai3@huawei.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sasha Levin <sashal@kernel.org>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217174
Link: https://lore.kernel.org/r/ZAuPkCn49urWBN5P@sol.localdomain
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
block/blk-cgroup.c
include/linux/blkdev.h

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f8b21be..8d1b775 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -87,32 +87,16 @@ static void blkg_free_workfn(struct work_struct *work)
 {
        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
                                             free_work);
-       struct request_queue *q = blkg->q;
        int i;
 
-       /*
-        * pd_free_fn() can also be called from blkcg_deactivate_policy(),
-        * in order to make sure pd_free_fn() is called in order, the deletion
-        * of the list blkg->q_node is delayed to here from blkg_destroy(), and
-        * blkcg_mutex is used to synchronize blkg_free_workfn() and
-        * blkcg_deactivate_policy().
-        */
-       if (q)
-               mutex_lock(&q->blkcg_mutex);
-
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
        if (blkg->parent)
                blkg_put(blkg->parent);
-
-       if (q) {
-               list_del_init(&blkg->q_node);
-               mutex_unlock(&q->blkcg_mutex);
-               blk_put_queue(q);
-       }
-
+       if (blkg->q)
+               blk_put_queue(blkg->q);
        free_percpu(blkg->iostat_cpu);
        percpu_ref_exit(&blkg->refcnt);
        kfree(blkg);
@@ -441,14 +425,9 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        lockdep_assert_held(&blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
 
-       /*
-        * blkg stays on the queue list until blkg_free_workfn(), see details in
-        * blkg_free_workfn(), hence this function can be called from
-        * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
-        * blkg_free_workfn().
-        */
-       if (hlist_unhashed(&blkg->blkcg_node))
-               return;
+       /* Something wrong if we are trying to remove same group twice */
+       WARN_ON_ONCE(list_empty(&blkg->q_node));
+       WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
@@ -460,6 +439,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        blkg->online = false;
 
        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+       list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);
 
        /*
@@ -1246,7 +1226,6 @@ int blkcg_init_disk(struct gendisk *disk)
        int ret;
 
        INIT_LIST_HEAD(&q->blkg_list);
-       mutex_init(&q->blkcg_mutex);
 
        new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
        if (!new_blkg)
@@ -1484,7 +1463,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
        if (queue_is_mq(q))
                blk_mq_freeze_queue(q);
 
-       mutex_lock(&q->blkcg_mutex);
        spin_lock_irq(&q->queue_lock);
 
        __clear_bit(pol->plid, q->blkcg_pols);
@@ -1503,7 +1481,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
        }
 
        spin_unlock_irq(&q->queue_lock);
-       mutex_unlock(&q->blkcg_mutex);
 
        if (queue_is_mq(q))
                blk_mq_unfreeze_queue(q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1680b6e..891f8cb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -487,7 +487,6 @@ struct request_queue {
        DECLARE_BITMAP          (blkcg_pols, BLKCG_MAX_POLS);
        struct blkcg_gq         *root_blkg;
        struct list_head        blkg_list;
-       struct mutex            blkcg_mutex;
 #endif
 
        struct queue_limits     limits;