cgroup: protect cgroup->nr_(dying_)descendants by css_set_lock
authorRoman Gushchin <guro@fb.com>
Fri, 19 Apr 2019 17:03:03 +0000 (10:03 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 31 May 2019 13:46:19 +0000 (06:46 -0700)
[ Upstream commit 4dcabece4c3a9f9522127be12cc12cc120399b2f ]

The number of descendant cgroups and the number of dying
descendant cgroups are currently synchronized using the cgroup_mutex.

The number of descendant cgroups will be required by the cgroup v2
freezer, which will use it to determine if a cgroup is frozen
(depending on total number of descendants and number of frozen
descendants). It's not always acceptable to grab the cgroup_mutex,
especially from quite hot paths (e.g. exit()).

To avoid this, let's additionally synchronize these counters using
the css_set_lock.

So, it's safe to read these counters with either cgroup_mutex or
css_set_lock locked, and both locks must be acquired for writing.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: kernel-team@fb.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/linux/cgroup-defs.h
kernel/cgroup/cgroup.c

index 6002275..a609015 100644 (file)
@@ -346,6 +346,11 @@ struct cgroup {
         * Dying cgroups are cgroups which were deleted by a user,
         * but are still existing because someone else is holding a reference.
         * max_descendants is a maximum allowed number of descent cgroups.
+        *
+        * nr_descendants and nr_dying_descendants are protected
+        * by cgroup_mutex and css_set_lock. It's fine to read them holding
+        * any of cgroup_mutex and css_set_lock; for writing both locks
+        * should be held.
         */
        int nr_descendants;
        int nr_dying_descendants;
index 63dae7e..8144111 100644 (file)
@@ -4659,9 +4659,11 @@ static void css_release_work_fn(struct work_struct *work)
                if (cgroup_on_dfl(cgrp))
                        cgroup_rstat_flush(cgrp);
 
+               spin_lock_irq(&css_set_lock);
                for (tcgrp = cgroup_parent(cgrp); tcgrp;
                     tcgrp = cgroup_parent(tcgrp))
                        tcgrp->nr_dying_descendants--;
+               spin_unlock_irq(&css_set_lock);
 
                cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
                cgrp->id = -1;
@@ -4874,12 +4876,14 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
        if (ret)
                goto out_idr_free;
 
+       spin_lock_irq(&css_set_lock);
        for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
                cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
 
                if (tcgrp != cgrp)
                        tcgrp->nr_descendants++;
        }
+       spin_unlock_irq(&css_set_lock);
 
        if (notify_on_release(parent))
                set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
@@ -5162,10 +5166,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        if (parent && cgroup_is_threaded(cgrp))
                parent->nr_threaded_children--;
 
+       spin_lock_irq(&css_set_lock);
        for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
                tcgrp->nr_descendants--;
                tcgrp->nr_dying_descendants++;
        }
+       spin_unlock_irq(&css_set_lock);
 
        cgroup1_check_for_release(parent);