diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3edf716..550e205 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -886,7 +886,9 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
                 * per-subsystem and moved to css->id so that lookups are
                 * successful until the target css is released.
                 */
+               mutex_lock(&cgroup_mutex);
                idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+               mutex_unlock(&cgroup_mutex);
                cgrp->id = -1;
 
                call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
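The hunk above serializes idr_remove() with cgroup_mutex, so the id cannot be dropped while another path is concurrently allocating from or looking up the same idr. As a rough userspace analogue of the pattern only (invented names, not the kernel code): take the lock that owns the table, drop the entry, then invalidate the cached id.

#include <pthread.h>
#include <stdio.h>

#define MAX_IDS 64

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table[MAX_IDS];               /* stand-in for the idr */

struct obj { int id; };

/* Analogue of the hunk: remove the slot only while holding the lock. */
static void obj_release_id(struct obj *o)
{
	pthread_mutex_lock(&table_lock);   /* cgroup_mutex in the patch */
	table[o->id] = NULL;               /* idr_remove() */
	pthread_mutex_unlock(&table_lock);
	o->id = -1;                        /* cgrp->id = -1 */
}

int main(void)
{
	struct obj o = { .id = 3 };

	table[o.id] = &o;
	obj_release_id(&o);
	printf("slot 3 = %p, id = %d\n", table[3], o.id);
	return 0;
}
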
@@ -2903,9 +2905,14 @@ static void cgroup_enable_task_cg_lists(void)
                 * We should check if the process is exiting, otherwise
                 * it will race with cgroup_exit() in that the list
                 * entry won't be deleted though the process has exited.
+                * Do it while holding siglock so that we don't end up
+                * racing against cgroup_exit().
                 */
+               spin_lock_irq(&p->sighand->siglock);
                if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
                        list_add(&p->cg_list, &task_css_set(p)->tasks);
+               spin_unlock_irq(&p->sighand->siglock);
+
                task_unlock(p);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
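With siglock held across both the PF_EXITING test and the list_add(), the check and the insertion become a single atomic step with respect to the exit path, so a task can no longer pass the check and then exit before it is linked. A self-contained sketch of that check-and-link-under-one-lock shape (pthread analogue with invented names, not the kernel structures):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: one lock covers both the flag and list membership. */
struct task_stub {
	pthread_mutex_t lock;      /* plays the role of siglock */
	bool exiting;              /* plays the role of PF_EXITING */
	bool on_list;              /* "is linked on cg_list" */
};

/* Analogue of the hunk: check-and-link as one atomic step. */
static void maybe_link(struct task_stub *t)
{
	pthread_mutex_lock(&t->lock);
	if (!t->exiting && !t->on_list)
		t->on_list = true;         /* list_add(&p->cg_list, ...) */
	pthread_mutex_unlock(&t->lock);
}

/* Analogue of the exit path: mark exiting and unlink under the same lock. */
static void task_exit(struct task_stub *t)
{
	pthread_mutex_lock(&t->lock);
	t->exiting = true;
	t->on_list = false;
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct task_stub t = { PTHREAD_MUTEX_INITIALIZER, false, false };

	maybe_link(&t);
	task_exit(&t);
	printf("on_list=%d exiting=%d\n", t.on_list, t.exiting);
	return 0;
}
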
@@ -3656,7 +3663,6 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 
        l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
-               mutex_unlock(&cgrp->pidlist_mutex);
                pidlist_free(array);
                return -ENOMEM;
        }
@@ -4105,17 +4111,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
        err = percpu_ref_init(&css->refcnt, css_release);
        if (err)
-               goto err_free;
+               goto err_free_css;
 
        init_css(css, ss, cgrp);
 
        err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
        if (err)
-               goto err_free;
+               goto err_free_percpu_ref;
 
        err = online_css(css);
        if (err)
-               goto err_free;
+               goto err_clear_dir;
 
        dget(cgrp->dentry);
        css_get(css->parent);
@@ -4131,8 +4137,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
        return 0;
 
-err_free:
+err_clear_dir:
+       cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+err_free_percpu_ref:
        percpu_ref_cancel_init(&css->refcnt);
+err_free_css:
        ss->css_free(css);
        return err;
 }
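The renamed labels follow the usual reverse-order unwinding idiom: each goto target cleans up only what had already been set up before the failing step, and the targets fall through from the latest resource to the earliest. A minimal standalone sketch of the idiom, with toy setup/teardown helpers standing in for percpu_ref_init(), cgroup_populate_dir() and online_css():

#include <stdio.h>

/* Toy setup/teardown steps; each setup returns 0 on success. */
static int step_a(void)  { puts("a up");     return 0; }
static int step_b(void)  { puts("b up");     return 0; }
static int step_c(void)  { puts("c failed"); return -1; }
static void undo_b(void) { puts("b undone"); }
static void undo_a(void) { puts("a undone"); }

static int create(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_out;           /* nothing to undo yet */

	err = step_b();
	if (err)
		goto err_undo_a;        /* only step_a succeeded */

	err = step_c();
	if (err)
		goto err_undo_b;        /* steps a and b succeeded */

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
err_out:
	return err;
}

int main(void)
{
	return create() ? 1 : 0;
}
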
@@ -4168,16 +4177,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        rcu_assign_pointer(cgrp->name, name);
 
        /*
-        * Temporarily set the pointer to NULL, so idr_find() won't return
-        * a half-baked cgroup.
-        */
-       cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
-       if (cgrp->id < 0) {
-               err = -ENOMEM;
-               goto err_free_name;
-       }
-
-       /*
         * Only live parents can have children.  Note that the liveliness
         * check isn't strictly necessary because cgroup_mkdir() and
         * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
@@ -4186,7 +4185,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
         */
        if (!cgroup_lock_live_group(parent)) {
                err = -ENODEV;
-               goto err_free_id;
+               goto err_free_name;
+       }
+
+       /*
+        * Temporarily set the pointer to NULL, so idr_find() won't return
+        * a half-baked cgroup.
+        */
+       cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+       if (cgrp->id < 0) {
+               err = -ENOMEM;
+               goto err_unlock;
        }
 
        /* Grab a reference on the superblock so the hierarchy doesn't
@@ -4218,7 +4227,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
         */
        err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
        if (err < 0)
-               goto err_unlock;
+               goto err_free_id;
        lockdep_assert_held(&dentry->d_inode->i_mutex);
 
        cgrp->serial_nr = cgroup_serial_nr_next++;
@@ -4254,12 +4263,12 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
        return 0;
 
-err_unlock:
-       mutex_unlock(&cgroup_mutex);
-       /* Release the reference count that we took on the superblock */
-       deactivate_super(sb);
 err_free_id:
        idr_remove(&root->cgroup_idr, cgrp->id);
+       /* Release the reference count that we took on the superblock */
+       deactivate_super(sb);
+err_unlock:
+       mutex_unlock(&cgroup_mutex);
 err_free_name:
        kfree(rcu_dereference_raw(cgrp->name));
 err_free_cgrp:
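Taken together, these cgroup_create() hunks move the id allocation inside cgroup_mutex and keep the error labels in strict reverse order of acquisition, so a failure at any point undoes exactly what was taken. The "reserve the id under the lock but keep the published pointer NULL until the object is fully built" idea, in an invented userspace form (the kernel swaps in the real pointer later; that step is outside these hunks):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 8

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool used[NSLOTS];       /* id allocation, like idr_alloc() */
static void *table[NSLOTS];     /* what lookups return, like idr_find() */

struct item { int id; const char *name; };

/* Reserve an id under the lock but leave the pointer NULL, so a
 * concurrent lookup never sees a half-built item. */
static int reserve_id(void)
{
	pthread_mutex_lock(&table_lock);
	for (int i = 1; i < NSLOTS; i++) {
		if (!used[i]) {
			used[i] = true;
			table[i] = NULL;
			pthread_mutex_unlock(&table_lock);
			return i;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return -1;
}

/* Publish the finished item so lookups can finally see it. */
static void publish(struct item *it)
{
	pthread_mutex_lock(&table_lock);
	table[it->id] = it;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct item it = { .name = "child" };

	it.id = reserve_id();       /* done only after taking the lock */
	if (it.id < 0)
		return 1;
	publish(&it);
	printf("%s has id %d\n", it.name, it.id);
	return 0;
}
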
@@ -4842,16 +4851,12 @@ static int __init cgroup_wq_init(void)
        /*
         * There isn't much point in executing destruction path in
         * parallel.  Good chunk is serialized with cgroup_mutex anyway.
-        *
-        * XXX: Must be ordered to make sure parent is offlined after
-        * children.  The ordering requirement is for memcg where a
-        * parent's offline may wait for a child's leading to deadlock.  In
-        * the long term, this should be fixed from memcg side.
+        * Use 1 for @max_active.
         *
         * We would prefer to do this in cgroup_init() above, but that
         * is called before init_workqueues(): so leave this until after.
         */
-       cgroup_destroy_wq = alloc_ordered_workqueue("cgroup_destroy", 0);
+       cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
        BUG_ON(!cgroup_destroy_wq);
 
        /*