cgroup: fix fail path in cgroup_load_subsys()
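
online_css() installs the css in cgroup_dummy_top->subsys[] only on
success, so when it fails during cgroup_load_subsys() the old error path
handed a NULL css to offline_css() via cgroup_unload_subsys().  Fix the
fail path: free the css directly in cgroup_load_subsys() when online_css()
fails, and make cgroup_unload_subsys() skip offline_css() and ->css_free()
when cgroup_css(cgroup_dummy_top, ss) is NULL.

The remaining hunks below are the surrounding cgroup core changes in the
same tree: the new for_each_css()/for_each_subsys() iterators and locking
asserts, removal of the per-hierarchy subsys_list in favour of subsys_mask
checks, a common seq_file path for all cgroup control files, per-css
creation through create_css(), and cgroup_root_mutex coverage for the
module load/unload paths.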
[platform/adaptation/renesas_rcar/renesas_kernel.git]

diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 036c05d..7030f04 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -93,6 +93,19 @@ static DEFINE_MUTEX(cgroup_mutex);
 
 static DEFINE_MUTEX(cgroup_root_mutex);
 
+#define cgroup_assert_mutex_or_rcu_locked()                            \
+       rcu_lockdep_assert(rcu_read_lock_held() ||                      \
+                          lockdep_is_held(&cgroup_mutex),              \
+                          "cgroup_mutex or RCU read lock required");
+
+#ifdef CONFIG_LOCKDEP
+#define cgroup_assert_mutex_or_root_locked()                           \
+       WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&cgroup_mutex) && \
+                                    !lockdep_is_held(&cgroup_root_mutex)))
+#else
+#define cgroup_assert_mutex_or_root_locked()   do { } while (0)
+#endif
+
 /*
  * cgroup destruction makes heavy use of work items and there can be a lot
  * of concurrent destructions.  Use a separate workqueue so that cgroup
@@ -230,16 +243,32 @@ static int notify_on_release(const struct cgroup *cgrp)
 }
 
 /**
+ * for_each_css - iterate all css's of a cgroup
+ * @css: the iteration cursor
+ * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
+ * @cgrp: the target cgroup to iterate css's of
+ *
+ * Should be called under cgroup_mutex.
+ */
+#define for_each_css(css, ssid, cgrp)                                  \
+       for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)        \
+               if (!((css) = rcu_dereference_check(                    \
+                               (cgrp)->subsys[(ssid)],                 \
+                               lockdep_is_held(&cgroup_mutex)))) { }   \
+               else
+
+/**
  * for_each_subsys - iterate all loaded cgroup subsystems
  * @ss: the iteration cursor
- * @i: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
+ * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
  *
- * Should be called under cgroup_mutex.
+ * Iterates through all loaded subsystems.  Should be called under
+ * cgroup_mutex or cgroup_root_mutex.
  */
-#define for_each_subsys(ss, i)                                         \
-       for ((i) = 0; (i) < CGROUP_SUBSYS_COUNT; (i)++)                 \
-               if (({ lockdep_assert_held(&cgroup_mutex);              \
-                      !((ss) = cgroup_subsys[i]); })) { }              \
+#define for_each_subsys(ss, ssid)                                      \
+       for (({ cgroup_assert_mutex_or_root_locked(); (ssid) = 0; });   \
+            (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)                    \
+               if (!((ss) = cgroup_subsys[(ssid)])) { }                \
                else
 
 /**
@@ -254,10 +283,6 @@ static int notify_on_release(const struct cgroup *cgrp)
        for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT &&              \
             (((ss) = cgroup_subsys[i]) || true); (i)++)
 
-/* iterate each subsystem attached to a hierarchy */
-#define for_each_root_subsys(root, ss)                                 \
-       list_for_each_entry((ss), &(root)->subsys_list, sibling)
-
 /* iterate across the active hierarchies */
 #define for_each_active_root(root)                                     \
        list_for_each_entry((root), &cgroup_roots, root_list)
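
For orientation, a minimal usage sketch of the two iterators added above.
The helper below is hypothetical (it would have to live in kernel/cgroup.c,
where the macros and cgroup_mutex are visible) and is not part of the
patch:

static void example_dump_cgroup(struct cgroup *cgrp)
{
        struct cgroup_subsys_state *css;
        struct cgroup_subsys *ss;
        int ssid;

        mutex_lock(&cgroup_mutex);

        /* css's actually attached to @cgrp; NULL slots are skipped */
        for_each_css(css, ssid, cgrp)
                pr_debug("css of %s is attached\n", css->ss->name);

        /* every loaded subsystem, whether or not it is attached to @cgrp */
        for_each_subsys(ss, ssid)
                pr_debug("subsys %d: %s\n", ssid, ss->name);

        mutex_unlock(&cgroup_mutex);
}

for_each_subsys() may equally be called under cgroup_root_mutex, which is
what the cgroup_show_options() conversion below relies on.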
@@ -1004,7 +1029,6 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                                           cgroup_css(cgroup_dummy_top, ss));
                        cgroup_css(cgrp, ss)->cgroup = cgrp;
 
-                       list_move(&ss->sibling, &root->subsys_list);
                        ss->root = root;
                        if (ss->bind)
                                ss->bind(cgroup_css(cgrp, ss));
@@ -1023,7 +1047,6 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                        RCU_INIT_POINTER(cgrp->subsys[i], NULL);
 
                        cgroup_subsys[i]->root = &cgroup_dummy_root;
-                       list_move(&ss->sibling, &cgroup_dummy_root.subsys_list);
 
                        /* subsystem is now free - drop reference on module */
                        module_put(ss->module);
@@ -1050,10 +1073,12 @@ static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
 {
        struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
        struct cgroup_subsys *ss;
+       int ssid;
 
        mutex_lock(&cgroup_root_mutex);
-       for_each_root_subsys(root, ss)
-               seq_printf(seq, ",%s", ss->name);
+       for_each_subsys(ss, ssid)
+               if (root->subsys_mask & (1 << ssid))
+                       seq_printf(seq, ",%s", ss->name);
        if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
                seq_puts(seq, ",sane_behavior");
        if (root->flags & CGRP_ROOT_NOPREFIX)
@@ -1323,7 +1348,6 @@ static void init_cgroup_root(struct cgroupfs_root *root)
 {
        struct cgroup *cgrp = &root->top_cgroup;
 
-       INIT_LIST_HEAD(&root->subsys_list);
        INIT_LIST_HEAD(&root->root_list);
        root->number_of_cgroups = 1;
        cgrp->root = root;
@@ -1928,8 +1952,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                              bool threadgroup)
 {
        int retval, i, group_size;
-       struct cgroup_subsys *ss, *failed_ss = NULL;
        struct cgroupfs_root *root = cgrp->root;
+       struct cgroup_subsys_state *css, *failed_css = NULL;
        /* threadgroup list cursor and array */
        struct task_struct *leader = tsk;
        struct task_and_cgroup *tc;
@@ -2002,13 +2026,11 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
        /*
         * step 1: check that we can legitimately attach to the cgroup.
         */
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
-
-               if (ss->can_attach) {
-                       retval = ss->can_attach(css, &tset);
+       for_each_css(css, i, cgrp) {
+               if (css->ss->can_attach) {
+                       retval = css->ss->can_attach(css, &tset);
                        if (retval) {
-                               failed_ss = ss;
+                               failed_css = css;
                                goto out_cancel_attach;
                        }
                }
@@ -2044,12 +2066,9 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
        /*
         * step 4: do subsystem attach callbacks.
         */
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
-
-               if (ss->attach)
-                       ss->attach(css, &tset);
-       }
+       for_each_css(css, i, cgrp)
+               if (css->ss->attach)
+                       css->ss->attach(css, &tset);
 
        /*
         * step 5: success! and cleanup
@@ -2066,13 +2085,11 @@ out_put_css_set_refs:
        }
 out_cancel_attach:
        if (retval) {
-               for_each_root_subsys(root, ss) {
-                       struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
-
-                       if (ss == failed_ss)
+               for_each_css(css, i, cgrp) {
+                       if (css == failed_css)
                                break;
-                       if (ss->cancel_attach)
-                               ss->cancel_attach(css, &tset);
+                       if (css->ss->cancel_attach)
+                               css->ss->cancel_attach(css, &tset);
                }
        }
 out_free_group_list:
@@ -2212,10 +2229,9 @@ static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
        return 0;
 }
 
-static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
-                                    struct cftype *cft, struct seq_file *seq)
+static int cgroup_release_agent_show(struct seq_file *seq, void *v)
 {
-       struct cgroup *cgrp = css->cgroup;
+       struct cgroup *cgrp = seq_css(seq)->cgroup;
 
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
@@ -2225,10 +2241,11 @@ static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
        return 0;
 }
 
-static int cgroup_sane_behavior_show(struct cgroup_subsys_state *css,
-                                    struct cftype *cft, struct seq_file *seq)
+static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
 {
-       seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup));
+       struct cgroup *cgrp = seq_css(seq)->cgroup;
+
+       seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
        return 0;
 }
 
@@ -2286,13 +2303,52 @@ out_free:
  * supports string->u64 maps, but can be extended in future.
  */
 
+static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
+{
+       struct cftype *cft = seq_cft(seq);
+
+       if (cft->seq_start) {
+               return cft->seq_start(seq, ppos);
+       } else {
+               /*
+                * The same behavior and code as single_open().  Returns
+                * !NULL if pos is at the beginning; otherwise, NULL.
+                */
+               return NULL + !*ppos;
+       }
+}
+
+static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
+{
+       struct cftype *cft = seq_cft(seq);
+
+       if (cft->seq_next) {
+               return cft->seq_next(seq, v, ppos);
+       } else {
+               /*
+                * The same behavior and code as single_open(), always
+                * terminate after the initial read.
+                */
+               ++*ppos;
+               return NULL;
+       }
+}
+
+static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
+{
+       struct cftype *cft = seq_cft(seq);
+
+       if (cft->seq_stop)
+               cft->seq_stop(seq, v);
+}
+
 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
 {
        struct cftype *cft = seq_cft(m);
        struct cgroup_subsys_state *css = seq_css(m);
 
-       if (cft->read_seq_string)
-               return cft->read_seq_string(css, cft, m);
+       if (cft->seq_show)
+               return cft->seq_show(m, arg);
 
        if (cft->read_u64)
                seq_printf(m, "%llu\n", cft->read_u64(css, cft));
@@ -2303,12 +2359,20 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg)
        return 0;
 }
 
+static struct seq_operations cgroup_seq_operations = {
+       .start          = cgroup_seqfile_start,
+       .next           = cgroup_seqfile_next,
+       .stop           = cgroup_seqfile_stop,
+       .show           = cgroup_seqfile_show,
+};
+
 static int cgroup_file_open(struct inode *inode, struct file *file)
 {
        struct cfent *cfe = __d_cfe(file->f_dentry);
        struct cftype *cft = __d_cft(file->f_dentry);
        struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent);
        struct cgroup_subsys_state *css;
+       struct cgroup_open_file *of;
        int err;
 
        err = generic_file_open(inode, file);
@@ -2338,24 +2402,16 @@ static int cgroup_file_open(struct inode *inode, struct file *file)
        WARN_ON_ONCE(cfe->css && cfe->css != css);
        cfe->css = css;
 
-       if (cft->open) {
-               err = cft->open(inode, file);
-       } else {
-               struct cgroup_open_file *of;
-
-               err = -ENOMEM;
-               of = kzalloc(sizeof(*of), GFP_KERNEL);
-               if (of) {
-                       of->cfe = cfe;
-                       err = single_open(file, cgroup_seqfile_show, of);
-                       if (err)
-                               kfree(of);
-               }
+       of = __seq_open_private(file, &cgroup_seq_operations,
+                               sizeof(struct cgroup_open_file));
+       if (of) {
+               of->cfe = cfe;
+               return 0;
        }
 
-       if (css->ss && err)
+       if (css->ss)
                css_put(css);
-       return err;
+       return -ENOMEM;
 }
 
 static int cgroup_file_release(struct inode *inode, struct file *file)
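
For reference, the usual pairing of __seq_open_private() and
seq_release_private() that cgroup_file_open()/cgroup_file_release() now
follow.  All names below are made up for illustration; only the seq_file
helpers are real:

/* a seq_operations table; .start/.next/.stop/.show filled in elsewhere */
static const struct seq_operations example_seq_ops;

struct example_state {                  /* hypothetical per-open state */
        int cursor;
};

static int example_open(struct inode *inode, struct file *file)
{
        struct example_state *st;

        /* allocates and zeroes the state, storing it in seq_file->private */
        st = __seq_open_private(file, &example_seq_ops, sizeof(*st));
        if (!st)
                return -ENOMEM;
        return 0;
}

static const struct file_operations example_fops = {
        .open           = example_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private, /* frees seq_file->private */
};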
@@ -2365,8 +2421,7 @@ static int cgroup_file_release(struct inode *inode, struct file *file)
 
        if (css->ss)
                css_put(css);
-       kfree(((struct seq_file *)file->private_data)->private);
-       return single_release(inode, file);
+       return seq_release_private(inode, file);
 }
 
 /*
@@ -2559,7 +2614,7 @@ static umode_t cgroup_file_mode(const struct cftype *cft)
        if (cft->mode)
                return cft->mode;
 
-       if (cft->read_u64 || cft->read_s64 || cft->read_seq_string)
+       if (cft->read_u64 || cft->read_s64 || cft->seq_show)
                mode |= S_IRUGO;
 
        if (cft->write_u64 || cft->write_s64 || cft->write_string ||
@@ -2859,9 +2914,9 @@ static void cgroup_enable_task_cg_lists(void)
  * @parent_css: css whose children to walk
  *
  * This function returns the next child of @parent_css and should be called
- * under RCU read lock.  The only requirement is that @parent_css and
- * @pos_css are accessible.  The next sibling is guaranteed to be returned
- * regardless of their states.
+ * under either cgroup_mutex or RCU read lock.  The only requirement is
+ * that @parent_css and @pos_css are accessible.  The next sibling is
+ * guaranteed to be returned regardless of their states.
  */
 struct cgroup_subsys_state *
 css_next_child(struct cgroup_subsys_state *pos_css,
@@ -2871,7 +2926,7 @@ css_next_child(struct cgroup_subsys_state *pos_css,
        struct cgroup *cgrp = parent_css->cgroup;
        struct cgroup *next;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       cgroup_assert_mutex_or_rcu_locked();
 
        /*
         * @pos could already have been removed.  Once a cgroup is removed,
@@ -2918,10 +2973,10 @@ EXPORT_SYMBOL_GPL(css_next_child);
  * to visit for pre-order traversal of @root's descendants.  @root is
  * included in the iteration and the first node to be visited.
  *
- * While this function requires RCU read locking, it doesn't require the
- * whole traversal to be contained in a single RCU critical section.  This
- * function will return the correct next descendant as long as both @pos
- * and @root are accessible and @pos is a descendant of @root.
+ * While this function requires cgroup_mutex or RCU read locking, it
+ * doesn't require the whole traversal to be contained in a single critical
+ * section.  This function will return the correct next descendant as long
+ * as both @pos and @root are accessible and @pos is a descendant of @root.
  */
 struct cgroup_subsys_state *
 css_next_descendant_pre(struct cgroup_subsys_state *pos,
@@ -2929,7 +2984,7 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos,
 {
        struct cgroup_subsys_state *next;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       cgroup_assert_mutex_or_rcu_locked();
 
        /* if first iteration, visit @root */
        if (!pos)
@@ -2960,17 +3015,17 @@ EXPORT_SYMBOL_GPL(css_next_descendant_pre);
  * is returned.  This can be used during pre-order traversal to skip
  * subtree of @pos.
  *
- * While this function requires RCU read locking, it doesn't require the
- * whole traversal to be contained in a single RCU critical section.  This
- * function will return the correct rightmost descendant as long as @pos is
- * accessible.
+ * While this function requires cgroup_mutex or RCU read locking, it
+ * doesn't require the whole traversal to be contained in a single critical
+ * section.  This function will return the correct rightmost descendant as
+ * long as @pos is accessible.
  */
 struct cgroup_subsys_state *
 css_rightmost_descendant(struct cgroup_subsys_state *pos)
 {
        struct cgroup_subsys_state *last, *tmp;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       cgroup_assert_mutex_or_rcu_locked();
 
        do {
                last = pos;
@@ -3006,10 +3061,11 @@ css_leftmost_descendant(struct cgroup_subsys_state *pos)
  * to visit for post-order traversal of @root's descendants.  @root is
  * included in the iteration and the last node to be visited.
  *
- * While this function requires RCU read locking, it doesn't require the
- * whole traversal to be contained in a single RCU critical section.  This
- * function will return the correct next descendant as long as both @pos
- * and @cgroup are accessible and @pos is a descendant of @cgroup.
+ * While this function requires cgroup_mutex or RCU read locking, it
+ * doesn't require the whole traversal to be contained in a single critical
+ * section.  This function will return the correct next descendant as long
+ * as both @pos and @cgroup are accessible and @pos is a descendant of
+ * @cgroup.
  */
 struct cgroup_subsys_state *
 css_next_descendant_post(struct cgroup_subsys_state *pos,
@@ -3017,7 +3073,7 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 {
        struct cgroup_subsys_state *next;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       cgroup_assert_mutex_or_rcu_locked();
 
        /* if first iteration, visit leftmost descendant which may be @root */
        if (!pos)
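
With the relaxed rule documented in the hunks above, a subtree walk such as
the following is legitimate under either cgroup_mutex or the RCU read lock
(an illustrative fragment, not part of the patch; @root_css is assumed to
be pinned by the caller):

static void example_walk_subtree(struct cgroup_subsys_state *root_css)
{
        struct cgroup_subsys_state *pos;

        rcu_read_lock();        /* holding cgroup_mutex instead is also fine */
        css_for_each_descendant_pre(pos, root_css)
                pr_debug("visiting %s\n", cgroup_name(pos->cgroup));
        rcu_read_unlock();
}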
@@ -3777,36 +3833,6 @@ static const struct seq_operations cgroup_pidlist_seq_operations = {
        .show = cgroup_pidlist_show,
 };
 
-static const struct file_operations cgroup_pidlist_operations = {
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .write = cgroup_file_write,
-       .release = seq_release_private,
-};
-
-/*
- * The following functions handle opens on a file that displays a pidlist
- * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
- * in the cgroup.
- */
-/* helper function for the two below it */
-static int cgroup_pidlist_open(struct inode *unused, struct file *file)
-{
-       struct cfent *cfe = __d_cfe(file->f_dentry);
-       struct cgroup_open_file *of;
-
-       /* configure file information */
-       file->f_op = &cgroup_pidlist_operations;
-
-       of = __seq_open_private(file, &cgroup_pidlist_seq_operations,
-                               sizeof(*of));
-       if (!of)
-               return -ENOMEM;
-
-       of->cfe = cfe;
-       return 0;
-}
-
 static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
 {
@@ -3860,7 +3886,10 @@ static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
 static struct cftype cgroup_base_files[] = {
        {
                .name = "cgroup.procs",
-               .open = cgroup_pidlist_open,
+               .seq_start = cgroup_pidlist_start,
+               .seq_next = cgroup_pidlist_next,
+               .seq_stop = cgroup_pidlist_stop,
+               .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_PROCS,
                .write_u64 = cgroup_procs_write,
                .mode = S_IRUGO | S_IWUSR,
@@ -3874,7 +3903,7 @@ static struct cftype cgroup_base_files[] = {
        {
                .name = "cgroup.sane_behavior",
                .flags = CFTYPE_ONLY_ON_ROOT,
-               .read_seq_string = cgroup_sane_behavior_show,
+               .seq_show = cgroup_sane_behavior_show,
        },
 
        /*
@@ -3885,7 +3914,10 @@ static struct cftype cgroup_base_files[] = {
        {
                .name = "tasks",
                .flags = CFTYPE_INSANE,         /* use "procs" instead */
-               .open = cgroup_pidlist_open,
+               .seq_start = cgroup_pidlist_start,
+               .seq_next = cgroup_pidlist_next,
+               .seq_stop = cgroup_pidlist_stop,
+               .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_TASKS,
                .write_u64 = cgroup_tasks_write,
                .mode = S_IRUGO | S_IWUSR,
@@ -3899,7 +3931,7 @@ static struct cftype cgroup_base_files[] = {
        {
                .name = "release_agent",
                .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
-               .read_seq_string = cgroup_release_agent_show,
+               .seq_show = cgroup_release_agent_show,
                .write_string = cgroup_release_agent_write,
                .max_write_len = PATH_MAX,
        },
@@ -4044,6 +4076,62 @@ static void offline_css(struct cgroup_subsys_state *css)
        RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css);
 }
 
+/**
+ * create_css - create a cgroup_subsys_state
+ * @cgrp: the cgroup new css will be associated with
+ * @ss: the subsys of new css
+ *
+ * Create a new css associated with @cgrp - @ss pair.  On success, the new
+ * css is online and installed in @cgrp with all interface files created.
+ * Returns 0 on success, -errno on failure.
+ */
+static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct cgroup *parent = cgrp->parent;
+       struct cgroup_subsys_state *css;
+       int err;
+
+       lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
+       lockdep_assert_held(&cgroup_mutex);
+
+       css = ss->css_alloc(cgroup_css(parent, ss));
+       if (IS_ERR(css))
+               return PTR_ERR(css);
+
+       err = percpu_ref_init(&css->refcnt, css_release);
+       if (err)
+               goto err_free;
+
+       init_css(css, ss, cgrp);
+
+       err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
+       if (err)
+               goto err_free;
+
+       err = online_css(css);
+       if (err)
+               goto err_free;
+
+       dget(cgrp->dentry);
+       css_get(css->parent);
+
+       if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
+           parent->parent) {
+               pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
+                          current->comm, current->pid, ss->name);
+               if (!strcmp(ss->name, "memory"))
+                       pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
+               ss->warned_broken_hierarchy = true;
+       }
+
+       return 0;
+
+err_free:
+       percpu_ref_cancel_init(&css->refcnt);
+       ss->css_free(css);
+       return err;
+}
+
 /*
  * cgroup_create - create a cgroup
  * @parent: cgroup that will be parent of the new cgroup
@@ -4055,11 +4143,10 @@ static void offline_css(struct cgroup_subsys_state *css)
 static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                             umode_t mode)
 {
-       struct cgroup_subsys_state *css_ar[CGROUP_SUBSYS_COUNT] = { };
        struct cgroup *cgrp;
        struct cgroup_name *name;
        struct cgroupfs_root *root = parent->root;
-       int err = 0;
+       int ssid, err = 0;
        struct cgroup_subsys *ss;
        struct super_block *sb = root->sb;
 
@@ -4115,23 +4202,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
 
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css;
-
-               css = ss->css_alloc(cgroup_css(parent, ss));
-               if (IS_ERR(css)) {
-                       err = PTR_ERR(css);
-                       goto err_free_all;
-               }
-               css_ar[ss->subsys_id] = css;
-
-               err = percpu_ref_init(&css->refcnt, css_release);
-               if (err)
-                       goto err_free_all;
-
-               init_css(css, ss, cgrp);
-       }
-
        /*
         * Create directory.  cgroup_create_file() returns with the new
         * directory locked on success so that it can be populated without
@@ -4139,7 +4209,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
         */
        err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
        if (err < 0)
-               goto err_free_all;
+               goto err_unlock;
        lockdep_assert_held(&dentry->d_inode->i_mutex);
 
        cgrp->serial_nr = cgroup_serial_nr_next++;
@@ -4148,59 +4218,34 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
        root->number_of_cgroups++;
 
-       /* each css holds a ref to the cgroup's dentry and the parent css */
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
-               dget(dentry);
-               css_get(css->parent);
-       }
-
        /* hold a ref to the parent's dentry */
        dget(parent->dentry);
 
-       /* creation succeeded, notify subsystems */
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
-               err = online_css(css);
-               if (err)
-                       goto err_destroy;
-
-               if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
-                   parent->parent) {
-                       pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
-                                  current->comm, current->pid, ss->name);
-                       if (!strcmp(ss->name, "memory"))
-                               pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
-                       ss->warned_broken_hierarchy = true;
-               }
-       }
-
+       /*
+        * @cgrp is now fully operational.  If something fails after this
+        * point, it'll be released via the normal destruction path.
+        */
        idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
 
        err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
        if (err)
                goto err_destroy;
 
-       err = cgroup_populate_dir(cgrp, root->subsys_mask);
-       if (err)
-               goto err_destroy;
+       /* let's create and online css's */
+       for_each_subsys(ss, ssid) {
+               if (root->subsys_mask & (1 << ssid)) {
+                       err = create_css(cgrp, ss);
+                       if (err)
+                               goto err_destroy;
+               }
+       }
 
        mutex_unlock(&cgroup_mutex);
        mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
 
        return 0;
 
-err_free_all:
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
-               if (css) {
-                       percpu_ref_cancel_init(&css->refcnt);
-                       ss->css_free(css);
-               }
-       }
+err_unlock:
        mutex_unlock(&cgroup_mutex);
        /* Release the reference count that we took on the superblock */
        deactivate_super(sb);
@@ -4335,9 +4380,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
 {
        struct dentry *d = cgrp->dentry;
-       struct cgroup_subsys *ss;
+       struct cgroup_subsys_state *css;
        struct cgroup *child;
        bool empty;
+       int ssid;
 
        lockdep_assert_held(&d->d_inode->i_mutex);
        lockdep_assert_held(&cgroup_mutex);
@@ -4373,8 +4419,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
         * will be invoked to perform the rest of destruction once the
         * percpu refs of all css's are confirmed to be killed.
         */
-       for_each_root_subsys(cgrp->root, ss)
-               kill_css(cgroup_css(cgrp, ss));
+       for_each_css(css, ssid, cgrp)
+               kill_css(css);
 
        /*
         * Mark @cgrp dead.  This prevents further task migration and child
@@ -4487,7 +4533,6 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
        cgroup_init_cftsets(ss);
 
        /* Create the top cgroup state for this subsystem */
-       list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
        ss->root = &cgroup_dummy_root;
        css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss));
        /* We don't handle early failures gracefully */
@@ -4561,6 +4606,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
        cgroup_init_cftsets(ss);
 
        mutex_lock(&cgroup_mutex);
+       mutex_lock(&cgroup_root_mutex);
        cgroup_subsys[ss->subsys_id] = ss;
 
        /*
@@ -4572,11 +4618,11 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
        if (IS_ERR(css)) {
                /* failure case - need to deassign the cgroup_subsys[] slot. */
                cgroup_subsys[ss->subsys_id] = NULL;
+               mutex_unlock(&cgroup_root_mutex);
                mutex_unlock(&cgroup_mutex);
                return PTR_ERR(css);
        }
 
-       list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
        ss->root = &cgroup_dummy_root;
 
        /* our new subsystem will be attached to the dummy hierarchy. */
@@ -4606,14 +4652,18 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
        write_unlock(&css_set_lock);
 
        ret = online_css(css);
-       if (ret)
+       if (ret) {
+               ss->css_free(css);
                goto err_unload;
+       }
 
        /* success! */
+       mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
        return 0;
 
 err_unload:
+       mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
        /* @ss can't be mounted here as try_module_get() would fail */
        cgroup_unload_subsys(ss);
@@ -4632,6 +4682,7 @@ EXPORT_SYMBOL_GPL(cgroup_load_subsys);
 void cgroup_unload_subsys(struct cgroup_subsys *ss)
 {
        struct cgrp_cset_link *link;
+       struct cgroup_subsys_state *css;
 
        BUG_ON(ss->module == NULL);
 
@@ -4643,15 +4694,15 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
        BUG_ON(ss->root != &cgroup_dummy_root);
 
        mutex_lock(&cgroup_mutex);
+       mutex_lock(&cgroup_root_mutex);
 
-       offline_css(cgroup_css(cgroup_dummy_top, ss));
+       css = cgroup_css(cgroup_dummy_top, ss);
+       if (css)
+               offline_css(css);
 
        /* deassign the subsys_id */
        cgroup_subsys[ss->subsys_id] = NULL;
 
-       /* remove subsystem from the dummy root's list of subsystems */
-       list_del_init(&ss->sibling);
-
        /*
         * disentangle the css from all css_sets attached to the dummy
         * top. as in loading, we need to pay our respects to the hashtable
@@ -4674,9 +4725,11 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
         * need to free before marking as null because ss->css_free needs
         * the cgrp->subsys pointer to find their state.
         */
-       ss->css_free(cgroup_css(cgroup_dummy_top, ss));
+       if (css)
+               ss->css_free(css);
        RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);
 
+       mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
 }
 EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
@@ -4847,11 +4900,12 @@ int proc_cgroup_show(struct seq_file *m, void *v)
        for_each_active_root(root) {
                struct cgroup_subsys *ss;
                struct cgroup *cgrp;
-               int count = 0;
+               int ssid, count = 0;
 
                seq_printf(m, "%d:", root->hierarchy_id);
-               for_each_root_subsys(root, ss)
-                       seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
+               for_each_subsys(ss, ssid)
+                       if (root->subsys_mask & (1 << ssid))
+                               seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
                if (strlen(root->name))
                        seq_printf(m, "%sname=%s", count ? "," : "",
                                   root->name);
@@ -5192,16 +5246,16 @@ __setup("cgroup_disable=", cgroup_disable);
  * @dentry: directory dentry of interest
  * @ss: subsystem of interest
  *
- * Must be called under RCU read lock.  The caller is responsible for
- * pinning the returned css if it needs to be accessed outside the RCU
- * critical section.
+ * Must be called under cgroup_mutex or RCU read lock.  The caller is
+ * responsible for pinning the returned css if it needs to be accessed
+ * outside the critical section.
  */
 struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
                                         struct cgroup_subsys *ss)
 {
        struct cgroup *cgrp;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       cgroup_assert_mutex_or_rcu_locked();
 
        /* is @dentry a cgroup dir? */
        if (!dentry->d_inode ||
@@ -5224,9 +5278,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
 {
        struct cgroup *cgrp;
 
-       rcu_lockdep_assert(rcu_read_lock_held() ||
-                          lockdep_is_held(&cgroup_mutex),
-                          "css_from_id() needs proper protection");
+       cgroup_assert_mutex_or_rcu_locked();
 
        cgrp = idr_find(&ss->root->cgroup_idr, id);
        if (cgrp)
@@ -5274,9 +5326,7 @@ static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
        return count;
 }
 
-static int current_css_set_cg_links_read(struct cgroup_subsys_state *css,
-                                        struct cftype *cft,
-                                        struct seq_file *seq)
+static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
 {
        struct cgrp_cset_link *link;
        struct css_set *cset;
@@ -5301,9 +5351,9 @@ static int current_css_set_cg_links_read(struct cgroup_subsys_state *css,
 }
 
 #define MAX_TASKS_SHOWN_PER_CSS 25
-static int cgroup_css_links_read(struct cgroup_subsys_state *css,
-                                struct cftype *cft, struct seq_file *seq)
+static int cgroup_css_links_read(struct seq_file *seq, void *v)
 {
+       struct cgroup_subsys_state *css = seq_css(seq);
        struct cgrp_cset_link *link;
 
        read_lock(&css_set_lock);
@@ -5349,12 +5399,12 @@ static struct cftype debug_files[] =  {
 
        {
                .name = "current_css_set_cg_links",
-               .read_seq_string = current_css_set_cg_links_read,
+               .seq_show = current_css_set_cg_links_read,
        },
 
        {
                .name = "cgroup_css_links",
-               .read_seq_string = cgroup_css_links_read,
+               .seq_show = cgroup_css_links_read,
        },
 
        {