Merge tag 'backport/v3.14.24-ltsi-rc1/mach-shmobile-to-renesas-next-20141119-v3.18...
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm / memcontrol.c
index ce7a8cc..b58d4fb 100644
@@ -292,6 +292,9 @@ struct mem_cgroup {
        /* vmpressure notifications */
        struct vmpressure vmpressure;
 
+       /* css_online() has been completed */
+       int initialized;
+
        /*
         * the counter to account for mem+swap usage.
         */
@@ -1127,9 +1130,21 @@ skip_node:
         * skipping css reference should be safe.
         */
        if (next_css) {
-               if ((next_css->flags & CSS_ONLINE) &&
-                               (next_css == &root->css || css_tryget(next_css)))
-                       return mem_cgroup_from_css(next_css);
+               struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
+
+               if (next_css == &root->css)
+                       return memcg;
+
+               if (css_tryget(next_css)) {
+                       /*
+                        * Make sure the memcg is initialized:
+                        * mem_cgroup_css_online() orders the
+                        * initialization against setting the flag.
+                        */
+                       if (smp_load_acquire(&memcg->initialized))
+                               return memcg;
+                       css_put(next_css);
+               }
 
                prev_css = next_css;
                goto skip_node;
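
The hunk above only hands a memcg to the iterator once css_tryget() has succeeded and an acquire load of memcg->initialized has confirmed that onlining finished; otherwise the reference is dropped and the css is skipped. A minimal userspace analogue of that publish/consume pattern, using C11 atomics and made-up names (struct node, pick_node) and leaving the reference counting out for brevity, might look like this:

#include <stdatomic.h>
#include <stdio.h>

struct node {
        atomic_int initialized;   /* set with a release store by the creator */
        int payload;              /* only valid once initialized != 0        */
};

static struct node *pick_node(struct node *candidate)
{
        /*
         * Pairs with the creator's memory_order_release store: if the
         * flag reads as set, payload is guaranteed to be visible too.
         */
        if (atomic_load_explicit(&candidate->initialized,
                                 memory_order_acquire))
                return candidate;
        return NULL;              /* not ready yet, the caller skips it */
}

int main(void)
{
        struct node n = { .payload = 42 };

        printf("before init: %p\n", (void *)pick_node(&n));
        atomic_store_explicit(&n.initialized, 1, memory_order_release);
        printf("after init:  %d\n", pick_node(&n)->payload);
        return 0;
}
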
@@ -5670,8 +5685,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
 {
        struct mem_cgroup_eventfd_list *ev;
 
+       spin_lock(&memcg_oom_lock);
+
        list_for_each_entry(ev, &memcg->oom_notify, list)
                eventfd_signal(ev->eventfd, 1);
+
+       spin_unlock(&memcg_oom_lock);
        return 0;
 }
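
The notification fix above takes memcg_oom_lock around the list walk so that concurrent unregistration cannot free an entry while it is being signaled. A hedged userspace analogue with a pthread mutex and invented names (struct listener, notify_all) is sketched below:

#include <pthread.h>
#include <stdio.h>

struct listener {
        struct listener *next;
        int fd;                       /* stand-in for an eventfd */
};

static pthread_mutex_t notify_lock = PTHREAD_MUTEX_INITIALIZER;
static struct listener *listeners;   /* head of the notify list */

static void notify_all(void)
{
        /* Both registration and traversal hold the same lock. */
        pthread_mutex_lock(&notify_lock);
        for (struct listener *l = listeners; l; l = l->next)
                printf("signal fd %d\n", l->fd);   /* eventfd_signal() analogue */
        pthread_mutex_unlock(&notify_lock);
}

int main(void)
{
        struct listener a = { .next = NULL, .fd = 3 };

        pthread_mutex_lock(&notify_lock);
        a.next = listeners;
        listeners = &a;               /* register under the lock */
        pthread_mutex_unlock(&notify_lock);

        notify_all();
        return 0;
}
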
 
@@ -6534,6 +6553,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
+       int ret;
 
        if (css->cgroup->id > MEM_CGROUP_ID_MAX)
                return -ENOSPC;
@@ -6570,7 +6590,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
        }
        mutex_unlock(&memcg_create_mutex);
 
-       return memcg_init_kmem(memcg, &mem_cgroup_subsys);
+       ret = memcg_init_kmem(memcg, &mem_cgroup_subsys);
+       if (ret)
+               return ret;
+
+       /*
+        * Make sure the memcg is initialized: mem_cgroup_iter()
+        * orders reading memcg->initialized against its callers
+        * reading the memcg members.
+        */
+       smp_store_release(&memcg->initialized, 1);
+
+       return 0;
 }
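
On the css_online() side, every piece of setup completes before the memcg is published with smp_store_release(). The same ordering can be sketched in userspace with C11 atomics; group_online() and the fields below are invented for illustration and pair with the acquire load shown after the iterator hunk above:

#include <stdatomic.h>
#include <string.h>

struct group {
        char name[16];
        long limit;
        atomic_int initialized;
};

static int group_online(struct group *g)
{
        /* Finish all initialization first... */
        strcpy(g->name, "example");
        g->limit = 4096;

        /*
         * ...then publish it.  The release store pairs with an acquire
         * load in readers, so they never see a half-initialized group.
         */
        atomic_store_explicit(&g->initialized, 1, memory_order_release);
        return 0;
}

int main(void)
{
        struct group g = { 0 };

        return group_online(&g);
}
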
 
 /*
@@ -6595,6 +6626,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup_event *event, *tmp;
+       struct cgroup_subsys_state *iter;
 
        /*
         * Unregister events and notify userspace.
@@ -6611,7 +6643,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        kmem_cgroup_css_offline(memcg);
 
        mem_cgroup_invalidate_reclaim_iterators(memcg);
-       mem_cgroup_reparent_charges(memcg);
+
+       /*
+        * This requires that offlining is serialized.  Right now that is
+        * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
+        */
+       css_for_each_descendant_post(iter, css)
+               mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
+
        mem_cgroup_destroy_all_caches(memcg);
        vmpressure_cleanup(&memcg->vmpressure);
 }
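
css_for_each_descendant_post() visits every descendant before its parent and the origin css last, which is what lets the offline path reparent each child's charges before reaching the memcg that is going away. A small userspace sketch of that post-order walk, using a made-up tree type (struct cg) rather than the cgroup API:

#include <stdio.h>

struct cg {
        const char *name;
        struct cg *children[4];
};

static void for_each_descendant_post(struct cg *root,
                                     void (*visit)(struct cg *))
{
        /* Recurse into children first, then visit the parent. */
        for (int i = 0; i < 4 && root->children[i]; i++)
                for_each_descendant_post(root->children[i], visit);
        visit(root);
}

static void reparent(struct cg *cg)
{
        printf("reparent charges of %s\n", cg->name);
}

int main(void)
{
        struct cg leaf  = { .name = "leaf" };
        struct cg child = { .name = "child", .children = { &leaf } };
        struct cg root  = { .name = "root",  .children = { &child } };

        for_each_descendant_post(&root, reparent);
        return 0;
}
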