memcg: clean up memcg->nodeinfo
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1947218..2b7cd24 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -187,10 +187,6 @@ struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 };
 
-struct mem_cgroup_lru_info {
-       struct mem_cgroup_per_node *nodeinfo[0];
-};
-
 /*
  * Cgroups above their limits are maintained in a RB-Tree, independent of
  * their hierarchy representation
@@ -366,14 +362,8 @@ struct mem_cgroup {
        atomic_t        numainfo_updating;
 #endif
 
-       /*
-        * Per cgroup active and inactive list, similar to the
-        * per zone LRU lists.
-        *
-        * WARNING: This has to be the last element of the struct. Don't
-        * add new fields after this point.
-        */
-       struct mem_cgroup_lru_info info;
+       struct mem_cgroup_per_node *nodeinfo[0];
+       /* WARNING: nodeinfo must be the last member here */
 };
 
 static size_t memcg_size(void)
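
With the wrapper struct gone, nodeinfo[] is a zero-length array at the end of struct mem_cgroup itself, and memcg_size() has to size the allocation to include the nr_node_ids trailing entries, which is why the warning insists the member stay last. As an illustration only, here is a self-contained sketch of the flexible-array idiom with toy names (not the kernel structures; [0] is the GNU zero-length-array spelling the kernel uses, C99 would write []):

#include <stdlib.h>

struct per_node { int stat; };

struct toy_memcg {
        int flags;
        /* WARNING: must stay the last member, see toy_memcg_size() */
        struct per_node *nodeinfo[0];
};

static int nr_node_ids = 4;     /* stand-in for the kernel's nr_node_ids */

static size_t toy_memcg_size(void)
{
        /* base struct plus one per-node pointer slot per node */
        return sizeof(struct toy_memcg) +
               nr_node_ids * sizeof(struct per_node *);
}

static struct toy_memcg *toy_memcg_alloc(void)
{
        /* a single allocation covers the struct and the trailing pointer array */
        return calloc(1, toy_memcg_size());
}
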
@@ -683,7 +673,7 @@ static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
 {
        VM_BUG_ON((unsigned)nid >= nr_node_ids);
-       return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
+       return &memcg->nodeinfo[nid]->zoneinfo[zid];
 }
 
 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
@@ -1148,6 +1138,58 @@ skip_node:
        return NULL;
 }
 
+static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
+{
+       /*
+        * When a group in the hierarchy below root is destroyed, the
+        * hierarchy iterator can no longer be trusted since it might
+        * have pointed to the destroyed group.  Invalidate it.
+        */
+       atomic_inc(&root->dead_count);
+}
+
+static struct mem_cgroup *
+mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
+                    struct mem_cgroup *root,
+                    int *sequence)
+{
+       struct mem_cgroup *position = NULL;
+       /*
+        * A cgroup destruction happens in two stages: offlining and
+        * release.  They are separated by a RCU grace period.
+        *
+        * If the iterator is valid, we may still race with an
+        * offlining.  The RCU lock ensures the object won't be
+        * released, tryget will fail if we lost the race.
+        */
+       *sequence = atomic_read(&root->dead_count);
+       if (iter->last_dead_count == *sequence) {
+               smp_rmb();
+               position = iter->last_visited;
+               if (position && !css_tryget(&position->css))
+                       position = NULL;
+       }
+       return position;
+}
+
+static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
+                                  struct mem_cgroup *last_visited,
+                                  struct mem_cgroup *new_position,
+                                  int sequence)
+{
+       if (last_visited)
+               css_put(&last_visited->css);
+       /*
+        * We store the sequence count from the time @last_visited was
+        * loaded successfully instead of rereading it here so that we
+        * don't lose destruction events in between.  We could have
+        * raced with the destruction of @new_position after all.
+        */
+       iter->last_visited = new_position;
+       smp_wmb();
+       iter->last_dead_count = sequence;
+}
+
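The three helpers keep the existing protocol intact: mem_cgroup_iter_update() stores the cached position before the dead_count it was loaded under (smp_wmb()), and mem_cgroup_iter_load() only trusts the cached pointer after reading a matching count (smp_rmb()), then revalidates liveness with css_tryget(). A hypothetical userspace analogue of that publish/validate ordering, written with C11 atomics instead of the kernel barriers and omitting the refcount revalidation:

#include <stdatomic.h>
#include <stddef.h>

struct node { int data; };

/* bumped by the "destruction" path, like root->dead_count */
static atomic_int dead_count;

struct cache {
        _Atomic(struct node *) last_visited;
        atomic_int             last_dead_count;
};

/* Publish a new position together with the count it was loaded under. */
static void cache_update(struct cache *c, struct node *pos, int seq)
{
        atomic_store_explicit(&c->last_visited, pos, memory_order_relaxed);
        /* release ordering plays the role of smp_wmb(): pointer before count */
        atomic_store_explicit(&c->last_dead_count, seq, memory_order_release);
}

/* Reuse the cached position only if no destruction happened since it was stored. */
static struct node *cache_load(struct cache *c, int *seq)
{
        struct node *pos = NULL;

        *seq = atomic_load_explicit(&dead_count, memory_order_relaxed);
        /* acquire ordering plays the role of smp_rmb(): count before pointer */
        if (atomic_load_explicit(&c->last_dead_count,
                                 memory_order_acquire) == *seq)
                pos = atomic_load_explicit(&c->last_visited,
                                           memory_order_relaxed);
        return pos;
}

As in the kernel code, the caller passes the seq obtained from cache_load() back into cache_update(), so an invalidation that races with the walk is not lost.
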
 /**
  * mem_cgroup_iter - iterate over memory cgroup hierarchy
  * @root: hierarchy root
@@ -1171,7 +1213,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 {
        struct mem_cgroup *memcg = NULL;
        struct mem_cgroup *last_visited = NULL;
-       unsigned long uninitialized_var(dead_count);
 
        if (mem_cgroup_disabled())
                return NULL;
@@ -1191,6 +1232,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
        rcu_read_lock();
        while (!memcg) {
                struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
+               int uninitialized_var(seq);
 
                if (reclaim) {
                        int nid = zone_to_nid(reclaim->zone);
@@ -1204,37 +1246,13 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                goto out_unlock;
                        }
 
-                       /*
-                        * If the dead_count mismatches, a destruction
-                        * has happened or is happening concurrently.
-                        * If the dead_count matches, a destruction
-                        * might still happen concurrently, but since
-                        * we checked under RCU, that destruction
-                        * won't free the object until we release the
-                        * RCU reader lock.  Thus, the dead_count
-                        * check verifies the pointer is still valid,
-                        * css_tryget() verifies the cgroup pointed to
-                        * is alive.
-                        */
-                       dead_count = atomic_read(&root->dead_count);
-                       if (dead_count == iter->last_dead_count) {
-                               smp_rmb();
-                               last_visited = iter->last_visited;
-                               if (last_visited &&
-                                   !css_tryget(&last_visited->css))
-                                       last_visited = NULL;
-                       }
+                       last_visited = mem_cgroup_iter_load(iter, root, &seq);
                }
 
                memcg = __mem_cgroup_iter_next(root, last_visited);
 
                if (reclaim) {
-                       if (last_visited)
-                               css_put(&last_visited->css);
-
-                       iter->last_visited = memcg;
-                       smp_wmb();
-                       iter->last_dead_count = dead_count;
+                       mem_cgroup_iter_update(iter, last_visited, memcg, seq);
 
                        if (!memcg)
                                iter->generation++;
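
For context, callers of mem_cgroup_iter() feed each result back in as prev until the walk wraps. The sketch below is modeled loosely on the reclaim path (kernel context assumed, per-zone details elided), not on any one caller verbatim:

/* hedged sketch: how a reclaim-style caller walks the subtree under @root */
static void walk_hierarchy(struct mem_cgroup *root, struct zone *zone, int priority)
{
        struct mem_cgroup_reclaim_cookie reclaim = {
                .zone = zone,
                .priority = priority,
        };
        struct mem_cgroup *memcg;

        memcg = mem_cgroup_iter(root, NULL, &reclaim);
        do {
                /* work on one memcg in the hierarchy here */
                memcg = mem_cgroup_iter(root, memcg, &reclaim);
        } while (memcg);
}

Leaving such a loop early requires mem_cgroup_iter_break(root, memcg) so the css reference the iterator still holds is dropped.
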
@@ -1448,11 +1466,12 @@ static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
        return ret;
 }
 
-int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
+bool task_in_mem_cgroup(struct task_struct *task,
+                       const struct mem_cgroup *memcg)
 {
-       int ret;
        struct mem_cgroup *curr = NULL;
        struct task_struct *p;
+       bool ret;
 
        p = find_lock_task_mm(task);
        if (p) {
@@ -1464,14 +1483,14 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
                 * killer still needs to detect if they have already been oom
                 * killed to prevent needlessly killing additional tasks.
                 */
-               task_lock(task);
+               rcu_read_lock();
                curr = mem_cgroup_from_task(task);
                if (curr)
                        css_get(&curr->css);
-               task_unlock(task);
+               rcu_read_unlock();
        }
        if (!curr)
-               return 0;
+               return false;
        /*
         * We should check use_hierarchy of "memcg" not "curr". Because checking
         * use_hierarchy of "curr" here make this function true if hierarchy is
@@ -6058,13 +6077,13 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
                mz->on_tree = false;
                mz->memcg = memcg;
        }
-       memcg->info.nodeinfo[node] = pn;
+       memcg->nodeinfo[node] = pn;
        return 0;
 }
 
 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 {
-       kfree(memcg->info.nodeinfo[node]);
+       kfree(memcg->nodeinfo[node]);
 }
 
 static struct mem_cgroup *mem_cgroup_alloc(void)
@@ -6317,14 +6336,14 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
        struct mem_cgroup *parent = memcg;
 
        while ((parent = parent_mem_cgroup(parent)))
-               atomic_inc(&parent->dead_count);
+               mem_cgroup_iter_invalidate(parent);
 
        /*
         * if the root memcg is not hierarchical we have to check it
         * explicitly.
         */
        if (!root_mem_cgroup->use_hierarchy)
-               atomic_inc(&root_mem_cgroup->dead_count);
+               mem_cgroup_iter_invalidate(root_mem_cgroup);
 }
 
 static void mem_cgroup_css_offline(struct cgroup *cont)