mm, memcg: prevent memory.max load tearing
author		Chris Down <chris@chrisdown.name>
		Thu, 2 Apr 2020 04:07:20 +0000 (21:07 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Thu, 2 Apr 2020 16:35:28 +0000 (09:35 -0700)
This one is a bit more nuanced because we have memcg_max_mutex, which is
mostly just used for enforcing invariants, but we still need READ_ONCE()
since (despite its name) it doesn't really protect memory.max access.
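
A minimal reader-side sketch of what this buys (illustrative only, not kernel
code; it assumes the page_counter API from include/linux/page_counter.h, and
limit_headroom() is a made-up helper):

	/*
	 * Without READ_ONCE() the compiler is free to reload or tear the
	 * plain load of ->max, so a racing write could make the two uses
	 * of the limit disagree.  READ_ONCE() forces one untorn load.
	 */
	static unsigned long limit_headroom(struct page_counter *c)
	{
		unsigned long usage = page_counter_read(c);
		unsigned long max = READ_ONCE(c->max);

		return usage < max ? max - usage : 0;
	}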

On write (page_counter_set_max() and memory_max_write()) we use xchg(),
which uses smp_mb(), so that's already fine.
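
The write side pairs with those reads; a sketch of the property relied on
(not the actual page_counter_set_max() body, set_limit() is hypothetical):

	/*
	 * xchg() is an atomic read-modify-write with full barrier
	 * semantics, so the store to ->max can never be observed torn by
	 * READ_ONCE() readers, and the previous limit is returned in case
	 * the caller needs to restore it.
	 */
	static unsigned long set_limit(struct page_counter *c, unsigned long new_max)
	{
		return xchg(&c->max, new_max);
	}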

Signed-off-by: Chris Down <chris@chrisdown.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/50a31e5f39f8ae6c8fb73966ba1455f0924e8f44.1584034301.git.chris@chrisdown.name
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c

index b1bb624..f7af5ae 100644
@@ -1521,7 +1521,7 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 
        pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
                K((u64)page_counter_read(&memcg->memory)),
-               K((u64)memcg->memory.max), memcg->memory.failcnt);
+               K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
                        K((u64)page_counter_read(&memcg->swap)),
@@ -1552,7 +1552,7 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 {
        unsigned long max;
 
-       max = memcg->memory.max;
+       max = READ_ONCE(memcg->memory.max);
        if (mem_cgroup_swappiness(memcg)) {
                unsigned long memsw_max;
                unsigned long swap_max;
@@ -3068,7 +3068,7 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
                 * Make sure that the new limit (memsw or memory limit) doesn't
                 * break our basic invariant rule memory.max <= memsw.max.
                 */
-               limits_invariant = memsw ? max >= memcg->memory.max :
+               limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
                                           max <= memcg->memsw.max;
                if (!limits_invariant) {
                        mutex_unlock(&memcg_max_mutex);
@@ -3815,8 +3815,8 @@ static int memcg_stat_show(struct seq_file *m, void *v)
        /* Hierarchical information */
        memory = memsw = PAGE_COUNTER_MAX;
        for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
-               memory = min(memory, mi->memory.max);
-               memsw = min(memsw, mi->memsw.max);
+               memory = min(memory, READ_ONCE(mi->memory.max));
+               memsw = min(memsw, READ_ONCE(mi->memsw.max));
        }
        seq_printf(m, "hierarchical_memory_limit %llu\n",
                   (u64)memory * PAGE_SIZE);
@@ -4325,7 +4325,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
        *pheadroom = PAGE_COUNTER_MAX;
 
        while ((parent = parent_mem_cgroup(memcg))) {
-               unsigned long ceiling = min(memcg->memory.max,
+               unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
                                            READ_ONCE(memcg->high));
                unsigned long used = page_counter_read(&memcg->memory);