mm, memcg: Prevent memory.oom_control load/store tearing
author: Yue Zhao <findns94@gmail.com>
Mon, 6 Mar 2023 15:41:37 +0000 (23:41 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Tue, 28 Mar 2023 23:20:13 +0000 (16:20 -0700)
The knob for the cgroup v1 memory controller, memory.oom_control, is not
protected by any locking, so it can be modified while it is used.  This is
not an actual problem because races are unlikely, but it is better to use
[READ|WRITE]_ONCE to prevent the compiler from doing anything funky.

The access of memcg->oom_kill_disable is lockless, so it can be
concurrently set at the same time as we are trying to read it.  All
occurrences of memcg->oom_kill_disable are updated with [READ|WRITE]_ONCE.

[findns94@gmail.com: v3]
Link: https://lkml.kernel.org/r/20230308162555.14195-4-findns94@gmail.com
Link: https://lkml.kernel.org/r/20230306154138.377-4-findns94@gmail.com
Signed-off-by: Yue Zhao <findns94@gmail.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Tang Yizhou <tangyeechou@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol.c

index 1b0112a..5b7062d 100644 (file)
@@ -1929,7 +1929,7 @@ static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
         * Please note that mem_cgroup_out_of_memory might fail to find a
         * victim and then we have to bail out from the charge path.
         */
-       if (memcg->oom_kill_disable) {
+       if (READ_ONCE(memcg->oom_kill_disable)) {
                if (current->in_user_fault) {
                        css_get(&memcg->css);
                        current->memcg_in_oom = memcg;
@@ -1999,7 +1999,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
        if (locked)
                mem_cgroup_oom_notify(memcg);
 
-       if (locked && !memcg->oom_kill_disable) {
+       if (locked && !READ_ONCE(memcg->oom_kill_disable)) {
                mem_cgroup_unmark_under_oom(memcg);
                finish_wait(&memcg_oom_waitq, &owait.wait);
                mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
@@ -4515,7 +4515,7 @@ static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
 
-       seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
+       seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
        seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
        seq_printf(sf, "oom_kill %lu\n",
                   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
@@ -4531,7 +4531,7 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
        if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
                return -EINVAL;
 
-       memcg->oom_kill_disable = val;
+       WRITE_ONCE(memcg->oom_kill_disable, val);
        if (!val)
                memcg_oom_recover(memcg);
 
@@ -5354,7 +5354,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
        if (parent) {
                WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
-               memcg->oom_kill_disable = parent->oom_kill_disable;
+               WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
 
                page_counter_init(&memcg->memory, &parent->memory);
                page_counter_init(&memcg->swap, &parent->swap);