mm, memcg: prevent memory.max load tearing
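
The limits on a memcg (memory.max, and the memory.high watermark stored in
memcg->high) are updated from the cgroup control files while hot paths such
as charging, reclaim and the OOM report read them locklessly.  Nothing stops
the compiler from tearing, fusing or re-loading those plain accesses, so
annotate the lockless reads and writes with READ_ONCE()/WRITE_ONCE(); the
hunks below cover both memcg->memory.max and memcg->high.

A minimal sketch of the pattern, outside the memcg code (struct and function
names here are made up purely for illustration):

	#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */

	struct limit {
		unsigned long max;	/* stand-in for memcg->memory.max */
	};

	/* writer side, e.g. handling a write to the control file */
	static void limit_set(struct limit *l, unsigned long new_max)
	{
		/* single, untorn store visible to lockless readers */
		WRITE_ONCE(l->max, new_max);
	}

	/* reader side, e.g. the charge path checking against the limit */
	static bool limit_exceeded(struct limit *l, unsigned long usage)
	{
		/* single, untorn load, not silently re-read by the compiler */
		return usage > READ_ONCE(l->max);
	}

The annotation is documentation as much as correctness: it marks exactly
those accesses that are intentionally performed without holding a lock.
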
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index badd043..f7af5ae 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1521,7 +1521,7 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 
        pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
                K((u64)page_counter_read(&memcg->memory)),
-               K((u64)memcg->memory.max), memcg->memory.failcnt);
+               K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
                        K((u64)page_counter_read(&memcg->swap)),
@@ -1552,7 +1552,7 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 {
        unsigned long max;
 
-       max = memcg->memory.max;
+       max = READ_ONCE(memcg->memory.max);
        if (mem_cgroup_swappiness(memcg)) {
                unsigned long memsw_max;
                unsigned long swap_max;
@@ -2242,7 +2242,7 @@ static void reclaim_high(struct mem_cgroup *memcg,
                         gfp_t gfp_mask)
 {
        do {
-               if (page_counter_read(&memcg->memory) <= memcg->high)
+               if (page_counter_read(&memcg->memory) <= READ_ONCE(memcg->high))
                        continue;
                memcg_memory_event(memcg, MEMCG_HIGH);
                try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
@@ -2582,7 +2582,7 @@ done_restock:
         * reclaim, the cost of mismatch is negligible.
         */
        do {
-               if (page_counter_read(&memcg->memory) > memcg->high) {
+               if (page_counter_read(&memcg->memory) > READ_ONCE(memcg->high)) {
                        /* Don't bother a random interrupted task */
                        if (in_interrupt()) {
                                schedule_work(&memcg->high_work);
@@ -3068,7 +3068,7 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
                 * Make sure that the new limit (memsw or memory limit) doesn't
                 * break our basic invariant rule memory.max <= memsw.max.
                 */
-               limits_invariant = memsw ? max >= memcg->memory.max :
+               limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
                                           max <= memcg->memsw.max;
                if (!limits_invariant) {
                        mutex_unlock(&memcg_max_mutex);
@@ -3815,8 +3815,8 @@ static int memcg_stat_show(struct seq_file *m, void *v)
        /* Hierarchical information */
        memory = memsw = PAGE_COUNTER_MAX;
        for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
-               memory = min(memory, mi->memory.max);
-               memsw = min(memsw, mi->memsw.max);
+               memory = min(memory, READ_ONCE(mi->memory.max));
+               memsw = min(memsw, READ_ONCE(mi->memsw.max));
        }
        seq_printf(m, "hierarchical_memory_limit %llu\n",
                   (u64)memory * PAGE_SIZE);
@@ -4325,7 +4325,8 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
        *pheadroom = PAGE_COUNTER_MAX;
 
        while ((parent = parent_mem_cgroup(memcg))) {
-               unsigned long ceiling = min(memcg->memory.max, memcg->high);
+               unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
+                                           READ_ONCE(memcg->high));
                unsigned long used = page_counter_read(&memcg->memory);
 
                *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
@@ -5047,7 +5048,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        if (!memcg)
                return ERR_PTR(error);
 
-       memcg->high = PAGE_COUNTER_MAX;
+       WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
        memcg->soft_limit = PAGE_COUNTER_MAX;
        if (parent) {
                memcg->swappiness = mem_cgroup_swappiness(parent);
@@ -5200,7 +5201,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
        page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
        page_counter_set_min(&memcg->memory, 0);
        page_counter_set_low(&memcg->memory, 0);
-       memcg->high = PAGE_COUNTER_MAX;
+       WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
        memcg->soft_limit = PAGE_COUNTER_MAX;
        memcg_wb_domain_size_changed(memcg);
 }
@@ -6016,7 +6017,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
        if (err)
                return err;
 
-       memcg->high = high;
+       WRITE_ONCE(memcg->high, high);
 
        for (;;) {
                unsigned long nr_pages = page_counter_read(&memcg->memory);