Merge remote-tracking branch 'stable/linux-5.15.y' into rpi-5.15.y
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b174c0c..971546b 100644
@@ -254,7 +254,7 @@ struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-extern spinlock_t css_set_lock;
+static DEFINE_SPINLOCK(objcg_lock);
 
 bool mem_cgroup_kmem_disabled(void)
 {
@@ -298,9 +298,9 @@ static void obj_cgroup_release(struct percpu_ref *ref)
        if (nr_pages)
                obj_cgroup_uncharge_pages(objcg, nr_pages);
 
-       spin_lock_irqsave(&css_set_lock, flags);
+       spin_lock_irqsave(&objcg_lock, flags);
        list_del(&objcg->list);
-       spin_unlock_irqrestore(&css_set_lock, flags);
+       spin_unlock_irqrestore(&objcg_lock, flags);
 
        percpu_ref_exit(ref);
        kfree_rcu(objcg, rcu);
@@ -332,7 +332,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 
        objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
 
-       spin_lock_irq(&css_set_lock);
+       spin_lock_irq(&objcg_lock);
 
        /* 1) Ready to reparent active objcg. */
        list_add(&objcg->list, &memcg->objcg_list);
@@ -342,7 +342,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
        /* 3) Move already reparented objcgs to the parent's list */
        list_splice(&memcg->objcg_list, &parent->objcg_list);
 
-       spin_unlock_irq(&css_set_lock);
+       spin_unlock_irq(&objcg_lock);
 
        percpu_ref_kill(&objcg->refcnt);
 }
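
The three hunks above are one logical change: the objcg list was piggybacking on the global css_set_lock, so every objcg release and reparent contended with unrelated cgroup core operations. A private objcg_lock scoped to mm/memcontrol.c removes that coupling. A minimal sketch of the resulting pattern (the helper name is illustrative; the lock and list operations are from the patch):

    static DEFINE_SPINLOCK(objcg_lock);

    /* Release path: may run from any context, so the irqsave form
     * is used rather than plain spin_lock_irq(). */
    static void objcg_unlink(struct obj_cgroup *objcg)
    {
            unsigned long flags;

            spin_lock_irqsave(&objcg_lock, flags);
            list_del(&objcg->list);
            spin_unlock_irqrestore(&objcg_lock, flags);
    }

memcg_reparent_objcgs() keeps the cheaper spin_lock_irq() form because it runs in process context (css offlining) with interrupts known to be enabled.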
@@ -650,12 +650,21 @@ static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
 static DEFINE_SPINLOCK(stats_flush_lock);
 static DEFINE_PER_CPU(unsigned int, stats_updates);
 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
+static u64 flush_next_time;
 
-static inline void memcg_rstat_updated(struct mem_cgroup *memcg)
+#define FLUSH_TIME (2UL*HZ)
+
+static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 {
+       unsigned int x;
+
        cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
-       if (!(__this_cpu_inc_return(stats_updates) % MEMCG_CHARGE_BATCH))
-               atomic_inc(&stats_flush_threshold);
+
+       x = __this_cpu_add_return(stats_updates, abs(val));
+       if (x > MEMCG_CHARGE_BATCH) {
+               atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
+               __this_cpu_write(stats_updates, 0);
+       }
 }
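
Previously every call to memcg_rstat_updated() counted as one update regardless of size, so a 1000-page charge created the same flush pressure as a one-page charge. The new version accumulates the magnitude per CPU (abs(val), since uncharges pass negative deltas) and converts each MEMCG_CHARGE_BATCH's worth (32 in 5.15) into one unit of the global stats_flush_threshold. A runnable user-space model of that arithmetic, with illustrative names:

    /* Models the batching above; BATCH stands in for MEMCG_CHARGE_BATCH,
     * the per-CPU variable is collapsed to a single counter. */
    #include <stdio.h>
    #include <stdlib.h>

    #define BATCH 32U

    static unsigned int cpu_updates;      /* this CPU's stats_updates */
    static unsigned int flush_threshold;  /* stats_flush_threshold */

    static void rstat_updated(int val)
    {
            unsigned int x = cpu_updates + abs(val);

            if (x > BATCH) {
                    flush_threshold += x / BATCH;
                    cpu_updates = 0;
            } else {
                    cpu_updates = x;
            }
    }

    int main(void)
    {
            rstat_updated(200);   /* one large update: 6 batches */
            rstat_updated(-5);    /* small deltas keep accumulating */
            printf("threshold=%u pending=%u\n", flush_threshold, cpu_updates);
            return 0;
    }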
 
 static void __mem_cgroup_flush_stats(void)
@@ -665,6 +674,7 @@ static void __mem_cgroup_flush_stats(void)
        if (!spin_trylock_irqsave(&stats_flush_lock, flag))
                return;
 
+       flush_next_time = jiffies_64 + 2*FLUSH_TIME;
        cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
        atomic_set(&stats_flush_threshold, 0);
        spin_unlock_irqrestore(&stats_flush_lock, flag);
@@ -676,10 +686,16 @@ void mem_cgroup_flush_stats(void)
                __mem_cgroup_flush_stats();
 }
 
+void mem_cgroup_flush_stats_delayed(void)
+{
+       if (time_after64(jiffies_64, flush_next_time))
+               mem_cgroup_flush_stats();
+}
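
mem_cgroup_flush_stats_delayed() gives hot paths a cheaper option: skip the synchronous flush entirely unless the periodic worker has fallen behind. Since __mem_cgroup_flush_stats() pushes flush_next_time 2*FLUSH_TIME into the future while the worker fires every FLUSH_TIME, time_after64() only trips when a flush is genuinely overdue, bounding staleness at roughly two periods (about 4 seconds). A sketch of a latency-sensitive reader (the wrapper is illustrative; in the full series the caller is the workingset refault path):

    static unsigned long hot_path_read_stat(struct mem_cgroup *memcg, int idx)
    {
            /* Only flush if the periodic flush is overdue; otherwise
             * accept up to ~2*FLUSH_TIME of staleness for free. */
            mem_cgroup_flush_stats_delayed();
            return memcg_page_state(memcg, idx);
    }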
+
 static void flush_memcg_stats_dwork(struct work_struct *w)
 {
-       mem_cgroup_flush_stats();
-       queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
+       __mem_cgroup_flush_stats();
+       queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 }
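
Note the worker now calls __mem_cgroup_flush_stats() directly rather than mem_cgroup_flush_stats(): the periodic flush should run unconditionally, not be gated on the stats_flush_threshold check in the latter. Reusing the shared FLUSH_TIME constant also keeps the requeue interval and the overdue check in mem_cgroup_flush_stats_delayed() from drifting apart.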
 
 /**
@@ -694,7 +710,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
                return;
 
        __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
-       memcg_rstat_updated(memcg);
+       memcg_rstat_updated(memcg, val);
 }
 
 /* idx can be of type enum memcg_stat_item or node_stat_item. */
@@ -727,7 +743,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
        /* Update lruvec */
        __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
 
-       memcg_rstat_updated(memcg);
+       memcg_rstat_updated(memcg, val);
 }
 
 /**
@@ -829,7 +845,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                return;
 
        __this_cpu_add(memcg->vmstats_percpu->events[idx], count);
-       memcg_rstat_updated(memcg);
+       memcg_rstat_updated(memcg, count);
 }
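
All three update paths (__mod_memcg_state(), __mod_memcg_lruvec_state(), __count_memcg_events()) now forward their delta to memcg_rstat_updated(). Stat deltas can be negative on uncharge, which is why the accumulator takes abs(val); event counts are always non-negative, so they pass through unchanged.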
 
 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
@@ -7099,7 +7115,7 @@ static int __init cgroup_memory(char *s)
                if (!strcmp(token, "nokmem"))
                        cgroup_memory_nokmem = true;
        }
-       return 0;
+       return 1;
 }
 __setup("cgroup.memory=", cgroup_memory);
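
The cgroup_memory() hunk is unrelated to the stats work: __setup() handlers return 1 to mark a boot parameter as consumed. Returning 0 made the kernel treat "cgroup.memory=..." as an unknown parameter and pass it along to init's argument or environment strings. The contract, with an illustrative handler:

    static int __init example_setup(char *s)
    {
            /* parse s ... */
            return 1;       /* consumed; 0 = "Unknown kernel parameter",
                             * leaked into init's argv/envp */
    }
    __setup("example.param=", example_setup);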