// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "swap.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
static bool cgroup_memory_noswap __ro_after_init;
#else
#define cgroup_memory_noswap            1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
        return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation.
 */

struct mem_cgroup_tree_per_node {
        struct rb_root rb_root;
        struct rb_node *rb_rightmost;
        spinlock_t lock;
};

struct mem_cgroup_tree {
        struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
        struct list_head list;
        struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
        /*
         * memcg which the event belongs to.
         */
        struct mem_cgroup *memcg;
        /*
         * eventfd to signal userspace about the event.
         */
        struct eventfd_ctx *eventfd;
        /*
         * Each of these is stored in a list by the cgroup.
         */
        struct list_head list;
        /*
         * The register_event() callback will be used to add a new userspace
         * waiter for changes related to this event.  Use eventfd_signal()
         * on the eventfd to send a notification to userspace.
         */
        int (*register_event)(struct mem_cgroup *memcg,
                              struct eventfd_ctx *eventfd, const char *args);
        /*
         * The unregister_event() callback will be called when userspace
         * closes the eventfd or when the cgroup is removed.  This callback
         * must be set if you want to provide notification functionality.
         */
        void (*unregister_event)(struct mem_cgroup *memcg,
                                 struct eventfd_ctx *eventfd);
        /*
         * All fields below are needed to unregister the event when
         * userspace closes the eventfd.
         */
        poll_table pt;
        wait_queue_head_t *wqh;
        wait_queue_entry_t wait;
        struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON       0x1U
#define MOVE_FILE       0x2U
#define MOVE_MASK       (MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
        spinlock_t        lock; /* for from, to */
        struct mm_struct  *mm;
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        unsigned long flags;
        unsigned long precharge;
        unsigned long moved_charge;
        unsigned long moved_swap;
        struct task_struct *moving_task;        /* a task moving charges */
        wait_queue_head_t waitq;                /* a waitq for other context */
} mc = {
        .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2

/* for encoding cft->private value on file */
enum res_type {
        _MEM,
        _MEMSWAP,
        _KMEM,
        _TCP,
};

#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
#define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)
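/*
 * Illustrative example (not part of the original source): the packed value
 * keeps the res_type in the high 16 bits and the attribute in the low 16,
 * so MEMFILE_PRIVATE(_MEMSWAP, 3) == (1 << 16 | 3) == 0x10003, from which
 * MEMFILE_TYPE() recovers _MEMSWAP and MEMFILE_ATTR() recovers 3.
 */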

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)            \
        for (iter = mem_cgroup_iter(root, NULL, NULL);  \
             iter != NULL;                              \
             iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)                       \
        for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
             iter != NULL;                              \
             iter = mem_cgroup_iter(NULL, iter, NULL))
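/*
 * Usage sketch (illustrative only; should_stop() is a hypothetical
 * predicate): a walk that exits early must drop the iterator reference
 * via mem_cgroup_iter_break():
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * mem_cgroup_scan_tasks() below follows exactly this pattern.
 */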

static inline bool task_is_dying(void)
{
        return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
                (current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
        if (!memcg)
                memcg = root_mem_cgroup;
        return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
        return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#ifdef CONFIG_MEMCG_KMEM
static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
        return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
                                      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
        struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
        unsigned int nr_bytes;
        unsigned int nr_pages;
        unsigned long flags;

        /*
         * At this point all allocated objects are freed, and
         * objcg->nr_charged_bytes can't have an arbitrary byte value.
         * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
         *
         * The following sequence can lead to it:
         * 1) CPU0: objcg == stock->cached_objcg
         * 2) CPU1: we do a small allocation (e.g. 92 bytes),
         *          PAGE_SIZE bytes are charged
         * 3) CPU1: a process from another memcg is allocating something,
         *          the stock is flushed,
         *          objcg->nr_charged_bytes = PAGE_SIZE - 92
         * 4) CPU0: we release this object,
         *          92 bytes are added to stock->nr_bytes
         * 5) CPU0: the stock is flushed,
         *          92 bytes are added to objcg->nr_charged_bytes
         *
         * As a result, nr_charged_bytes == PAGE_SIZE.
         * This page will be uncharged in obj_cgroup_release().
         */
        nr_bytes = atomic_read(&objcg->nr_charged_bytes);
        WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
        nr_pages = nr_bytes >> PAGE_SHIFT;

        if (nr_pages)
                obj_cgroup_uncharge_pages(objcg, nr_pages);

        spin_lock_irqsave(&objcg_lock, flags);
        list_del(&objcg->list);
        spin_unlock_irqrestore(&objcg_lock, flags);

        percpu_ref_exit(ref);
        kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
        struct obj_cgroup *objcg;
        int ret;

        objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
        if (!objcg)
                return NULL;

        ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
                              GFP_KERNEL);
        if (ret) {
                kfree(objcg);
                return NULL;
        }
        INIT_LIST_HEAD(&objcg->list);
        return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
                                  struct mem_cgroup *parent)
{
        struct obj_cgroup *objcg, *iter;

        objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

        spin_lock_irq(&objcg_lock);

        /* 1) Ready to reparent active objcg. */
        list_add(&objcg->list, &memcg->objcg_list);
        /* 2) Reparent active objcg and already reparented objcgs to parent. */
        list_for_each_entry(iter, &memcg->objcg_list, list)
                WRITE_ONCE(iter->memcg, parent);
        /* 3) Move already reparented objcgs to the parent's list */
        list_splice(&memcg->objcg_list, &parent->objcg_list);

        spin_unlock_irq(&objcg_lock);

        percpu_ref_kill(&objcg->refcnt);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
        struct mem_cgroup *memcg;

        memcg = page_memcg(page);

        if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                memcg = root_mem_cgroup;

        return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
        struct mem_cgroup *memcg;
        unsigned long ino = 0;

        rcu_read_lock();
        memcg = page_memcg_check(page);

        while (memcg && !(memcg->css.flags & CSS_ONLINE))
                memcg = parent_mem_cgroup(memcg);
        if (memcg)
                ino = cgroup_ino(memcg->css.cgroup);
        rcu_read_unlock();
        return ino;
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
                                         struct mem_cgroup_tree_per_node *mctz,
                                         unsigned long new_usage_in_excess)
{
        struct rb_node **p = &mctz->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct mem_cgroup_per_node *mz_node;
        bool rightmost = true;

        if (mz->on_tree)
                return;

        mz->usage_in_excess = new_usage_in_excess;
        if (!mz->usage_in_excess)
                return;
        while (*p) {
                parent = *p;
                mz_node = rb_entry(parent, struct mem_cgroup_per_node,
                                        tree_node);
                if (mz->usage_in_excess < mz_node->usage_in_excess) {
                        p = &(*p)->rb_left;
                        rightmost = false;
                } else {
                        p = &(*p)->rb_right;
                }
        }

        if (rightmost)
                mctz->rb_rightmost = &mz->tree_node;

        rb_link_node(&mz->tree_node, parent, p);
        rb_insert_color(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
                                         struct mem_cgroup_tree_per_node *mctz)
{
        if (!mz->on_tree)
                return;

        if (&mz->tree_node == mctz->rb_rightmost)
                mctz->rb_rightmost = rb_prev(&mz->tree_node);

        rb_erase(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
                                       struct mem_cgroup_tree_per_node *mctz)
{
        unsigned long flags;

        spin_lock_irqsave(&mctz->lock, flags);
        __mem_cgroup_remove_exceeded(mz, mctz);
        spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
        unsigned long nr_pages = page_counter_read(&memcg->memory);
        unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
        unsigned long excess = 0;

        if (nr_pages > soft_limit)
                excess = nr_pages - soft_limit;

        return excess;
}
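/*
 * Example (illustrative): with usage at 300 pages and a soft limit of 200,
 * soft_limit_excess() returns 100. This excess is the sort key used by
 * __mem_cgroup_insert_exceeded() to order memcgs in the soft-limit RB-tree.
 */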

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
{
        unsigned long excess;
        struct mem_cgroup_per_node *mz;
        struct mem_cgroup_tree_per_node *mctz;

        mctz = soft_limit_tree.rb_tree_per_node[nid];
        if (!mctz)
                return;
        /*
         * Necessary to update all ancestors when hierarchy is used,
         * because their event counters are not touched.
         */
        for (; memcg; memcg = parent_mem_cgroup(memcg)) {
                mz = memcg->nodeinfo[nid];
                excess = soft_limit_excess(memcg);
                /*
                 * We have to update the tree if mz is on the RB-tree or
                 * the memcg is over its soft limit.
                 */
                if (excess || mz->on_tree) {
                        unsigned long flags;

                        spin_lock_irqsave(&mctz->lock, flags);
                        /* if on-tree, remove it */
                        if (mz->on_tree)
                                __mem_cgroup_remove_exceeded(mz, mctz);
                        /*
                         * Insert again. mz->usage_in_excess will be updated.
                         * If excess is 0, no tree ops.
                         */
                        __mem_cgroup_insert_exceeded(mz, mctz, excess);
                        spin_unlock_irqrestore(&mctz->lock, flags);
                }
        }
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
        struct mem_cgroup_tree_per_node *mctz;
        struct mem_cgroup_per_node *mz;
        int nid;

        for_each_node(nid) {
                mz = memcg->nodeinfo[nid];
                mctz = soft_limit_tree.rb_tree_per_node[nid];
                if (mctz)
                        mem_cgroup_remove_exceeded(mz, mctz);
        }
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
        struct mem_cgroup_per_node *mz;

retry:
        mz = NULL;
        if (!mctz->rb_rightmost)
                goto done;              /* Nothing to reclaim from */

        mz = rb_entry(mctz->rb_rightmost,
                      struct mem_cgroup_per_node, tree_node);
        /*
         * Remove the node now, but someone else can add it back;
         * we will add it back at the end of reclaim to its correct
         * position in the tree.
         */
        __mem_cgroup_remove_exceeded(mz, mctz);
        if (!soft_limit_excess(mz->memcg) ||
            !css_tryget(&mz->memcg->css))
                goto retry;
done:
        return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
        struct mem_cgroup_per_node *mz;

        spin_lock_irq(&mctz->lock);
        mz = __mem_cgroup_largest_soft_limit_node(mctz);
        spin_unlock_irq(&mctz->lock);
        return mz;
}

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 * flushing, the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds so that
 *    the rstat update tree does not grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can
 *    leave the stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
 *    events, but only for 2 seconds due to (1).
 */
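/*
 * Back-of-the-envelope example (illustrative): on a machine with 8 online
 * CPUs, readers flush synchronously only once more than
 * MEMCG_CHARGE_BATCH * 8 update events have accumulated; in between, the
 * 2-second deferrable work below bounds how stale the stats can get.
 */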
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static DEFINE_SPINLOCK(stats_flush_lock);
static DEFINE_PER_CPU(unsigned int, stats_updates);
static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
static u64 flush_next_time;

#define FLUSH_TIME (2UL*HZ)

/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT, because we
 * cannot rely on preemption being disabled as part of an acquired spinlock_t
 * lock there. These functions are never used in hardirq context on PREEMPT_RT
 * and therefore disabling preemption is sufficient.
 */
static void memcg_stats_lock(void)
{
#ifdef CONFIG_PREEMPT_RT
        preempt_disable();
#else
        VM_BUG_ON(!irqs_disabled());
#endif
}

static void __memcg_stats_lock(void)
{
#ifdef CONFIG_PREEMPT_RT
        preempt_disable();
#endif
}

static void memcg_stats_unlock(void)
{
#ifdef CONFIG_PREEMPT_RT
        preempt_enable();
#endif
}

static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
        unsigned int x;

        cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());

        x = __this_cpu_add_return(stats_updates, abs(val));
        if (x > MEMCG_CHARGE_BATCH) {
                /*
                 * If stats_flush_threshold already exceeds the threshold
                 * (> num_online_cpus()), the stats flush will be triggered
                 * in __mem_cgroup_flush_stats(). Increasing this variable
                 * further is redundant and simply adds overhead to the
                 * atomic update.
                 */
                if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
                        atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
                __this_cpu_write(stats_updates, 0);
        }
}

static void __mem_cgroup_flush_stats(void)
{
        unsigned long flag;

        if (!spin_trylock_irqsave(&stats_flush_lock, flag))
                return;

        flush_next_time = jiffies_64 + 2*FLUSH_TIME;
        cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
        atomic_set(&stats_flush_threshold, 0);
        spin_unlock_irqrestore(&stats_flush_lock, flag);
}

void mem_cgroup_flush_stats(void)
{
        if (atomic_read(&stats_flush_threshold) > num_online_cpus())
                __mem_cgroup_flush_stats();
}

void mem_cgroup_flush_stats_delayed(void)
{
        if (time_after64(jiffies_64, flush_next_time))
                mem_cgroup_flush_stats();
}

static void flush_memcg_stats_dwork(struct work_struct *w)
{
        __mem_cgroup_flush_stats();
        queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
        if (mem_cgroup_disabled())
                return;

        __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
        memcg_rstat_updated(memcg, val);
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
        long x = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                              int val)
{
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup *memcg;

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        memcg = pn->memcg;

        /*
         * Callers from rmap rely on disabled preemption because they never
         * update their counter from in-interrupt context. For these counters
         * we check that the update is never performed from an interrupt
         * context, while other callers need to have interrupts disabled.
         */
        __memcg_stats_lock();
        if (IS_ENABLED(CONFIG_DEBUG_VM) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
                switch (idx) {
                case NR_ANON_MAPPED:
                case NR_FILE_MAPPED:
                case NR_ANON_THPS:
                case NR_SHMEM_PMDMAPPED:
                case NR_FILE_PMDMAPPED:
                        WARN_ON_ONCE(!in_task());
                        break;
                default:
                        WARN_ON_ONCE(!irqs_disabled());
                }
        }

        /* Update memcg */
        __this_cpu_add(memcg->vmstats_percpu->state[idx], val);

        /* Update lruvec */
        __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);

        memcg_rstat_updated(memcg, val);
        memcg_stats_unlock();
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val)
{
        /* Update node */
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

        /* Update memcg and lruvec */
        if (!mem_cgroup_disabled())
                __mod_memcg_lruvec_state(lruvec, idx, val);
}

void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
                             int val)
{
        struct page *head = compound_head(page); /* rmap on tail pages */
        struct mem_cgroup *memcg;
        pg_data_t *pgdat = page_pgdat(page);
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = page_memcg(head);
        /* Untracked pages have no memcg, no lruvec. Update only the node */
        if (!memcg) {
                rcu_read_unlock();
                __mod_node_page_state(pgdat, idx, val);
                return;
        }

        lruvec = mem_cgroup_lruvec(memcg, pgdat);
        __mod_lruvec_state(lruvec, idx, val);
        rcu_read_unlock();
}
EXPORT_SYMBOL(__mod_lruvec_page_state);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
        pg_data_t *pgdat = page_pgdat(virt_to_page(p));
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = mem_cgroup_from_slab_obj(p);

        /*
         * Untracked pages have no memcg and no lruvec, so update only the
         * node. Note that if slab objects are reparented to the root memcg,
         * the per-memcg vmstats still need updating when such an object is
         * freed, to keep the stats correct for the root memcg.
         */
        if (!memcg) {
                __mod_node_page_state(pgdat, idx, val);
        } else {
                lruvec = mem_cgroup_lruvec(memcg, pgdat);
                __mod_lruvec_state(lruvec, idx, val);
        }
        rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                          unsigned long count)
{
        if (mem_cgroup_disabled())
                return;

        memcg_stats_lock();
        __this_cpu_add(memcg->vmstats_percpu->events[idx], count);
        memcg_rstat_updated(memcg, count);
        memcg_stats_unlock();
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
        return READ_ONCE(memcg->vmstats.events[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
        long x = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
        return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                                         int nr_pages)
{
        /* A pagein of a huge page counts as one event, so ignore the page size */
        if (nr_pages > 0)
                __count_memcg_events(memcg, PGPGIN, 1);
        else {
                __count_memcg_events(memcg, PGPGOUT, 1);
                nr_pages = -nr_pages; /* for event */
        }

        __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
                                       enum mem_cgroup_events_target target)
{
        unsigned long val, next;

        val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
        next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
        /* from time_after() in jiffies.h */
        if ((long)(next - val) < 0) {
                switch (target) {
                case MEM_CGROUP_TARGET_THRESH:
                        next = val + THRESHOLDS_EVENTS_TARGET;
                        break;
                case MEM_CGROUP_TARGET_SOFTLIMIT:
                        next = val + SOFTLIMIT_EVENTS_TARGET;
                        break;
                default:
                        break;
                }
                __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
                return true;
        }
        return false;
}
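/*
 * Example (illustrative): with THRESHOLDS_EVENTS_TARGET == 128, this
 * returns true roughly once every 128 page events on this CPU, since each
 * hit advances the per-cpu target 128 events past the current count.
 */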

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, int nid)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                return;

        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_THRESH))) {
                bool do_softlimit;

                do_softlimit = mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_SOFTLIMIT);
                mem_cgroup_threshold(memcg);
                if (unlikely(do_softlimit))
                        mem_cgroup_update_tree(memcg, nid);
        }
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        /*
         * mm_update_next_owner() may clear mm->owner to NULL
         * if it races with swapoff, page migration, etc.
         * So this can be called with p == NULL.
         */
        if (unlikely(!p))
                return NULL;

        return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
        if (!in_task())
                return this_cpu_read(int_active_memcg);
        else
                return current->active_memcg;
}

/**
 * get_mem_cgroup_from_mm: Obtain a reference on a given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available
 * 3) root memcg
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return NULL;

        /*
         * Page cache insertions can happen without an
         * actual mm context, e.g. during disk probing
         * on boot, loopback IO, acct() writes etc.
         *
         * No need to css_get on root memcg as the reference
         * counting is disabled on the root level in the
         * cgroup core. See CSS_NO_REF.
         */
        if (unlikely(!mm)) {
                memcg = active_memcg();
                if (unlikely(memcg)) {
                        /* remote memcg must hold a ref */
                        css_get(&memcg->css);
                        return memcg;
                }
                mm = current->mm;
                if (unlikely(!mm))
                        return root_mem_cgroup;
        }

        rcu_read_lock();
        do {
                memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
                if (unlikely(!memcg))
                        memcg = root_mem_cgroup;
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

static __always_inline bool memcg_kmem_bypass(void)
{
        /* Allow remote memcg charging from any context. */
        if (unlikely(active_memcg()))
                return false;

        /* Memcg to charge can't be determined. */
        if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
                return true;

        return false;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                   struct mem_cgroup *prev,
                                   struct mem_cgroup_reclaim_cookie *reclaim)
{
        struct mem_cgroup_reclaim_iter *iter;
        struct cgroup_subsys_state *css = NULL;
        struct mem_cgroup *memcg = NULL;
        struct mem_cgroup *pos = NULL;

        if (mem_cgroup_disabled())
                return NULL;

        if (!root)
                root = root_mem_cgroup;

        rcu_read_lock();

        if (reclaim) {
                struct mem_cgroup_per_node *mz;

                mz = root->nodeinfo[reclaim->pgdat->node_id];
                iter = &mz->iter;

                /*
                 * On start, join the current reclaim iteration cycle.
                 * Exit when a concurrent walker completes it.
                 */
                if (!prev)
                        reclaim->generation = iter->generation;
                else if (reclaim->generation != iter->generation)
                        goto out_unlock;

                while (1) {
                        pos = READ_ONCE(iter->position);
                        if (!pos || css_tryget(&pos->css))
                                break;
                        /*
                         * css reference reached zero, so iter->position will
                         * be cleared by ->css_released. However, we should not
                         * rely on this happening soon, because ->css_released
                         * is called from a work queue, and by busy-waiting we
                         * might block it. So we clear iter->position right
                         * away.
                         */
                        (void)cmpxchg(&iter->position, pos, NULL);
                }
        } else if (prev) {
                pos = prev;
        }

        if (pos)
                css = &pos->css;

        for (;;) {
                css = css_next_descendant_pre(css, &root->css);
                if (!css) {
                        /*
                         * Reclaimers share the hierarchy walk, and a
                         * new one might jump in right at the end of
                         * the hierarchy - make sure they see at least
                         * one group and restart from the beginning.
                         */
                        if (!prev)
                                continue;
                        break;
                }

                /*
                 * Verify the css and acquire a reference.  The root
                 * is provided by the caller, so we know it's alive
                 * and kicking, and don't take an extra reference.
                 */
                if (css == &root->css || css_tryget(css)) {
                        memcg = mem_cgroup_from_css(css);
                        break;
                }
        }

        if (reclaim) {
                /*
                 * The position could have already been updated by a competing
                 * thread, so check that the value hasn't changed since we read
                 * it to avoid reclaiming from the same cgroup twice.
                 */
                (void)cmpxchg(&iter->position, pos, memcg);

                if (pos)
                        css_put(&pos->css);

                if (!memcg)
                        iter->generation++;
        }

out_unlock:
        rcu_read_unlock();
        if (prev && prev != root)
                css_put(&prev->css);

        return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
                           struct mem_cgroup *prev)
{
        if (!root)
                root = root_mem_cgroup;
        if (prev && prev != root)
                css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
                                        struct mem_cgroup *dead_memcg)
{
        struct mem_cgroup_reclaim_iter *iter;
        struct mem_cgroup_per_node *mz;
        int nid;

        for_each_node(nid) {
                mz = from->nodeinfo[nid];
                iter = &mz->iter;
                cmpxchg(&iter->position, dead_memcg, NULL);
        }
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
        struct mem_cgroup *memcg = dead_memcg;
        struct mem_cgroup *last;

        do {
                __invalidate_reclaim_iterators(memcg, dead_memcg);
                last = memcg;
        } while ((memcg = parent_mem_cgroup(memcg)));

        /*
         * When cgroup1 non-hierarchy mode is used,
         * parent_mem_cgroup() does not walk all the way up to the
         * cgroup root (root_mem_cgroup). So we have to handle
         * dead_memcg from the cgroup root separately.
         */
        if (last != root_mem_cgroup)
                __invalidate_reclaim_iterators(root_mem_cgroup,
                                                dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                          int (*fn)(struct task_struct *, void *), void *arg)
{
        struct mem_cgroup *iter;
        int ret = 0;

        BUG_ON(memcg == root_mem_cgroup);

        for_each_mem_cgroup_tree(iter, memcg) {
                struct css_task_iter it;
                struct task_struct *task;

                css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
                while (!ret && (task = css_task_iter_next(&it)))
                        ret = fn(task, arg);
                css_task_iter_end(&it);
                if (ret) {
                        mem_cgroup_iter_break(memcg, iter);
                        break;
                }
        }
        return ret;
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        memcg = folio_memcg(folio);

        if (!memcg)
                VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio);
        else
                VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif

/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
        struct lruvec *lruvec = folio_lruvec(folio);

        spin_lock(&lruvec->lru_lock);
        lruvec_memcg_debug(lruvec, folio);

        return lruvec;
}

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
        struct lruvec *lruvec = folio_lruvec(folio);

        spin_lock_irq(&lruvec->lru_lock);
        lruvec_memcg_debug(lruvec, folio);

        return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
                unsigned long *flags)
{
        struct lruvec *lruvec = folio_lruvec(folio);

        spin_lock_irqsave(&lruvec->lru_lock, *flags);
        lruvec_memcg_debug(lruvec, folio);

        return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages)
{
        struct mem_cgroup_per_node *mz;
        unsigned long *lru_size;
        long size;

        if (mem_cgroup_disabled())
                return;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        lru_size = &mz->lru_zone_size[zid][lru];

        if (nr_pages < 0)
                *lru_size += nr_pages;

        size = *lru_size;
        if (WARN_ONCE(size < 0,
                "%s(%p, %d, %d): lru_size %ld\n",
                __func__, lruvec, lru, nr_pages, size)) {
                VM_BUG_ON(1);
                *lru_size = 0;
        }

        if (nr_pages > 0)
                *lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
        unsigned long margin = 0;
        unsigned long count;
        unsigned long limit;

        count = page_counter_read(&memcg->memory);
        limit = READ_ONCE(memcg->memory.max);
        if (count < limit)
                margin = limit - count;

        if (do_memsw_account()) {
                count = page_counter_read(&memcg->memsw);
                limit = READ_ONCE(memcg->memsw.max);
                if (count < limit)
                        margin = min(margin, limit - count);
                else
                        margin = 0;
        }

        return margin;
}
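/*
 * Example (illustrative): usage of 900 pages against memory.max of 1000
 * leaves a margin of 100; if memsw accounting is active with usage 980
 * and memsw.max 1000, the margin shrinks to min(100, 20) == 20 pages.
 */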

/*
 * A routine for checking whether "memcg" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
 * moving cgroups. This is used for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        bool ret = false;
        /*
         * Unlike the task-move routines, we access mc.to and mc.from without
         * the mutual exclusion of cgroup_mutex; take the spinlock instead.
         */
        spin_lock(&mc.lock);
        from = mc.from;
        to = mc.to;
        if (!from)
                goto unlock;

        ret = mem_cgroup_is_descendant(from, memcg) ||
                mem_cgroup_is_descendant(to, memcg);
unlock:
        spin_unlock(&mc.lock);
        return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
        if (mc.moving_task && current != mc.moving_task) {
                if (mem_cgroup_under_move(memcg)) {
                        DEFINE_WAIT(wait);
                        prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
                        /* moving charge context might have finished. */
                        if (mc.moving_task)
                                schedule();
                        finish_wait(&mc.waitq, &wait);
                        return true;
                }
        }
        return false;
}

struct memory_stat {
        const char *name;
        unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
        { "anon",                       NR_ANON_MAPPED                  },
        { "file",                       NR_FILE_PAGES                   },
        { "kernel",                     MEMCG_KMEM                      },
        { "kernel_stack",               NR_KERNEL_STACK_KB              },
        { "pagetables",                 NR_PAGETABLE                    },
        { "percpu",                     MEMCG_PERCPU_B                  },
        { "sock",                       MEMCG_SOCK                      },
        { "vmalloc",                    MEMCG_VMALLOC                   },
        { "shmem",                      NR_SHMEM                        },
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
        { "zswap",                      MEMCG_ZSWAP_B                   },
        { "zswapped",                   MEMCG_ZSWAPPED                  },
#endif
        { "file_mapped",                NR_FILE_MAPPED                  },
        { "file_dirty",                 NR_FILE_DIRTY                   },
        { "file_writeback",             NR_WRITEBACK                    },
#ifdef CONFIG_SWAP
        { "swapcached",                 NR_SWAPCACHE                    },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        { "anon_thp",                   NR_ANON_THPS                    },
        { "file_thp",                   NR_FILE_THPS                    },
        { "shmem_thp",                  NR_SHMEM_THPS                   },
#endif
        { "inactive_anon",              NR_INACTIVE_ANON                },
        { "active_anon",                NR_ACTIVE_ANON                  },
        { "inactive_file",              NR_INACTIVE_FILE                },
        { "active_file",                NR_ACTIVE_FILE                  },
        { "unevictable",                NR_UNEVICTABLE                  },
        { "slab_reclaimable",           NR_SLAB_RECLAIMABLE_B           },
        { "slab_unreclaimable",         NR_SLAB_UNRECLAIMABLE_B         },

        /* The memory events */
        { "workingset_refault_anon",    WORKINGSET_REFAULT_ANON         },
        { "workingset_refault_file",    WORKINGSET_REFAULT_FILE         },
        { "workingset_activate_anon",   WORKINGSET_ACTIVATE_ANON        },
        { "workingset_activate_file",   WORKINGSET_ACTIVATE_FILE        },
        { "workingset_restore_anon",    WORKINGSET_RESTORE_ANON         },
        { "workingset_restore_file",    WORKINGSET_RESTORE_FILE         },
        { "workingset_nodereclaim",     WORKINGSET_NODERECLAIM          },
};

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_unit(int item)
{
        switch (item) {
        case MEMCG_PERCPU_B:
        case MEMCG_ZSWAP_B:
        case NR_SLAB_RECLAIMABLE_B:
        case NR_SLAB_UNRECLAIMABLE_B:
        case WORKINGSET_REFAULT_ANON:
        case WORKINGSET_REFAULT_FILE:
        case WORKINGSET_ACTIVATE_ANON:
        case WORKINGSET_ACTIVATE_FILE:
        case WORKINGSET_RESTORE_ANON:
        case WORKINGSET_RESTORE_FILE:
        case WORKINGSET_NODERECLAIM:
                return 1;
        case NR_KERNEL_STACK_KB:
                return SZ_1K;
        default:
                return PAGE_SIZE;
        }
}

static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
                                                    int item)
{
        return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
}
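/*
 * Example (illustrative): NR_KERNEL_STACK_KB is tracked in kilobytes, so
 * memcg_page_state_output() multiplies it by SZ_1K to yield bytes, while
 * page-based items are multiplied by PAGE_SIZE and byte-based items by 1.
 */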

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
        PGSCAN_KSWAPD,
        PGSCAN_DIRECT,
        PGSTEAL_KSWAPD,
        PGSTEAL_DIRECT,
        PGFAULT,
        PGMAJFAULT,
        PGREFILL,
        PGACTIVATE,
        PGDEACTIVATE,
        PGLAZYFREE,
        PGLAZYFREED,
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
        ZSWPIN,
        ZSWPOUT,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        THP_FAULT_ALLOC,
        THP_COLLAPSE_ALLOC,
#endif
};

static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
{
        struct seq_buf s;
        int i;

        seq_buf_init(&s, buf, bufsize);

        /*
         * Provide statistics on the state of the memory subsystem as
         * well as cumulative event counters that show past behavior.
         *
         * This list is ordered following a combination of these gradients:
         * 1) generic big picture -> specifics and details
         * 2) reflecting userspace activity -> reflecting kernel heuristics
         *
         * Current memory state:
         */
        mem_cgroup_flush_stats();

        for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
                u64 size;

                size = memcg_page_state_output(memcg, memory_stats[i].idx);
                seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);

                if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
                        size += memcg_page_state_output(memcg,
                                                        NR_SLAB_RECLAIMABLE_B);
                        seq_buf_printf(&s, "slab %llu\n", size);
                }
        }

        /* Accumulated memory events */
        seq_buf_printf(&s, "pgscan %lu\n",
                       memcg_events(memcg, PGSCAN_KSWAPD) +
                       memcg_events(memcg, PGSCAN_DIRECT));
        seq_buf_printf(&s, "pgsteal %lu\n",
                       memcg_events(memcg, PGSTEAL_KSWAPD) +
                       memcg_events(memcg, PGSTEAL_DIRECT));

        for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++)
                seq_buf_printf(&s, "%s %lu\n",
                               vm_event_name(memcg_vm_event_stat[i]),
                               memcg_events(memcg, memcg_vm_event_stat[i]));

        /* The above should easily fit into one page */
        WARN_ON_ONCE(seq_buf_has_overflowed(&s));
}

#define K(x) ((x) << (PAGE_SHIFT-10))
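/* Illustrative: K() converts pages to kilobytes; with 4K pages, K(1) == 4. */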
1543 /**
1544  * mem_cgroup_print_oom_context: Print OOM information relevant to
1545  * the memory controller.
1546  * @memcg: The memory cgroup that went over limit
1547  * @p: Task that is going to be killed
1548  *
1549  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1550  * enabled
1551  */
1552 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1553 {
1554         rcu_read_lock();
1555
1556         if (memcg) {
1557                 pr_cont(",oom_memcg=");
1558                 pr_cont_cgroup_path(memcg->css.cgroup);
1559         } else
1560                 pr_cont(",global_oom");
1561         if (p) {
1562                 pr_cont(",task_memcg=");
1563                 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1564         }
1565         rcu_read_unlock();
1566 }
1567
1568 /**
1569  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1570  * the memory controller.
1571  * @memcg: The memory cgroup that went over limit
1572  */
1573 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1574 {
1575         /* Use a static buffer, since the caller is holding oom_lock. */
1576         static char buf[PAGE_SIZE];
1577
1578         lockdep_assert_held(&oom_lock);
1579
1580         pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1581                 K((u64)page_counter_read(&memcg->memory)),
1582                 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1583         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1584                 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1585                         K((u64)page_counter_read(&memcg->swap)),
1586                         K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1587         else {
1588                 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1589                         K((u64)page_counter_read(&memcg->memsw)),
1590                         K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1591                 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1592                         K((u64)page_counter_read(&memcg->kmem)),
1593                         K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1594         }
1595
1596         pr_info("Memory cgroup stats for ");
1597         pr_cont_cgroup_path(memcg->css.cgroup);
1598         pr_cont(":");
1599         memory_stat_format(memcg, buf, sizeof(buf));
1600         pr_info("%s", buf);
1601 }
1602
1603 /*
1604  * Return the memory (and swap, if configured) limit for a memcg.
1605  */
1606 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1607 {
1608         unsigned long max = READ_ONCE(memcg->memory.max);
1609
1610         if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1611                 if (mem_cgroup_swappiness(memcg))
1612                         max += min(READ_ONCE(memcg->swap.max),
1613                                    (unsigned long)total_swap_pages);
1614         } else { /* v1 */
1615                 if (mem_cgroup_swappiness(memcg)) {
1616                         /* Calculate swap excess capacity from memsw limit */
1617                         unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1618
1619                         max += min(swap, (unsigned long)total_swap_pages);
1620                 }
1621         }
1622         return max;
1623 }
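
/*
 * A worked example, assuming cgroup v2, 4 KiB pages, non-zero
 * swappiness and plentiful physical swap: with memory.max at 1 GiB
 * (262144 pages) and swap.max at 512 MiB (131072 pages),
 * mem_cgroup_get_max() returns 262144 + 131072 == 393216 pages.
 */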
1624
1625 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1626 {
1627         return page_counter_read(&memcg->memory);
1628 }
1629
1630 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1631                                      int order)
1632 {
1633         struct oom_control oc = {
1634                 .zonelist = NULL,
1635                 .nodemask = NULL,
1636                 .memcg = memcg,
1637                 .gfp_mask = gfp_mask,
1638                 .order = order,
1639         };
1640         bool ret = true;
1641
1642         if (mutex_lock_killable(&oom_lock))
1643                 return true;
1644
1645         if (mem_cgroup_margin(memcg) >= (1 << order))
1646                 goto unlock;
1647
1648         /*
1649          * A few threads that were not waiting at mutex_lock_killable() can
1650          * fail to bail out early, so re-check the margin after taking oom_lock.
1651          */
1652         ret = task_is_dying() || out_of_memory(&oc);
1653
1654 unlock:
1655         mutex_unlock(&oom_lock);
1656         return ret;
1657 }
1658
1659 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1660                                    pg_data_t *pgdat,
1661                                    gfp_t gfp_mask,
1662                                    unsigned long *total_scanned)
1663 {
1664         struct mem_cgroup *victim = NULL;
1665         int total = 0;
1666         int loop = 0;
1667         unsigned long excess;
1668         unsigned long nr_scanned;
1669         struct mem_cgroup_reclaim_cookie reclaim = {
1670                 .pgdat = pgdat,
1671         };
1672
1673         excess = soft_limit_excess(root_memcg);
1674
1675         while (1) {
1676                 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1677                 if (!victim) {
1678                         loop++;
1679                         if (loop >= 2) {
1680                                 /*
1681                                  * If we have not been able to reclaim
1682                                  * anything, it might be because there are
1683                                  * no reclaimable pages under this hierarchy.
1684                                  */
1685                                 if (!total)
1686                                         break;
1687                                 /*
1688                                  * We want to do more targeted reclaim.
1689                                  * excess >> 2 is not too excessive, so we don't
1690                                  * reclaim too much, nor so little that we keep
1691                                  * coming back to reclaim from this cgroup.
1692                                  */
1693                                 if (total >= (excess >> 2) ||
1694                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1695                                         break;
1696                         }
1697                         continue;
1698                 }
1699                 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1700                                         pgdat, &nr_scanned);
1701                 *total_scanned += nr_scanned;
1702                 if (!soft_limit_excess(root_memcg))
1703                         break;
1704         }
1705         mem_cgroup_iter_break(root_memcg, victim);
1706         return total;
1707 }
1708
1709 #ifdef CONFIG_LOCKDEP
1710 static struct lockdep_map memcg_oom_lock_dep_map = {
1711         .name = "memcg_oom_lock",
1712 };
1713 #endif
1714
1715 static DEFINE_SPINLOCK(memcg_oom_lock);
1716
1717 /*
1718  * Check whether the OOM killer is already running in our hierarchy.
1719  * If someone else is already running it, return false.
1720  */
1721 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1722 {
1723         struct mem_cgroup *iter, *failed = NULL;
1724
1725         spin_lock(&memcg_oom_lock);
1726
1727         for_each_mem_cgroup_tree(iter, memcg) {
1728                 if (iter->oom_lock) {
1729                         /*
1730                          * This subtree of our hierarchy is already locked,
1731                          * so we cannot take the lock.
1732                          */
1733                         failed = iter;
1734                         mem_cgroup_iter_break(memcg, iter);
1735                         break;
1736                 } else
1737                         iter->oom_lock = true;
1738         }
1739
1740         if (failed) {
1741                 /*
1742                  * OK, we failed to lock the whole subtree, so we have to
1743                  * clean up what we set up, up to the failing subtree.
1744                  */
1745                 for_each_mem_cgroup_tree(iter, memcg) {
1746                         if (iter == failed) {
1747                                 mem_cgroup_iter_break(memcg, iter);
1748                                 break;
1749                         }
1750                         iter->oom_lock = false;
1751                 }
1752         } else
1753                 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1754
1755         spin_unlock(&memcg_oom_lock);
1756
1757         return !failed;
1758 }
1759
1760 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1761 {
1762         struct mem_cgroup *iter;
1763
1764         spin_lock(&memcg_oom_lock);
1765         mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1766         for_each_mem_cgroup_tree(iter, memcg)
1767                 iter->oom_lock = false;
1768         spin_unlock(&memcg_oom_lock);
1769 }
1770
1771 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1772 {
1773         struct mem_cgroup *iter;
1774
1775         spin_lock(&memcg_oom_lock);
1776         for_each_mem_cgroup_tree(iter, memcg)
1777                 iter->under_oom++;
1778         spin_unlock(&memcg_oom_lock);
1779 }
1780
1781 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1782 {
1783         struct mem_cgroup *iter;
1784
1785         /*
1786          * Be careful about under_oom underflows because a child memcg
1787          * could have been added after mem_cgroup_mark_under_oom.
1788          */
1789         spin_lock(&memcg_oom_lock);
1790         for_each_mem_cgroup_tree(iter, memcg)
1791                 if (iter->under_oom > 0)
1792                         iter->under_oom--;
1793         spin_unlock(&memcg_oom_lock);
1794 }
1795
1796 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1797
1798 struct oom_wait_info {
1799         struct mem_cgroup *memcg;
1800         wait_queue_entry_t      wait;
1801 };
1802
1803 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1804         unsigned mode, int sync, void *arg)
1805 {
1806         struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1807         struct mem_cgroup *oom_wait_memcg;
1808         struct oom_wait_info *oom_wait_info;
1809
1810         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1811         oom_wait_memcg = oom_wait_info->memcg;
1812
1813         if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1814             !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1815                 return 0;
1816         return autoremove_wake_function(wait, mode, sync, arg);
1817 }
1818
1819 static void memcg_oom_recover(struct mem_cgroup *memcg)
1820 {
1821         /*
1822          * For the following lockless ->under_oom test, the only required
1823          * guarantee is that it must see the state asserted by an OOM when
1824          * this function is called as a result of userland actions
1825          * triggered by the notification of the OOM.  This is trivially
1826          * achieved by invoking mem_cgroup_mark_under_oom() before
1827          * triggering notification.
1828          */
1829         if (memcg && memcg->under_oom)
1830                 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1831 }
1832
1833 /*
1834  * Returns true if one or more processes were successfully killed, though in
1835  * some corner cases it can return true even without killing any process.
1836  */
1837 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1838 {
1839         bool locked, ret;
1840
1841         if (order > PAGE_ALLOC_COSTLY_ORDER)
1842                 return false;
1843
1844         memcg_memory_event(memcg, MEMCG_OOM);
1845
1846         /*
1847          * We are in the middle of the charge context here, so we
1848          * don't want to block when potentially sitting on a callstack
1849          * that holds all kinds of filesystem and mm locks.
1850          *
1851          * cgroup1 allows disabling the OOM killer and waiting for outside
1852          * handling until the charge can succeed; remember the context and put
1853          * the task to sleep at the end of the page fault when all locks are
1854          * released.
1855          *
1856          * On the other hand, in-kernel OOM killer allows for an async victim
1857          * memory reclaim (oom_reaper) and that means that we are not solely
1858          * relying on the oom victim to make a forward progress and we can
1859          * invoke the oom killer here.
1860          *
1861          * Please note that mem_cgroup_out_of_memory might fail to find a
1862          * victim and then we have to bail out from the charge path.
1863          */
1864         if (memcg->oom_kill_disable) {
1865                 if (current->in_user_fault) {
1866                         css_get(&memcg->css);
1867                         current->memcg_in_oom = memcg;
1868                         current->memcg_oom_gfp_mask = mask;
1869                         current->memcg_oom_order = order;
1870                 }
1871                 return false;
1872         }
1873
1874         mem_cgroup_mark_under_oom(memcg);
1875
1876         locked = mem_cgroup_oom_trylock(memcg);
1877
1878         if (locked)
1879                 mem_cgroup_oom_notify(memcg);
1880
1881         mem_cgroup_unmark_under_oom(memcg);
1882         ret = mem_cgroup_out_of_memory(memcg, mask, order);
1883
1884         if (locked)
1885                 mem_cgroup_oom_unlock(memcg);
1886
1887         return ret;
1888 }
1889
1890 /**
1891  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1892  * @handle: actually kill/wait or just clean up the OOM state
1893  *
1894  * This has to be called at the end of a page fault if the memcg OOM
1895  * handler was enabled.
1896  *
1897  * Memcg supports userspace OOM handling where failed allocations must
1898  * sleep on a waitqueue until the userspace task resolves the
1899  * situation.  Sleeping directly in the charge context with all kinds
1900  * of locks held is not a good idea, instead we remember an OOM state
1901  * in the task and mem_cgroup_oom_synchronize() has to be called at
1902  * the end of the page fault to complete the OOM handling.
1903  *
1904  * Returns %true if an ongoing memcg OOM situation was detected and
1905  * completed, %false otherwise.
1906  */
1907 bool mem_cgroup_oom_synchronize(bool handle)
1908 {
1909         struct mem_cgroup *memcg = current->memcg_in_oom;
1910         struct oom_wait_info owait;
1911         bool locked;
1912
1913         /* OOM is global, do not handle */
1914         if (!memcg)
1915                 return false;
1916
1917         if (!handle)
1918                 goto cleanup;
1919
1920         owait.memcg = memcg;
1921         owait.wait.flags = 0;
1922         owait.wait.func = memcg_oom_wake_function;
1923         owait.wait.private = current;
1924         INIT_LIST_HEAD(&owait.wait.entry);
1925
1926         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1927         mem_cgroup_mark_under_oom(memcg);
1928
1929         locked = mem_cgroup_oom_trylock(memcg);
1930
1931         if (locked)
1932                 mem_cgroup_oom_notify(memcg);
1933
1934         if (locked && !memcg->oom_kill_disable) {
1935                 mem_cgroup_unmark_under_oom(memcg);
1936                 finish_wait(&memcg_oom_waitq, &owait.wait);
1937                 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1938                                          current->memcg_oom_order);
1939         } else {
1940                 schedule();
1941                 mem_cgroup_unmark_under_oom(memcg);
1942                 finish_wait(&memcg_oom_waitq, &owait.wait);
1943         }
1944
1945         if (locked) {
1946                 mem_cgroup_oom_unlock(memcg);
1947                 /*
1948                  * There is no guarantee that an OOM-lock contender
1949                  * sees the wakeups triggered by the OOM kill
1950                  * uncharges.  Wake any sleepers explicitly.
1951                  */
1952                 memcg_oom_recover(memcg);
1953         }
1954 cleanup:
1955         current->memcg_in_oom = NULL;
1956         css_put(&memcg->css);
1957         return true;
1958 }
1959
1960 /**
1961  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1962  * @victim: task to be killed by the OOM killer
1963  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1964  *
1965  * Returns a pointer to a memory cgroup, which has to be cleaned up
1966  * by killing all of its OOM-killable tasks.
1967  *
1968  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1969  */
1970 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1971                                             struct mem_cgroup *oom_domain)
1972 {
1973         struct mem_cgroup *oom_group = NULL;
1974         struct mem_cgroup *memcg;
1975
1976         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1977                 return NULL;
1978
1979         if (!oom_domain)
1980                 oom_domain = root_mem_cgroup;
1981
1982         rcu_read_lock();
1983
1984         memcg = mem_cgroup_from_task(victim);
1985         if (memcg == root_mem_cgroup)
1986                 goto out;
1987
1988         /*
1989          * If the victim task has been asynchronously moved to a different
1990          * memory cgroup, we might end up killing tasks outside oom_domain.
1991          * In this case it's better to ignore memory.group.oom.
1992          */
1993         if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1994                 goto out;
1995
1996         /*
1997          * Traverse the memory cgroup hierarchy from the victim task's
1998          * cgroup up to the OOMing cgroup (or root) to find the
1999          * highest-level memory cgroup with oom.group set.
2000          */
2001         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2002                 if (memcg->oom_group)
2003                         oom_group = memcg;
2004
2005                 if (memcg == oom_domain)
2006                         break;
2007         }
2008
2009         if (oom_group)
2010                 css_get(&oom_group->css);
2011 out:
2012         rcu_read_unlock();
2013
2014         return oom_group;
2015 }
2016
2017 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2018 {
2019         pr_info("Tasks in ");
2020         pr_cont_cgroup_path(memcg->css.cgroup);
2021         pr_cont(" are going to be killed due to memory.oom.group set\n");
2022 }
2023
2024 /**
2025  * folio_memcg_lock - Bind a folio to its memcg.
2026  * @folio: The folio.
2027  *
2028  * This function prevents unlocked LRU folios from being moved to
2029  * another cgroup.
2030  *
2031  * It ensures the lifetime of the bound memcg.  The caller is responsible
2032  * for the lifetime of the folio.
2033  */
2034 void folio_memcg_lock(struct folio *folio)
2035 {
2036         struct mem_cgroup *memcg;
2037         unsigned long flags;
2038
2039         /*
2040          * The RCU lock is held throughout the transaction.  The fast
2041          * path can get away without acquiring the memcg->move_lock
2042          * because page moving starts with an RCU grace period.
2043          */
2044         rcu_read_lock();
2045
2046         if (mem_cgroup_disabled())
2047                 return;
2048 again:
2049         memcg = folio_memcg(folio);
2050         if (unlikely(!memcg))
2051                 return;
2052
2053 #ifdef CONFIG_PROVE_LOCKING
2054         local_irq_save(flags);
2055         might_lock(&memcg->move_lock);
2056         local_irq_restore(flags);
2057 #endif
2058
2059         if (atomic_read(&memcg->moving_account) <= 0)
2060                 return;
2061
2062         spin_lock_irqsave(&memcg->move_lock, flags);
2063         if (memcg != folio_memcg(folio)) {
2064                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2065                 goto again;
2066         }
2067
2068         /*
2069          * When charge migration first begins, we can have multiple
2070          * critical sections holding the fast-path RCU lock and one
2071          * holding the slowpath move_lock. Track the task who has the
2072          * move_lock for unlock_page_memcg().
2073          */
2074         memcg->move_lock_task = current;
2075         memcg->move_lock_flags = flags;
2076 }
2077
2078 void lock_page_memcg(struct page *page)
2079 {
2080         folio_memcg_lock(page_folio(page));
2081 }
2082
2083 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2084 {
2085         if (memcg && memcg->move_lock_task == current) {
2086                 unsigned long flags = memcg->move_lock_flags;
2087
2088                 memcg->move_lock_task = NULL;
2089                 memcg->move_lock_flags = 0;
2090
2091                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2092         }
2093
2094         rcu_read_unlock();
2095 }
2096
2097 /**
2098  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2099  * @folio: The folio.
2100  *
2101  * This releases the binding created by folio_memcg_lock().  This does
2102  * not change the accounting of this folio to its memcg, but it does
2103  * permit others to change it.
2104  */
2105 void folio_memcg_unlock(struct folio *folio)
2106 {
2107         __folio_memcg_unlock(folio_memcg(folio));
2108 }
2109
2110 void unlock_page_memcg(struct page *page)
2111 {
2112         folio_memcg_unlock(page_folio(page));
2113 }
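
/*
 * A minimal sketch of the expected pairing of the binding helpers
 * above; page_state_update_demo() is hypothetical, standing in for
 * callers that update memcg-accounted page state.
 */
static inline void page_state_update_demo(struct page *page)
{
        lock_page_memcg(page);          /* pin the page<->memcg binding */
        /* ... update memcg-accounted state of @page here ... */
        unlock_page_memcg(page);        /* drop the binding and RCU lock */
}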
2114
2115 struct memcg_stock_pcp {
2116         local_lock_t stock_lock;
2117         struct mem_cgroup *cached; /* this is never the root cgroup */
2118         unsigned int nr_pages;
2119
2120 #ifdef CONFIG_MEMCG_KMEM
2121         struct obj_cgroup *cached_objcg;
2122         struct pglist_data *cached_pgdat;
2123         unsigned int nr_bytes;
2124         int nr_slab_reclaimable_b;
2125         int nr_slab_unreclaimable_b;
2126 #endif
2127
2128         struct work_struct work;
2129         unsigned long flags;
2130 #define FLUSHING_CACHED_CHARGE  0
2131 };
2132 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2133         .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2134 };
2135 static DEFINE_MUTEX(percpu_charge_mutex);
2136
2137 #ifdef CONFIG_MEMCG_KMEM
2138 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2139 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2140                                      struct mem_cgroup *root_memcg);
2141 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2142
2143 #else
2144 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2145 {
2146         return NULL;
2147 }
2148 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2149                                      struct mem_cgroup *root_memcg)
2150 {
2151         return false;
2152 }
2153 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2154 {
2155 }
2156 #endif
2157
2158 /**
2159  * consume_stock: Try to consume stocked charge on this cpu.
2160  * @memcg: memcg to consume from.
2161  * @nr_pages: how many pages to charge.
2162  *
2163  * The charge will only happen if @memcg matches the current cpu's memcg
2164  * stock, and at least @nr_pages are available in that stock.  On failure,
2165  * the caller falls back to the slow path, which refills the stock.
2166  *
2167  * returns true if successful, false otherwise.
2168  */
2169 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2170 {
2171         struct memcg_stock_pcp *stock;
2172         unsigned long flags;
2173         bool ret = false;
2174
2175         if (nr_pages > MEMCG_CHARGE_BATCH)
2176                 return ret;
2177
2178         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2179
2180         stock = this_cpu_ptr(&memcg_stock);
2181         if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2182                 stock->nr_pages -= nr_pages;
2183                 ret = true;
2184         }
2185
2186         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2187
2188         return ret;
2189 }
2190
2191 /*
2192  * Return the charges cached in the per-cpu stock to the page counters
2193  * and reset the cached information.
2193  */
2194 static void drain_stock(struct memcg_stock_pcp *stock)
2195 {
2196         struct mem_cgroup *old = stock->cached;
2197
2198         if (!old)
2199                 return;
2200
2201         if (stock->nr_pages) {
2202                 page_counter_uncharge(&old->memory, stock->nr_pages);
2203                 if (do_memsw_account())
2204                         page_counter_uncharge(&old->memsw, stock->nr_pages);
2205                 stock->nr_pages = 0;
2206         }
2207
2208         css_put(&old->css);
2209         stock->cached = NULL;
2210 }
2211
2212 static void drain_local_stock(struct work_struct *dummy)
2213 {
2214         struct memcg_stock_pcp *stock;
2215         struct obj_cgroup *old = NULL;
2216         unsigned long flags;
2217
2218         /*
2219          * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2220          * drain_stock races is that we always operate on the local CPU stock
2221          * here with IRQs disabled.
2222          */
2223         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2224
2225         stock = this_cpu_ptr(&memcg_stock);
2226         old = drain_obj_stock(stock);
2227         drain_stock(stock);
2228         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2229
2230         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2231         if (old)
2232                 obj_cgroup_put(old);
2233 }
2234
2235 /*
2236  * Cache charges (@nr_pages) in the local per-cpu area.
2237  * They will be consumed by consume_stock() later.
2238  */
2239 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2240 {
2241         struct memcg_stock_pcp *stock;
2242
2243         stock = this_cpu_ptr(&memcg_stock);
2244         if (stock->cached != memcg) { /* reset if necessary */
2245                 drain_stock(stock);
2246                 css_get(&memcg->css);
2247                 stock->cached = memcg;
2248         }
2249         stock->nr_pages += nr_pages;
2250
2251         if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2252                 drain_stock(stock);
2253 }
2254
2255 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2256 {
2257         unsigned long flags;
2258
2259         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2260         __refill_stock(memcg, nr_pages);
2261         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2262 }
2263
2264 /*
2265  * Drain all per-CPU charge caches for the given root_memcg, i.e. the
2266  * subtree of the hierarchy under it.
2267  */
2268 static void drain_all_stock(struct mem_cgroup *root_memcg)
2269 {
2270         int cpu, curcpu;
2271
2272         /* If someone's already draining, avoid running more workers. */
2273         if (!mutex_trylock(&percpu_charge_mutex))
2274                 return;
2275         /*
2276          * Notify other cpus that a system-wide "drain" is running.
2277          * We do not care about races with the cpu hotplug because cpu down
2278          * as well as workers from this path always operate on the local
2279          * per-cpu data. CPU up doesn't touch memcg_stock at all.
2280          */
2281         migrate_disable();
2282         curcpu = smp_processor_id();
2283         for_each_online_cpu(cpu) {
2284                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2285                 struct mem_cgroup *memcg;
2286                 bool flush = false;
2287
2288                 rcu_read_lock();
2289                 memcg = stock->cached;
2290                 if (memcg && stock->nr_pages &&
2291                     mem_cgroup_is_descendant(memcg, root_memcg))
2292                         flush = true;
2293                 else if (obj_stock_flush_required(stock, root_memcg))
2294                         flush = true;
2295                 rcu_read_unlock();
2296
2297                 if (flush &&
2298                     !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2299                         if (cpu == curcpu)
2300                                 drain_local_stock(&stock->work);
2301                         else
2302                                 schedule_work_on(cpu, &stock->work);
2303                 }
2304         }
2305         migrate_enable();
2306         mutex_unlock(&percpu_charge_mutex);
2307 }
2308
2309 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2310 {
2311         struct memcg_stock_pcp *stock;
2312
2313         stock = &per_cpu(memcg_stock, cpu);
2314         drain_stock(stock);
2315
2316         return 0;
2317 }
2318
2319 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2320                                   unsigned int nr_pages,
2321                                   gfp_t gfp_mask)
2322 {
2323         unsigned long nr_reclaimed = 0;
2324
2325         do {
2326                 unsigned long pflags;
2327
2328                 if (page_counter_read(&memcg->memory) <=
2329                     READ_ONCE(memcg->memory.high))
2330                         continue;
2331
2332                 memcg_memory_event(memcg, MEMCG_HIGH);
2333
2334                 psi_memstall_enter(&pflags);
2335                 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2336                                                         gfp_mask,
2337                                                         MEMCG_RECLAIM_MAY_SWAP);
2338                 psi_memstall_leave(&pflags);
2339         } while ((memcg = parent_mem_cgroup(memcg)) &&
2340                  !mem_cgroup_is_root(memcg));
2341
2342         return nr_reclaimed;
2343 }
2344
2345 static void high_work_func(struct work_struct *work)
2346 {
2347         struct mem_cgroup *memcg;
2348
2349         memcg = container_of(work, struct mem_cgroup, high_work);
2350         reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2351 }
2352
2353 /*
2354  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2355  * long enough to cause a significant slowdown in most cases, while still
2356  * allowing diagnostics and tracing to proceed without becoming stuck.
2357  */
2358 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2359
2360 /*
2361  * When calculating the delay, we use these on either side of the exponentiation
2362  * to maintain precision and to scale to a reasonable number of jiffies (see the
2363  * table below).
2364  *
2365  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2366  *   overage ratio to a delay.
2367  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2368  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2369  *   to produce a reasonable delay curve.
2370  *
2371  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2372  * reasonable delay curve compared to precision-adjusted overage, not
2373  * penalising heavily at first, but still making sure that growth beyond the
2374  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2375  * example, with a high of 100 megabytes:
2376  *
2377  *  +-------+------------------------+
2378  *  | usage | time to allocate in ms |
2379  *  +-------+------------------------+
2380  *  | 100M  |                      0 |
2381  *  | 101M  |                      6 |
2382  *  | 102M  |                     25 |
2383  *  | 103M  |                     57 |
2384  *  | 104M  |                    102 |
2385  *  | 105M  |                    159 |
2386  *  | 106M  |                    230 |
2387  *  | 107M  |                    313 |
2388  *  | 108M  |                    409 |
2389  *  | 109M  |                    518 |
2390  *  | 110M  |                    639 |
2391  *  | 111M  |                    774 |
2392  *  | 112M  |                    921 |
2393  *  | 113M  |                   1081 |
2394  *  | 114M  |                   1254 |
2395  *  | 115M  |                   1439 |
2396  *  | 116M  |                   1638 |
2397  *  | 117M  |                   1849 |
2398  *  | 118M  |                   2000 |
2399  *  | 119M  |                   2000 |
2400  *  | 120M  |                   2000 |
2401  *  +-------+------------------------+
2402  */
2403 #define MEMCG_DELAY_PRECISION_SHIFT 20
2404 #define MEMCG_DELAY_SCALING_SHIFT 14
2405
2406 static u64 calculate_overage(unsigned long usage, unsigned long high)
2407 {
2408         u64 overage;
2409
2410         if (usage <= high)
2411                 return 0;
2412
2413         /*
2414          * Prevent division by 0 in overage calculation by acting as if
2415          * it were a threshold of 1 page.
2416          */
2417         high = max(high, 1UL);
2418
2419         overage = usage - high;
2420         overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2421         return div64_u64(overage, high);
2422 }
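
/*
 * A worked example: with usage at 110M and high at 100M,
 * calculate_overage() returns (10M << MEMCG_DELAY_PRECISION_SHIFT) /
 * 100M == 104857, i.e. a 10% overage in fixed point with 20
 * fractional bits.
 */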
2423
2424 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2425 {
2426         u64 overage, max_overage = 0;
2427
2428         do {
2429                 overage = calculate_overage(page_counter_read(&memcg->memory),
2430                                             READ_ONCE(memcg->memory.high));
2431                 max_overage = max(overage, max_overage);
2432         } while ((memcg = parent_mem_cgroup(memcg)) &&
2433                  !mem_cgroup_is_root(memcg));
2434
2435         return max_overage;
2436 }
2437
2438 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2439 {
2440         u64 overage, max_overage = 0;
2441
2442         do {
2443                 overage = calculate_overage(page_counter_read(&memcg->swap),
2444                                             READ_ONCE(memcg->swap.high));
2445                 if (overage)
2446                         memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2447                 max_overage = max(overage, max_overage);
2448         } while ((memcg = parent_mem_cgroup(memcg)) &&
2449                  !mem_cgroup_is_root(memcg));
2450
2451         return max_overage;
2452 }
2453
2454 /*
2455  * Get the number of jiffies for which we should penalise a mischievous cgroup
2456  * that is exceeding its memory.high, checking both it and its ancestors.
2457  */
2458 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2459                                           unsigned int nr_pages,
2460                                           u64 max_overage)
2461 {
2462         unsigned long penalty_jiffies;
2463
2464         if (!max_overage)
2465                 return 0;
2466
2467         /*
2468          * We use overage compared to memory.high to calculate the number of
2469          * jiffies to sleep (penalty_jiffies). Ideally this value should be
2470          * fairly lenient on small overages, and increasingly harsh when the
2471          * memcg in question makes it clear that it has no intention of stopping
2472          * its crazy behaviour, so we exponentially increase the delay based on
2473          * overage amount.
2474          */
2475         penalty_jiffies = max_overage * max_overage * HZ;
2476         penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2477         penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2478
2479         /*
2480          * Factor in the task's own contribution to the overage, such that four
2481          * N-sized allocations are throttled approximately the same as one
2482          * 4N-sized allocation.
2483          *
2484          * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2485          * larger the current charge batch is than that.
2486          */
2487         return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2488 }
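
/*
 * A worked example re-deriving the 105M row of the table further up,
 * assuming HZ == 1000, a high of 100M and nr_pages == MEMCG_CHARGE_BATCH:
 *
 *   overage         = ((105M - 100M) << 20) / 100M      == 52428
 *   penalty_jiffies = (52428 * 52428 * HZ) >> 20 >> 14  == ~160
 *
 * i.e. roughly the 159 ms the table lists for 105M.
 */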
2489
2490 /*
2491  * Scheduled by try_charge() to be executed from the userland return path
2492  * and reclaims memory over the high limit.
2493  */
2494 void mem_cgroup_handle_over_high(void)
2495 {
2496         unsigned long penalty_jiffies;
2497         unsigned long pflags;
2498         unsigned long nr_reclaimed;
2499         unsigned int nr_pages = current->memcg_nr_pages_over_high;
2500         int nr_retries = MAX_RECLAIM_RETRIES;
2501         struct mem_cgroup *memcg;
2502         bool in_retry = false;
2503
2504         if (likely(!nr_pages))
2505                 return;
2506
2507         memcg = get_mem_cgroup_from_mm(current->mm);
2508         current->memcg_nr_pages_over_high = 0;
2509
2510 retry_reclaim:
2511         /*
2512          * The allocating task should reclaim at least the batch size, but for
2513          * subsequent retries we only want to do what's necessary to prevent oom
2514          * or breaching resource isolation.
2515          *
2516          * This is distinct from memory.max or page allocator behaviour because
2517          * memory.high is currently batched, whereas memory.max and the page
2518          * allocator run every time an allocation is made.
2519          */
2520         nr_reclaimed = reclaim_high(memcg,
2521                                     in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2522                                     GFP_KERNEL);
2523
2524         /*
2525          * memory.high is breached and reclaim is unable to keep up. Throttle
2526          * allocators proactively to slow down excessive growth.
2527          */
2528         penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2529                                                mem_find_max_overage(memcg));
2530
2531         penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2532                                                 swap_find_max_overage(memcg));
2533
2534         /*
2535          * Clamp the max delay per usermode return so as to still keep the
2536          * application moving forwards and also permit diagnostics, albeit
2537          * extremely slowly.
2538          */
2539         penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2540
2541         /*
2542          * Don't sleep if the amount of jiffies this memcg owes us is so low
2543          * that it's not even worth doing, in an attempt to be nice to those who
2544          * go only a small amount over their memory.high value and maybe haven't
2545          * been aggressively reclaimed enough yet.
2546          */
2547         if (penalty_jiffies <= HZ / 100)
2548                 goto out;
2549
2550         /*
2551          * If reclaim is making forward progress but we're still over
2552          * memory.high, we want to encourage that rather than doing allocator
2553          * throttling.
2554          */
2555         if (nr_reclaimed || nr_retries--) {
2556                 in_retry = true;
2557                 goto retry_reclaim;
2558         }
2559
2560         /*
2561          * If we exit early, we're guaranteed to die (since
2562          * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2563          * need to account for any ill-begotten jiffies to pay them off later.
2564          */
2565         psi_memstall_enter(&pflags);
2566         schedule_timeout_killable(penalty_jiffies);
2567         psi_memstall_leave(&pflags);
2568
2569 out:
2570         css_put(&memcg->css);
2571 }
2572
2573 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2574                         unsigned int nr_pages)
2575 {
2576         unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2577         int nr_retries = MAX_RECLAIM_RETRIES;
2578         struct mem_cgroup *mem_over_limit;
2579         struct page_counter *counter;
2580         unsigned long nr_reclaimed;
2581         bool passed_oom = false;
2582         unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2583         bool drained = false;
2584         bool raised_max_event = false;
2585         unsigned long pflags;
2586
2587 retry:
2588         if (consume_stock(memcg, nr_pages))
2589                 return 0;
2590
2591         if (!do_memsw_account() ||
2592             page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2593                 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2594                         goto done_restock;
2595                 if (do_memsw_account())
2596                         page_counter_uncharge(&memcg->memsw, batch);
2597                 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2598         } else {
2599                 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2600                 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2601         }
2602
2603         if (batch > nr_pages) {
2604                 batch = nr_pages;
2605                 goto retry;
2606         }
2607
2608         /*
2609          * Prevent unbounded recursion when reclaim operations need to
2610          * allocate memory. This might exceed the limits temporarily,
2611          * but we prefer facilitating memory reclaim and getting back
2612          * under the limit over triggering OOM kills in these cases.
2613          */
2614         if (unlikely(current->flags & PF_MEMALLOC))
2615                 goto force;
2616
2617         if (unlikely(task_in_memcg_oom(current)))
2618                 goto nomem;
2619
2620         if (!gfpflags_allow_blocking(gfp_mask))
2621                 goto nomem;
2622
2623         memcg_memory_event(mem_over_limit, MEMCG_MAX);
2624         raised_max_event = true;
2625
2626         psi_memstall_enter(&pflags);
2627         nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2628                                                     gfp_mask, reclaim_options);
2629         psi_memstall_leave(&pflags);
2630
2631         if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2632                 goto retry;
2633
2634         if (!drained) {
2635                 drain_all_stock(mem_over_limit);
2636                 drained = true;
2637                 goto retry;
2638         }
2639
2640         if (gfp_mask & __GFP_NORETRY)
2641                 goto nomem;
2642         /*
2643          * Even though the limit is exceeded at this point, reclaim
2644          * may have been able to free some pages.  Retry the charge
2645          * before killing the task.
2646          *
2647          * Only for regular pages, though: huge pages are rather
2648          * unlikely to succeed so close to the limit, and we fall back
2649          * to regular pages anyway in case of failure.
2650          */
2651         if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2652                 goto retry;
2653         /*
2654          * During task move, charges can be double-counted. So it's better
2655          * to wait until the end of the task move if one is in progress.
2656          */
2657         if (mem_cgroup_wait_acct_move(mem_over_limit))
2658                 goto retry;
2659
2660         if (nr_retries--)
2661                 goto retry;
2662
2663         if (gfp_mask & __GFP_RETRY_MAYFAIL)
2664                 goto nomem;
2665
2666         /* Avoid endless loop for tasks bypassed by the oom killer */
2667         if (passed_oom && task_is_dying())
2668                 goto nomem;
2669
2670         /*
2671          * Keep retrying as long as the memcg OOM killer is able to make
2672          * forward progress, or bypass the charge if the OOM killer
2673          * couldn't make any progress.
2674          */
2675         if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2676                            get_order(nr_pages * PAGE_SIZE))) {
2677                 passed_oom = true;
2678                 nr_retries = MAX_RECLAIM_RETRIES;
2679                 goto retry;
2680         }
2681 nomem:
2682         /*
2683          * Memcg doesn't have a dedicated reserve for atomic
2684          * allocations. But like the global atomic pool, we need to
2685          * put the burden of reclaim on regular allocation requests
2686          * and let these go through as privileged allocations.
2687          */
2688         if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2689                 return -ENOMEM;
2690 force:
2691         /*
2692          * If the allocation has to be enforced, don't forget to raise
2693          * a MEMCG_MAX event.
2694          */
2695         if (!raised_max_event)
2696                 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2697
2698         /*
2699          * The allocation either can't fail or will lead to more memory
2700          * being freed very soon.  Allow memory usage to go over the limit
2701          * temporarily by force-charging it.
2702          */
2703         page_counter_charge(&memcg->memory, nr_pages);
2704         if (do_memsw_account())
2705                 page_counter_charge(&memcg->memsw, nr_pages);
2706
2707         return 0;
2708
2709 done_restock:
2710         if (batch > nr_pages)
2711                 refill_stock(memcg, batch - nr_pages);
2712
2713         /*
2714          * If the hierarchy is above the normal consumption range, schedule
2715          * reclaim on returning to userland.  We could perform reclaim here
2716          * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2717          * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2718          * not recorded as it most likely matches current's and won't
2719          * change in the meantime.  As high limit is checked again before
2720          * reclaim, the cost of mismatch is negligible.
2721          */
2722         do {
2723                 bool mem_high, swap_high;
2724
2725                 mem_high = page_counter_read(&memcg->memory) >
2726                         READ_ONCE(memcg->memory.high);
2727                 swap_high = page_counter_read(&memcg->swap) >
2728                         READ_ONCE(memcg->swap.high);
2729
2730                 /* Don't bother a random interrupted task */
2731                 if (!in_task()) {
2732                         if (mem_high) {
2733                                 schedule_work(&memcg->high_work);
2734                                 break;
2735                         }
2736                         continue;
2737                 }
2738
2739                 if (mem_high || swap_high) {
2740                         /*
2741                          * The allocating tasks in this cgroup will need to do
2742                          * reclaim or be throttled to prevent further growth
2743                          * of the memory or swap footprints.
2744                          *
2745                          * Target some best-effort fairness between the tasks,
2746                          * and distribute reclaim work and delay penalties
2747                          * based on how much each task is actually allocating.
2748                          */
2749                         current->memcg_nr_pages_over_high += batch;
2750                         set_notify_resume(current);
2751                         break;
2752                 }
2753         } while ((memcg = parent_mem_cgroup(memcg)));
2754
2755         if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2756             !(current->flags & PF_MEMALLOC) &&
2757             gfpflags_allow_blocking(gfp_mask)) {
2758                 mem_cgroup_handle_over_high();
2759         }
2760         return 0;
2761 }
2762
2763 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2764                              unsigned int nr_pages)
2765 {
2766         if (mem_cgroup_is_root(memcg))
2767                 return 0;
2768
2769         return try_charge_memcg(memcg, gfp_mask, nr_pages);
2770 }
2771
2772 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2773 {
2774         if (mem_cgroup_is_root(memcg))
2775                 return;
2776
2777         page_counter_uncharge(&memcg->memory, nr_pages);
2778         if (do_memsw_account())
2779                 page_counter_uncharge(&memcg->memsw, nr_pages);
2780 }
2781
2782 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2783 {
2784         VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2785         /*
2786          * Any of the following ensures page's memcg stability:
2787          *
2788          * - the page lock
2789          * - LRU isolation
2790          * - lock_page_memcg()
2791          * - exclusive reference
2792          */
2793         folio->memcg_data = (unsigned long)memcg;
2794 }
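
/*
 * A minimal sketch of how the helpers above combine into the common
 * folio-charging pattern; charge_folio_demo() is hypothetical.
 */
static inline int charge_folio_demo(struct folio *folio,
                                    struct mem_cgroup *memcg, gfp_t gfp)
{
        int ret;

        ret = try_charge(memcg, gfp, folio_nr_pages(folio));
        if (ret)
                return ret;             /* charge failed, nothing to undo */

        css_get(&memcg->css);           /* the folio holds a css reference */
        commit_charge(folio, memcg);
        return 0;
}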
2795
2796 #ifdef CONFIG_MEMCG_KMEM
2797 /*
2798  * The allocated objcg pointers array is not accounted directly.
2799  * Moreover, it should not come from a DMA buffer and is not readily
2800  * reclaimable. So those GFP bits should be masked off.
2801  */
2802 #define OBJCGS_CLEAR_MASK       (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2803
2804 /*
2805  * mod_objcg_mlstate() may be called with irqs enabled, so
2806  * mod_memcg_lruvec_state() should be used.
2807  */
2808 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2809                                      struct pglist_data *pgdat,
2810                                      enum node_stat_item idx, int nr)
2811 {
2812         struct mem_cgroup *memcg;
2813         struct lruvec *lruvec;
2814
2815         rcu_read_lock();
2816         memcg = obj_cgroup_memcg(objcg);
2817         lruvec = mem_cgroup_lruvec(memcg, pgdat);
2818         mod_memcg_lruvec_state(lruvec, idx, nr);
2819         rcu_read_unlock();
2820 }
2821
2822 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2823                                  gfp_t gfp, bool new_slab)
2824 {
2825         unsigned int objects = objs_per_slab(s, slab);
2826         unsigned long memcg_data;
2827         void *vec;
2828
2829         gfp &= ~OBJCGS_CLEAR_MASK;
2830         vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2831                            slab_nid(slab));
2832         if (!vec)
2833                 return -ENOMEM;
2834
2835         memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2836         if (new_slab) {
2837                 /*
2838                  * If the slab is brand new and nobody can yet access its
2839                  * memcg_data, no synchronization is required and memcg_data can
2840                  * be simply assigned.
2841                  */
2842                 slab->memcg_data = memcg_data;
2843         } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2844                 /*
2845                  * If the slab is already in use, somebody can allocate and
2846                  * assign obj_cgroups in parallel. In this case the existing
2847                  * objcg vector should be reused.
2848                  */
2849                 kfree(vec);
2850                 return 0;
2851         }
2852
2853         kmemleak_not_leak(vec);
2854         return 0;
2855 }
2856
2857 static __always_inline
2858 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2859 {
2860         /*
2861          * Slab objects are accounted individually, not per-page.
2862          * Memcg membership data for each individual object is saved in
2863          * slab->memcg_data.
2864          */
2865         if (folio_test_slab(folio)) {
2866                 struct obj_cgroup **objcgs;
2867                 struct slab *slab;
2868                 unsigned int off;
2869
2870                 slab = folio_slab(folio);
2871                 objcgs = slab_objcgs(slab);
2872                 if (!objcgs)
2873                         return NULL;
2874
2875                 off = obj_to_index(slab->slab_cache, slab, p);
2876                 if (objcgs[off])
2877                         return obj_cgroup_memcg(objcgs[off]);
2878
2879                 return NULL;
2880         }
2881
2882         /*
2883          * page_memcg_check() is used here, because in theory we can encounter
2884          * a folio where the slab flag has been cleared already, but
2885          * slab->memcg_data has not been freed yet.
2886          * page_memcg_check() will guarantee that either a proper memory
2887          * cgroup pointer or NULL will be returned.
2888          */
2889         return page_memcg_check(folio_page(folio, 0));
2890 }
2891
2892 /*
2893  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2894  *
2895  * A passed kernel object can be a slab object, vmalloc object or a generic
2896  * kernel page, so different mechanisms for getting the memory cgroup pointer
2897  * should be used.
2898  *
2899  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2900  * cannot know for sure how the kernel object is implemented.
2901  * mem_cgroup_from_obj() can be safely used in such cases.
2902  *
2903  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2904  * cgroup_mutex, etc.
2905  */
2906 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2907 {
2908         struct folio *folio;
2909
2910         if (mem_cgroup_disabled())
2911                 return NULL;
2912
2913         if (unlikely(is_vmalloc_addr(p)))
2914                 folio = page_folio(vmalloc_to_page(p));
2915         else
2916                 folio = virt_to_folio(p);
2917
2918         return mem_cgroup_from_obj_folio(folio, p);
2919 }
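
/*
 * A minimal sketch of the lifetime rule from the comment above;
 * memcg_of_obj_demo() is hypothetical.
 */
static inline void memcg_of_obj_demo(void *p)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();                /* keeps the returned memcg alive */
        memcg = mem_cgroup_from_obj(p);
        if (memcg)
                pr_cont_cgroup_path(memcg->css.cgroup);
        rcu_read_unlock();
}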
2920
2921 /*
2922  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2923  * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
2924  * allocated using vmalloc().
2925  *
2926  * A passed kernel object must be a slab object or a generic kernel page.
2927  *
2928  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2929  * cgroup_mutex, etc.
2930  */
2931 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2932 {
2933         if (mem_cgroup_disabled())
2934                 return NULL;
2935
2936         return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2937 }
2938
2939 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2940 {
2941         struct obj_cgroup *objcg = NULL;
2942
2943         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2944                 objcg = rcu_dereference(memcg->objcg);
2945                 if (objcg && obj_cgroup_tryget(objcg))
2946                         break;
2947                 objcg = NULL;
2948         }
2949         return objcg;
2950 }
2951
2952 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2953 {
2954         struct obj_cgroup *objcg = NULL;
2955         struct mem_cgroup *memcg;
2956
2957         if (memcg_kmem_bypass())
2958                 return NULL;
2959
2960         rcu_read_lock();
2961         if (unlikely(active_memcg()))
2962                 memcg = active_memcg();
2963         else
2964                 memcg = mem_cgroup_from_task(current);
2965         objcg = __get_obj_cgroup_from_memcg(memcg);
2966         rcu_read_unlock();
2967         return objcg;
2968 }
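
/*
 * A minimal sketch of taking and dropping an objcg reference via the
 * helper above; objcg_ref_demo() is hypothetical.
 */
static inline void objcg_ref_demo(void)
{
        struct obj_cgroup *objcg = get_obj_cgroup_from_current();

        if (!objcg)
                return;                 /* accounting bypassed or disabled */
        /* ... charge bytes against @objcg here ... */
        obj_cgroup_put(objcg);          /* drop the reference taken above */
}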
2969
2970 struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
2971 {
2972         struct obj_cgroup *objcg;
2973
2974         if (!memcg_kmem_enabled() || memcg_kmem_bypass())
2975                 return NULL;
2976
2977         if (PageMemcgKmem(page)) {
2978                 objcg = __folio_objcg(page_folio(page));
2979                 obj_cgroup_get(objcg);
2980         } else {
2981                 struct mem_cgroup *memcg;
2982
2983                 rcu_read_lock();
2984                 memcg = __folio_memcg(page_folio(page));
2985                 if (memcg)
2986                         objcg = __get_obj_cgroup_from_memcg(memcg);
2987                 else
2988                         objcg = NULL;
2989                 rcu_read_unlock();
2990         }
2991         return objcg;
2992 }
2993
2994 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2995 {
2996         mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2997         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
2998                 if (nr_pages > 0)
2999                         page_counter_charge(&memcg->kmem, nr_pages);
3000                 else
3001                         page_counter_uncharge(&memcg->kmem, -nr_pages);
3002         }
3003 }
3004
3005
3006 /*
3007  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3008  * @objcg: object cgroup to uncharge
3009  * @nr_pages: number of pages to uncharge
3010  */
3011 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3012                                       unsigned int nr_pages)
3013 {
3014         struct mem_cgroup *memcg;
3015
3016         memcg = get_mem_cgroup_from_objcg(objcg);
3017
3018         memcg_account_kmem(memcg, -nr_pages);
3019         refill_stock(memcg, nr_pages);
3020
3021         css_put(&memcg->css);
3022 }
3023
3024 /*
3025  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3026  * @objcg: object cgroup to charge
3027  * @gfp: reclaim mode
3028  * @nr_pages: number of pages to charge
3029  *
3030  * Returns 0 on success, an error code on failure.
3031  */
3032 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3033                                    unsigned int nr_pages)
3034 {
3035         struct mem_cgroup *memcg;
3036         int ret;
3037
3038         memcg = get_mem_cgroup_from_objcg(objcg);
3039
3040         ret = try_charge_memcg(memcg, gfp, nr_pages);
3041         if (ret)
3042                 goto out;
3043
3044         memcg_account_kmem(memcg, nr_pages);
3045 out:
3046         css_put(&memcg->css);
3047
3048         return ret;
3049 }
3050
3051 /**
3052  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3053  * @page: page to charge
3054  * @gfp: reclaim mode
3055  * @order: allocation order
3056  *
3057  * Returns 0 on success, an error code on failure.
3058  */
3059 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3060 {
3061         struct obj_cgroup *objcg;
3062         int ret = 0;
3063
3064         objcg = get_obj_cgroup_from_current();
3065         if (objcg) {
3066                 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3067                 if (!ret) {
3068                         page->memcg_data = (unsigned long)objcg |
3069                                 MEMCG_DATA_KMEM;
3070                         return 0;
3071                 }
3072                 obj_cgroup_put(objcg);
3073         }
3074         return ret;
3075 }
3076
3077 /**
3078  * __memcg_kmem_uncharge_page: uncharge a kmem page
3079  * @page: page to uncharge
3080  * @order: allocation order
3081  */
3082 void __memcg_kmem_uncharge_page(struct page *page, int order)
3083 {
3084         struct folio *folio = page_folio(page);
3085         struct obj_cgroup *objcg;
3086         unsigned int nr_pages = 1 << order;
3087
3088         if (!folio_memcg_kmem(folio))
3089                 return;
3090
3091         objcg = __folio_objcg(folio);
3092         obj_cgroup_uncharge_pages(objcg, nr_pages);
3093         folio->memcg_data = 0;
3094         obj_cgroup_put(objcg);
3095 }
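
/*
 * Illustrative sketch (editor's example): how the charge/uncharge pair
 * above is typically driven by the page allocator for __GFP_ACCOUNT
 * allocations. Open-coded, the pattern is roughly:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) &&
 *	    __memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);	(charge failed)
 *		page = NULL;
 *	}
 *	...
 *	__memcg_kmem_uncharge_page(page, order);
 *	__free_pages(page, order);
 */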
3096
3097 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3098                      enum node_stat_item idx, int nr)
3099 {
3100         struct memcg_stock_pcp *stock;
3101         struct obj_cgroup *old = NULL;
3102         unsigned long flags;
3103         int *bytes;
3104
3105         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3106         stock = this_cpu_ptr(&memcg_stock);
3107
3108         /*
3109          * Save the vmstat data in the per-cpu stock and skip the global
3110          * vmstat array update unless we have accumulated over a page worth
3111          * of data or the pgdat or idx changes.
3112          */
3113         if (stock->cached_objcg != objcg) {
3114                 old = drain_obj_stock(stock);
3115                 obj_cgroup_get(objcg);
3116                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3117                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3118                 stock->cached_objcg = objcg;
3119                 stock->cached_pgdat = pgdat;
3120         } else if (stock->cached_pgdat != pgdat) {
3121                 /* Flush the existing cached vmstat data */
3122                 struct pglist_data *oldpg = stock->cached_pgdat;
3123
3124                 if (stock->nr_slab_reclaimable_b) {
3125                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3126                                           stock->nr_slab_reclaimable_b);
3127                         stock->nr_slab_reclaimable_b = 0;
3128                 }
3129                 if (stock->nr_slab_unreclaimable_b) {
3130                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3131                                           stock->nr_slab_unreclaimable_b);
3132                         stock->nr_slab_unreclaimable_b = 0;
3133                 }
3134                 stock->cached_pgdat = pgdat;
3135         }
3136
3137         bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3138                                                : &stock->nr_slab_unreclaimable_b;
3139         /*
3140          * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3141          * cached locally at least once before being pushed out.
3142          */
3143         if (!*bytes) {
3144                 *bytes = nr;
3145                 nr = 0;
3146         } else {
3147                 *bytes += nr;
3148                 if (abs(*bytes) > PAGE_SIZE) {
3149                         nr = *bytes;
3150                         *bytes = 0;
3151                 } else {
3152                         nr = 0;
3153                 }
3154         }
3155         if (nr)
3156                 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3157
3158         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3159         if (old)
3160                 obj_cgroup_put(old);
3161 }
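
/*
 * Worked example (editor's illustration, assuming 4K pages): with an
 * empty stock, a series of mod_objcg_state(objcg, pgdat, idx, 300)
 * calls caches 300, 600, 900, ... bytes locally without touching the
 * vmstat counters; on the 14th call the accumulated 4200 bytes exceed
 * PAGE_SIZE, the whole batch is flushed via mod_objcg_mlstate() and
 * the cached counter restarts from 0.
 */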
3162
3163 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3164 {
3165         struct memcg_stock_pcp *stock;
3166         unsigned long flags;
3167         bool ret = false;
3168
3169         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3170
3171         stock = this_cpu_ptr(&memcg_stock);
3172         if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3173                 stock->nr_bytes -= nr_bytes;
3174                 ret = true;
3175         }
3176
3177         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3178
3179         return ret;
3180 }
3181
3182 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3183 {
3184         struct obj_cgroup *old = stock->cached_objcg;
3185
3186         if (!old)
3187                 return NULL;
3188
3189         if (stock->nr_bytes) {
3190                 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3191                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3192
3193                 if (nr_pages) {
3194                         struct mem_cgroup *memcg;
3195
3196                         memcg = get_mem_cgroup_from_objcg(old);
3197
3198                         memcg_account_kmem(memcg, -nr_pages);
3199                         __refill_stock(memcg, nr_pages);
3200
3201                         css_put(&memcg->css);
3202                 }
3203
3204                 /*
3205                  * The leftover is flushed to the centralized per-memcg value.
3206                  * On the next attempt to refill obj stock it will be moved
3207          * to a per-cpu stock (probably on another CPU), see
3208                  * refill_obj_stock().
3209                  *
3210                  * How often it's flushed is a trade-off between the memory
3211                  * limit enforcement accuracy and potential CPU contention,
3212                  * so it might be changed in the future.
3213                  */
3214                 atomic_add(nr_bytes, &old->nr_charged_bytes);
3215                 stock->nr_bytes = 0;
3216         }
3217
3218         /*
3219          * Flush the vmstat data in the current stock
3220          */
3221         if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3222                 if (stock->nr_slab_reclaimable_b) {
3223                         mod_objcg_mlstate(old, stock->cached_pgdat,
3224                                           NR_SLAB_RECLAIMABLE_B,
3225                                           stock->nr_slab_reclaimable_b);
3226                         stock->nr_slab_reclaimable_b = 0;
3227                 }
3228                 if (stock->nr_slab_unreclaimable_b) {
3229                         mod_objcg_mlstate(old, stock->cached_pgdat,
3230                                           NR_SLAB_UNRECLAIMABLE_B,
3231                                           stock->nr_slab_unreclaimable_b);
3232                         stock->nr_slab_unreclaimable_b = 0;
3233                 }
3234                 stock->cached_pgdat = NULL;
3235         }
3236
3237         stock->cached_objcg = NULL;
3238         /*
3239          * The `old' objcg needs to be released by the caller via
3240          * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3241          */
3242         return old;
3243 }
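
/*
 * Worked example (editor's illustration, assuming 4K pages): draining a
 * stock with nr_bytes == 10000 yields nr_pages = 10000 >> PAGE_SHIFT = 2
 * and a remainder of 10000 & (PAGE_SIZE - 1) = 1808 bytes. The two whole
 * pages are unaccounted from MEMCG_KMEM and returned to the per-cpu page
 * stock; the 1808-byte leftover is parked in objcg->nr_charged_bytes.
 */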
3244
3245 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3246                                      struct mem_cgroup *root_memcg)
3247 {
3248         struct mem_cgroup *memcg;
3249
3250         if (stock->cached_objcg) {
3251                 memcg = obj_cgroup_memcg(stock->cached_objcg);
3252                 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3253                         return true;
3254         }
3255
3256         return false;
3257 }
3258
3259 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3260                              bool allow_uncharge)
3261 {
3262         struct memcg_stock_pcp *stock;
3263         struct obj_cgroup *old = NULL;
3264         unsigned long flags;
3265         unsigned int nr_pages = 0;
3266
3267         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3268
3269         stock = this_cpu_ptr(&memcg_stock);
3270         if (stock->cached_objcg != objcg) { /* reset if necessary */
3271                 old = drain_obj_stock(stock);
3272                 obj_cgroup_get(objcg);
3273                 stock->cached_objcg = objcg;
3274                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3275                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3276                 allow_uncharge = true;  /* Allow uncharge when objcg changes */
3277         }
3278         stock->nr_bytes += nr_bytes;
3279
3280         if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3281                 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3282                 stock->nr_bytes &= (PAGE_SIZE - 1);
3283         }
3284
3285         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3286         if (old)
3287                 obj_cgroup_put(old);
3288
3289         if (nr_pages)
3290                 obj_cgroup_uncharge_pages(objcg, nr_pages);
3291 }
3292
3293 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3294 {
3295         unsigned int nr_pages, nr_bytes;
3296         int ret;
3297
3298         if (consume_obj_stock(objcg, size))
3299                 return 0;
3300
3301         /*
3302          * In theory, objcg->nr_charged_bytes can have enough
3303          * pre-charged bytes to satisfy the allocation. However,
3304          * flushing objcg->nr_charged_bytes requires two atomic
3305          * operations, and objcg->nr_charged_bytes can't be big.
3306          * The shared objcg->nr_charged_bytes can also become a
3307          * performance bottleneck if all tasks of the same memcg are
3308          * trying to update it. So it's better to ignore it and try to
3309          * grab some new pages. The stock's nr_bytes will be flushed to
3310          * objcg->nr_charged_bytes later on when objcg changes.
3311          *
3312          * The stock's nr_bytes may contain enough pre-charged bytes
3313          * to allow one less page to be charged, but we can't rely
3314          * on the pre-charged bytes not being changed outside of
3315          * consume_obj_stock() or refill_obj_stock(). So ignore those
3316          * pre-charged bytes as well when charging pages. To avoid a
3317          * page uncharge right after a page charge, we set the
3318          * allow_uncharge flag to false when calling refill_obj_stock()
3319          * to temporarily allow the pre-charged bytes to exceed the page
3320          * size limit. The maximum reachable value of the pre-charged
3321          * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3322          * race.
3323          */
3324         nr_pages = size >> PAGE_SHIFT;
3325         nr_bytes = size & (PAGE_SIZE - 1);
3326
3327         if (nr_bytes)
3328                 nr_pages += 1;
3329
3330         ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3331         if (!ret && nr_bytes)
3332                 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3333
3334         return ret;
3335 }
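
/*
 * Worked example (editor's illustration, assuming 4K pages): charging a
 * 5000-byte object that misses the stock gives nr_pages = 1 plus one
 * more page for the 904-byte remainder, so two pages (8192 bytes) are
 * charged. The unused 4096 - 904 = 3192 bytes are then put back into
 * the stock with allow_uncharge == false, so the pre-charge is kept for
 * subsequent allocations instead of being uncharged right away.
 */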
3336
3337 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3338 {
3339         refill_obj_stock(objcg, size, true);
3340 }
3341
3342 #endif /* CONFIG_MEMCG_KMEM */
3343
3344 /*
3345  * Because page_memcg(head) is not set on tail pages, set it now.
3346  */
3347 void split_page_memcg(struct page *head, unsigned int nr)
3348 {
3349         struct folio *folio = page_folio(head);
3350         struct mem_cgroup *memcg = folio_memcg(folio);
3351         int i;
3352
3353         if (mem_cgroup_disabled() || !memcg)
3354                 return;
3355
3356         for (i = 1; i < nr; i++)
3357                 folio_page(folio, i)->memcg_data = folio->memcg_data;
3358
3359         if (folio_memcg_kmem(folio))
3360                 obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3361         else
3362                 css_get_many(&memcg->css, nr - 1);
3363 }
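
/*
 * Worked example (editor's illustration): splitting an order-2 compound
 * page (nr == 4) copies the head's memcg_data to tail pages 1..3 and
 * takes nr - 1 == 3 extra references (objcg references for kmem pages,
 * css references otherwise), so each of the four resulting pages holds
 * its own reference on release.
 */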
3364
3365 #ifdef CONFIG_MEMCG_SWAP
3366 /**
3367  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3368  * @entry: swap entry to be moved
3369  * @from:  mem_cgroup which the entry is moved from
3370  * @to:  mem_cgroup which the entry is moved to
3371  *
3372  * It succeeds only when the swap_cgroup's record for this entry is the same
3373  * as the mem_cgroup's id of @from.
3374  *
3375  * Returns 0 on success, -EINVAL on failure.
3376  *
3377  * The caller must have charged to @to, IOW, called page_counter_charge() for
3378  * both res and memsw, and called css_get().
3379  */
3380 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3381                                 struct mem_cgroup *from, struct mem_cgroup *to)
3382 {
3383         unsigned short old_id, new_id;
3384
3385         old_id = mem_cgroup_id(from);
3386         new_id = mem_cgroup_id(to);
3387
3388         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3389                 mod_memcg_state(from, MEMCG_SWAP, -1);
3390                 mod_memcg_state(to, MEMCG_SWAP, 1);
3391                 return 0;
3392         }
3393         return -EINVAL;
3394 }
3395 #else
3396 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3397                                 struct mem_cgroup *from, struct mem_cgroup *to)
3398 {
3399         return -EINVAL;
3400 }
3401 #endif
3402
3403 static DEFINE_MUTEX(memcg_max_mutex);
3404
3405 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3406                                  unsigned long max, bool memsw)
3407 {
3408         bool enlarge = false;
3409         bool drained = false;
3410         int ret;
3411         bool limits_invariant;
3412         struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3413
3414         do {
3415                 if (signal_pending(current)) {
3416                         ret = -EINTR;
3417                         break;
3418                 }
3419
3420                 mutex_lock(&memcg_max_mutex);
3421                 /*
3422                  * Make sure that the new limit (memsw or memory limit) doesn't
3423                  * break the basic invariant memory.max <= memsw.max.
3424                  */
3425                 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3426                                            max <= memcg->memsw.max;
3427                 if (!limits_invariant) {
3428                         mutex_unlock(&memcg_max_mutex);
3429                         ret = -EINVAL;
3430                         break;
3431                 }
3432                 if (max > counter->max)
3433                         enlarge = true;
3434                 ret = page_counter_set_max(counter, max);
3435                 mutex_unlock(&memcg_max_mutex);
3436
3437                 if (!ret)
3438                         break;
3439
3440                 if (!drained) {
3441                         drain_all_stock(memcg);
3442                         drained = true;
3443                         continue;
3444                 }
3445
3446                 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3447                                         memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3448                         ret = -EBUSY;
3449                         break;
3450                 }
3451         } while (true);
3452
3453         if (!ret && enlarge)
3454                 memcg_oom_recover(memcg);
3455
3456         return ret;
3457 }
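
/*
 * Worked example (editor's illustration): with memory.max at 512M and
 * memsw.max at 512M, an attempt to lower memsw.max to 256M fails with
 * -EINVAL because the invariant memory.max <= memsw.max would break.
 * When shrinking a limit below current usage, the loop above first
 * drains the per-cpu stocks and then retries reclaim until the counter
 * fits, a signal arrives (-EINTR), or reclaim stops making progress
 * (-EBUSY).
 */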
3458
3459 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3460                                             gfp_t gfp_mask,
3461                                             unsigned long *total_scanned)
3462 {
3463         unsigned long nr_reclaimed = 0;
3464         struct mem_cgroup_per_node *mz, *next_mz = NULL;
3465         unsigned long reclaimed;
3466         int loop = 0;
3467         struct mem_cgroup_tree_per_node *mctz;
3468         unsigned long excess;
3469
3470         if (order > 0)
3471                 return 0;
3472
3473         mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3474
3475         /*
3476          * Do not even bother to check the largest node if the root
3477          * is empty. Do it locklessly to prevent lock bouncing. Races
3478          * are acceptable as the soft limit is best effort anyway.
3479          */
3480         if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3481                 return 0;
3482
3483         /*
3484          * This loop can run for a while, especially if mem_cgroups
3485          * continuously keep exceeding their soft limit and putting the
3486          * system under pressure.
3487          */
3488         do {
3489                 if (next_mz)
3490                         mz = next_mz;
3491                 else
3492                         mz = mem_cgroup_largest_soft_limit_node(mctz);
3493                 if (!mz)
3494                         break;
3495
3496                 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3497                                                     gfp_mask, total_scanned);
3498                 nr_reclaimed += reclaimed;
3499                 spin_lock_irq(&mctz->lock);
3500
3501                 /*
3502                  * If we failed to reclaim anything from this memory cgroup
3503                  * it is time to move on to the next cgroup
3504                  */
3505                 next_mz = NULL;
3506                 if (!reclaimed)
3507                         next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3508
3509                 excess = soft_limit_excess(mz->memcg);
3510                 /*
3511                  * One school of thought says that we should not add
3512                  * back the node to the tree if reclaim returns 0.
3513          * But our reclaim could return 0 simply because, due
3514          * to priority, we are exposing a smaller subset of
3515                  * memory to reclaim from. Consider this as a longer
3516                  * term TODO.
3517                  */
3518                 /* If excess == 0, no tree ops */
3519                 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3520                 spin_unlock_irq(&mctz->lock);
3521                 css_put(&mz->memcg->css);
3522                 loop++;
3523                 /*
3524                  * Could not reclaim anything and there are no more
3525                  * mem cgroups to try or we seem to be looping without
3526                  * reclaiming anything.
3527                  */
3528                 if (!nr_reclaimed &&
3529                         (next_mz == NULL ||
3530                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3531                         break;
3532         } while (!nr_reclaimed);
3533         if (next_mz)
3534                 css_put(&next_mz->memcg->css);
3535         return nr_reclaimed;
3536 }
3537
3538 /*
3539  * Reclaims as many pages from the given memcg as possible.
3540  *
3541  * The caller is responsible for holding a css reference on the memcg.
3542  */
3543 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3544 {
3545         int nr_retries = MAX_RECLAIM_RETRIES;
3546
3547         /* we call try-to-free pages to make this cgroup empty */
3548         lru_add_drain_all();
3549
3550         drain_all_stock(memcg);
3551
3552         /* try to free all pages in this cgroup */
3553         while (nr_retries && page_counter_read(&memcg->memory)) {
3554                 if (signal_pending(current))
3555                         return -EINTR;
3556
3557                 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3558                                                   MEMCG_RECLAIM_MAY_SWAP))
3559                         nr_retries--;
3560         }
3561
3562         return 0;
3563 }
3564
3565 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3566                                             char *buf, size_t nbytes,
3567                                             loff_t off)
3568 {
3569         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3570
3571         if (mem_cgroup_is_root(memcg))
3572                 return -EINVAL;
3573         return mem_cgroup_force_empty(memcg) ?: nbytes;
3574 }
3575
3576 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3577                                      struct cftype *cft)
3578 {
3579         return 1;
3580 }
3581
3582 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3583                                       struct cftype *cft, u64 val)
3584 {
3585         if (val == 1)
3586                 return 0;
3587
3588         pr_warn_once("Non-hierarchical mode is deprecated. "
3589                      "Please report your usecase to linux-mm@kvack.org if you "
3590                      "depend on this functionality.\n");
3591
3592         return -EINVAL;
3593 }
3594
3595 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3596 {
3597         unsigned long val;
3598
3599         if (mem_cgroup_is_root(memcg)) {
3600                 mem_cgroup_flush_stats();
3601                 val = memcg_page_state(memcg, NR_FILE_PAGES) +
3602                         memcg_page_state(memcg, NR_ANON_MAPPED);
3603                 if (swap)
3604                         val += memcg_page_state(memcg, MEMCG_SWAP);
3605         } else {
3606                 if (!swap)
3607                         val = page_counter_read(&memcg->memory);
3608                 else
3609                         val = page_counter_read(&memcg->memsw);
3610         }
3611         return val;
3612 }
3613
3614 enum {
3615         RES_USAGE,
3616         RES_LIMIT,
3617         RES_MAX_USAGE,
3618         RES_FAILCNT,
3619         RES_SOFT_LIMIT,
3620 };
3621
3622 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3623                                struct cftype *cft)
3624 {
3625         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3626         struct page_counter *counter;
3627
3628         switch (MEMFILE_TYPE(cft->private)) {
3629         case _MEM:
3630                 counter = &memcg->memory;
3631                 break;
3632         case _MEMSWAP:
3633                 counter = &memcg->memsw;
3634                 break;
3635         case _KMEM:
3636                 counter = &memcg->kmem;
3637                 break;
3638         case _TCP:
3639                 counter = &memcg->tcpmem;
3640                 break;
3641         default:
3642                 BUG();
3643         }
3644
3645         switch (MEMFILE_ATTR(cft->private)) {
3646         case RES_USAGE:
3647                 if (counter == &memcg->memory)
3648                         return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3649                 if (counter == &memcg->memsw)
3650                         return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3651                 return (u64)page_counter_read(counter) * PAGE_SIZE;
3652         case RES_LIMIT:
3653                 return (u64)counter->max * PAGE_SIZE;
3654         case RES_MAX_USAGE:
3655                 return (u64)counter->watermark * PAGE_SIZE;
3656         case RES_FAILCNT:
3657                 return counter->failcnt;
3658         case RES_SOFT_LIMIT:
3659                 return (u64)memcg->soft_limit * PAGE_SIZE;
3660         default:
3661                 BUG();
3662         }
3663 }
3664
3665 #ifdef CONFIG_MEMCG_KMEM
3666 static int memcg_online_kmem(struct mem_cgroup *memcg)
3667 {
3668         struct obj_cgroup *objcg;
3669
3670         if (mem_cgroup_kmem_disabled())
3671                 return 0;
3672
3673         if (unlikely(mem_cgroup_is_root(memcg)))
3674                 return 0;
3675
3676         objcg = obj_cgroup_alloc();
3677         if (!objcg)
3678                 return -ENOMEM;
3679
3680         objcg->memcg = memcg;
3681         rcu_assign_pointer(memcg->objcg, objcg);
3682
3683         static_branch_enable(&memcg_kmem_enabled_key);
3684
3685         memcg->kmemcg_id = memcg->id.id;
3686
3687         return 0;
3688 }
3689
3690 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3691 {
3692         struct mem_cgroup *parent;
3693
3694         if (mem_cgroup_kmem_disabled())
3695                 return;
3696
3697         if (unlikely(mem_cgroup_is_root(memcg)))
3698                 return;
3699
3700         parent = parent_mem_cgroup(memcg);
3701         if (!parent)
3702                 parent = root_mem_cgroup;
3703
3704         memcg_reparent_objcgs(memcg, parent);
3705
3706         /*
3707          * After we have finished memcg_reparent_objcgs(), all list_lrus
3708          * corresponding to this cgroup are guaranteed to remain empty.
3709          * The ordering is imposed by list_lru_node->lock taken by
3710          * memcg_reparent_list_lrus().
3711          */
3712         memcg_reparent_list_lrus(memcg, parent);
3713 }
3714 #else
3715 static int memcg_online_kmem(struct mem_cgroup *memcg)
3716 {
3717         return 0;
3718 }
3719 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3720 {
3721 }
3722 #endif /* CONFIG_MEMCG_KMEM */
3723
3724 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3725 {
3726         int ret;
3727
3728         mutex_lock(&memcg_max_mutex);
3729
3730         ret = page_counter_set_max(&memcg->tcpmem, max);
3731         if (ret)
3732                 goto out;
3733
3734         if (!memcg->tcpmem_active) {
3735                 /*
3736                  * The active flag needs to be written after the static_key
3737                  * update. This is what guarantees that the socket activation
3738                  * function is the last one to run. See mem_cgroup_sk_alloc()
3739                  * for details, and note that we don't mark any socket as
3740                  * belonging to this memcg until that flag is up.
3741                  *
3742                  * We need to do this, because static_keys will span multiple
3743                  * sites, but we can't control their order. If we mark a socket
3744                  * as accounted, but the accounting functions are not patched in
3745                  * yet, we'll lose accounting.
3746                  *
3747                  * We never race with the readers in mem_cgroup_sk_alloc(),
3748          * because when this value changes, the code to process it is not
3749                  * patched in yet.
3750                  */
3751                 static_branch_inc(&memcg_sockets_enabled_key);
3752                 memcg->tcpmem_active = true;
3753         }
3754 out:
3755         mutex_unlock(&memcg_max_mutex);
3756         return ret;
3757 }
3758
3759 /*
3760  * The users of this function are the RES_LIMIT and
3761  * RES_SOFT_LIMIT files.
3762  */
3763 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3764                                 char *buf, size_t nbytes, loff_t off)
3765 {
3766         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3767         unsigned long nr_pages;
3768         int ret;
3769
3770         buf = strstrip(buf);
3771         ret = page_counter_memparse(buf, "-1", &nr_pages);
3772         if (ret)
3773                 return ret;
3774
3775         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3776         case RES_LIMIT:
3777                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3778                         ret = -EINVAL;
3779                         break;
3780                 }
3781                 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3782                 case _MEM:
3783                         ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3784                         break;
3785                 case _MEMSWAP:
3786                         ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3787                         break;
3788                 case _KMEM:
3789                         /* kmem.limit_in_bytes is deprecated. */
3790                         ret = -EOPNOTSUPP;
3791                         break;
3792                 case _TCP:
3793                         ret = memcg_update_tcp_max(memcg, nr_pages);
3794                         break;
3795                 }
3796                 break;
3797         case RES_SOFT_LIMIT:
3798                 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3799                         ret = -EOPNOTSUPP;
3800                 } else {
3801                         memcg->soft_limit = nr_pages;
3802                         ret = 0;
3803                 }
3804                 break;
3805         }
3806         return ret ?: nbytes;
3807 }
3808
3809 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3810                                 size_t nbytes, loff_t off)
3811 {
3812         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3813         struct page_counter *counter;
3814
3815         switch (MEMFILE_TYPE(of_cft(of)->private)) {
3816         case _MEM:
3817                 counter = &memcg->memory;
3818                 break;
3819         case _MEMSWAP:
3820                 counter = &memcg->memsw;
3821                 break;
3822         case _KMEM:
3823                 counter = &memcg->kmem;
3824                 break;
3825         case _TCP:
3826                 counter = &memcg->tcpmem;
3827                 break;
3828         default:
3829                 BUG();
3830         }
3831
3832         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3833         case RES_MAX_USAGE:
3834                 page_counter_reset_watermark(counter);
3835                 break;
3836         case RES_FAILCNT:
3837                 counter->failcnt = 0;
3838                 break;
3839         default:
3840                 BUG();
3841         }
3842
3843         return nbytes;
3844 }
3845
3846 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3847                                         struct cftype *cft)
3848 {
3849         return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3850 }
3851
3852 #ifdef CONFIG_MMU
3853 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3854                                         struct cftype *cft, u64 val)
3855 {
3856         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3857
3858         if (val & ~MOVE_MASK)
3859                 return -EINVAL;
3860
3861         /*
3862          * No kind of locking is needed in here, because ->can_attach() will
3863          * check this value once in the beginning of the process, and then carry
3864          * on with stale data. This means that changes to this value will only
3865          * affect task migrations starting after the change.
3866          */
3867         memcg->move_charge_at_immigrate = val;
3868         return 0;
3869 }
3870 #else
3871 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3872                                         struct cftype *cft, u64 val)
3873 {
3874         return -ENOSYS;
3875 }
3876 #endif
3877
3878 #ifdef CONFIG_NUMA
3879
3880 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3881 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3882 #define LRU_ALL      ((1 << NR_LRU_LISTS) - 1)
3883
3884 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3885                                 int nid, unsigned int lru_mask, bool tree)
3886 {
3887         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3888         unsigned long nr = 0;
3889         enum lru_list lru;
3890
3891         VM_BUG_ON((unsigned)nid >= nr_node_ids);
3892
3893         for_each_lru(lru) {
3894                 if (!(BIT(lru) & lru_mask))
3895                         continue;
3896                 if (tree)
3897                         nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3898                 else
3899                         nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3900         }
3901         return nr;
3902 }
3903
3904 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3905                                              unsigned int lru_mask,
3906                                              bool tree)
3907 {
3908         unsigned long nr = 0;
3909         enum lru_list lru;
3910
3911         for_each_lru(lru) {
3912                 if (!(BIT(lru) & lru_mask))
3913                         continue;
3914                 if (tree)
3915                         nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3916                 else
3917                         nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3918         }
3919         return nr;
3920 }
3921
3922 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3923 {
3924         struct numa_stat {
3925                 const char *name;
3926                 unsigned int lru_mask;
3927         };
3928
3929         static const struct numa_stat stats[] = {
3930                 { "total", LRU_ALL },
3931                 { "file", LRU_ALL_FILE },
3932                 { "anon", LRU_ALL_ANON },
3933                 { "unevictable", BIT(LRU_UNEVICTABLE) },
3934         };
3935         const struct numa_stat *stat;
3936         int nid;
3937         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3938
3939         mem_cgroup_flush_stats();
3940
3941         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3942                 seq_printf(m, "%s=%lu", stat->name,
3943                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3944                                                    false));
3945                 for_each_node_state(nid, N_MEMORY)
3946                         seq_printf(m, " N%d=%lu", nid,
3947                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
3948                                                         stat->lru_mask, false));
3949                 seq_putc(m, '\n');
3950         }
3951
3952         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3953
3954                 seq_printf(m, "hierarchical_%s=%lu", stat->name,
3955                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3956                                                    true));
3957                 for_each_node_state(nid, N_MEMORY)
3958                         seq_printf(m, " N%d=%lu", nid,
3959                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
3960                                                         stat->lru_mask, true));
3961                 seq_putc(m, '\n');
3962         }
3963
3964         return 0;
3965 }
3966 #endif /* CONFIG_NUMA */
3967
3968 static const unsigned int memcg1_stats[] = {
3969         NR_FILE_PAGES,
3970         NR_ANON_MAPPED,
3971 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3972         NR_ANON_THPS,
3973 #endif
3974         NR_SHMEM,
3975         NR_FILE_MAPPED,
3976         NR_FILE_DIRTY,
3977         NR_WRITEBACK,
3978         MEMCG_SWAP,
3979 };
3980
3981 static const char *const memcg1_stat_names[] = {
3982         "cache",
3983         "rss",
3984 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3985         "rss_huge",
3986 #endif
3987         "shmem",
3988         "mapped_file",
3989         "dirty",
3990         "writeback",
3991         "swap",
3992 };
3993
3994 /* Universal VM events cgroup1 shows, original sort order */
3995 static const unsigned int memcg1_events[] = {
3996         PGPGIN,
3997         PGPGOUT,
3998         PGFAULT,
3999         PGMAJFAULT,
4000 };
4001
4002 static int memcg_stat_show(struct seq_file *m, void *v)
4003 {
4004         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4005         unsigned long memory, memsw;
4006         struct mem_cgroup *mi;
4007         unsigned int i;
4008
4009         BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4010
4011         mem_cgroup_flush_stats();
4012
4013         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4014                 unsigned long nr;
4015
4016                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4017                         continue;
4018                 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4019                 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
4020         }
4021
4022         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4023                 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4024                            memcg_events_local(memcg, memcg1_events[i]));
4025
4026         for (i = 0; i < NR_LRU_LISTS; i++)
4027                 seq_printf(m, "%s %lu\n", lru_list_name(i),
4028                            memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4029                            PAGE_SIZE);
4030
4031         /* Hierarchical information */
4032         memory = memsw = PAGE_COUNTER_MAX;
4033         for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4034                 memory = min(memory, READ_ONCE(mi->memory.max));
4035                 memsw = min(memsw, READ_ONCE(mi->memsw.max));
4036         }
4037         seq_printf(m, "hierarchical_memory_limit %llu\n",
4038                    (u64)memory * PAGE_SIZE);
4039         if (do_memsw_account())
4040                 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4041                            (u64)memsw * PAGE_SIZE);
4042
4043         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4044                 unsigned long nr;
4045
4046                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4047                         continue;
4048                 nr = memcg_page_state(memcg, memcg1_stats[i]);
4049                 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4050                                                 (u64)nr * PAGE_SIZE);
4051         }
4052
4053         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4054                 seq_printf(m, "total_%s %llu\n",
4055                            vm_event_name(memcg1_events[i]),
4056                            (u64)memcg_events(memcg, memcg1_events[i]));
4057
4058         for (i = 0; i < NR_LRU_LISTS; i++)
4059                 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4060                            (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4061                            PAGE_SIZE);
4062
4063 #ifdef CONFIG_DEBUG_VM
4064         {
4065                 pg_data_t *pgdat;
4066                 struct mem_cgroup_per_node *mz;
4067                 unsigned long anon_cost = 0;
4068                 unsigned long file_cost = 0;
4069
4070                 for_each_online_pgdat(pgdat) {
4071                         mz = memcg->nodeinfo[pgdat->node_id];
4072
4073                         anon_cost += mz->lruvec.anon_cost;
4074                         file_cost += mz->lruvec.file_cost;
4075                 }
4076                 seq_printf(m, "anon_cost %lu\n", anon_cost);
4077                 seq_printf(m, "file_cost %lu\n", file_cost);
4078         }
4079 #endif
4080
4081         return 0;
4082 }
4083
4084 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4085                                       struct cftype *cft)
4086 {
4087         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4088
4089         return mem_cgroup_swappiness(memcg);
4090 }
4091
4092 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4093                                        struct cftype *cft, u64 val)
4094 {
4095         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4096
4097         if (val > 200)
4098                 return -EINVAL;
4099
4100         if (!mem_cgroup_is_root(memcg))
4101                 memcg->swappiness = val;
4102         else
4103                 vm_swappiness = val;
4104
4105         return 0;
4106 }
4107
4108 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4109 {
4110         struct mem_cgroup_threshold_ary *t;
4111         unsigned long usage;
4112         int i;
4113
4114         rcu_read_lock();
4115         if (!swap)
4116                 t = rcu_dereference(memcg->thresholds.primary);
4117         else
4118                 t = rcu_dereference(memcg->memsw_thresholds.primary);
4119
4120         if (!t)
4121                 goto unlock;
4122
4123         usage = mem_cgroup_usage(memcg, swap);
4124
4125         /*
4126          * current_threshold points to the threshold just below or equal to
4127          * usage. If that is not true, a threshold was crossed after the last
4128          * call of __mem_cgroup_threshold().
4129          */
4130         i = t->current_threshold;
4131
4132         /*
4133          * Iterate backward over the array of thresholds starting from
4134          * current_threshold and check if a threshold was crossed.
4135          * If none of the thresholds below usage was crossed, we read
4136          * only one element of the array here.
4137          */
4138         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4139                 eventfd_signal(t->entries[i].eventfd, 1);
4140
4141         /* i = current_threshold + 1 */
4142         i++;
4143
4144         /*
4145          * Iterate forward over the array of thresholds starting from
4146          * current_threshold+1 and check if a threshold was crossed.
4147          * If none of the thresholds above usage was crossed, we read
4148          * only one element of the array here.
4149          */
4150         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4151                 eventfd_signal(t->entries[i].eventfd, 1);
4152
4153         /* Update current_threshold */
4154         t->current_threshold = i - 1;
4155 unlock:
4156         rcu_read_unlock();
4157 }
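
/*
 * Worked example (editor's illustration): with sorted thresholds
 * {4M, 8M, 16M} and usage at 10M, current_threshold is 1 (the 8M
 * entry). If usage grows to 20M, the backward loop signals nothing,
 * the forward loop signals the 16M eventfd and current_threshold
 * becomes 2. If usage then drops to 5M, the backward loop signals the
 * 16M and 8M eventfds and current_threshold returns to 0; thresholds
 * thus fire on crossings in both directions.
 */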
4158
4159 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4160 {
4161         while (memcg) {
4162                 __mem_cgroup_threshold(memcg, false);
4163                 if (do_memsw_account())
4164                         __mem_cgroup_threshold(memcg, true);
4165
4166                 memcg = parent_mem_cgroup(memcg);
4167         }
4168 }
4169
4170 static int compare_thresholds(const void *a, const void *b)
4171 {
4172         const struct mem_cgroup_threshold *_a = a;
4173         const struct mem_cgroup_threshold *_b = b;
4174
4175         if (_a->threshold > _b->threshold)
4176                 return 1;
4177
4178         if (_a->threshold < _b->threshold)
4179                 return -1;
4180
4181         return 0;
4182 }
4183
4184 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4185 {
4186         struct mem_cgroup_eventfd_list *ev;
4187
4188         spin_lock(&memcg_oom_lock);
4189
4190         list_for_each_entry(ev, &memcg->oom_notify, list)
4191                 eventfd_signal(ev->eventfd, 1);
4192
4193         spin_unlock(&memcg_oom_lock);
4194         return 0;
4195 }
4196
4197 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4198 {
4199         struct mem_cgroup *iter;
4200
4201         for_each_mem_cgroup_tree(iter, memcg)
4202                 mem_cgroup_oom_notify_cb(iter);
4203 }
4204
4205 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4206         struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4207 {
4208         struct mem_cgroup_thresholds *thresholds;
4209         struct mem_cgroup_threshold_ary *new;
4210         unsigned long threshold;
4211         unsigned long usage;
4212         int i, size, ret;
4213
4214         ret = page_counter_memparse(args, "-1", &threshold);
4215         if (ret)
4216                 return ret;
4217
4218         mutex_lock(&memcg->thresholds_lock);
4219
4220         if (type == _MEM) {
4221                 thresholds = &memcg->thresholds;
4222                 usage = mem_cgroup_usage(memcg, false);
4223         } else if (type == _MEMSWAP) {
4224                 thresholds = &memcg->memsw_thresholds;
4225                 usage = mem_cgroup_usage(memcg, true);
4226         } else
4227                 BUG();
4228
4229         /* Check if a threshold was crossed before adding a new one */
4230         if (thresholds->primary)
4231                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4232
4233         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4234
4235         /* Allocate memory for new array of thresholds */
4236         new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4237         if (!new) {
4238                 ret = -ENOMEM;
4239                 goto unlock;
4240         }
4241         new->size = size;
4242
4243         /* Copy thresholds (if any) to new array */
4244         if (thresholds->primary)
4245                 memcpy(new->entries, thresholds->primary->entries,
4246                        flex_array_size(new, entries, size - 1));
4247
4248         /* Add new threshold */
4249         new->entries[size - 1].eventfd = eventfd;
4250         new->entries[size - 1].threshold = threshold;
4251
4252         /* Sort thresholds. Registering a new threshold isn't time-critical */
4253         sort(new->entries, size, sizeof(*new->entries),
4254                         compare_thresholds, NULL);
4255
4256         /* Find current threshold */
4257         new->current_threshold = -1;
4258         for (i = 0; i < size; i++) {
4259                 if (new->entries[i].threshold <= usage) {
4260                         /*
4261                          * new->current_threshold will not be used until
4262                          * rcu_assign_pointer(), so it's safe to increment
4263                          * it here.
4264                          */
4265                         ++new->current_threshold;
4266                 } else
4267                         break;
4268         }
4269
4270         /* Free old spare buffer and save old primary buffer as spare */
4271         kfree(thresholds->spare);
4272         thresholds->spare = thresholds->primary;
4273
4274         rcu_assign_pointer(thresholds->primary, new);
4275
4276         /* To be sure that nobody uses thresholds */
4277         synchronize_rcu();
4278
4279 unlock:
4280         mutex_unlock(&memcg->thresholds_lock);
4281
4282         return ret;
4283 }
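
/*
 * Illustrative sketch (editor's example): how userspace arms one of
 * these thresholds through the cgroup v1 event_control interface (the
 * cgroup path and the usual unistd/fcntl/sys/eventfd.h headers are
 * assumed for the example):
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *		       O_WRONLY);
 *	char buf[64];
 *	uint64_t cnt;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd, 100ULL << 20);
 *	write(cfd, buf, strlen(buf));		(arm at 100M)
 *	read(efd, &cnt, sizeof(cnt));		(blocks until crossed)
 */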
4284
4285 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4286         struct eventfd_ctx *eventfd, const char *args)
4287 {
4288         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4289 }
4290
4291 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4292         struct eventfd_ctx *eventfd, const char *args)
4293 {
4294         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4295 }
4296
4297 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4298         struct eventfd_ctx *eventfd, enum res_type type)
4299 {
4300         struct mem_cgroup_thresholds *thresholds;
4301         struct mem_cgroup_threshold_ary *new;
4302         unsigned long usage;
4303         int i, j, size, entries;
4304
4305         mutex_lock(&memcg->thresholds_lock);
4306
4307         if (type == _MEM) {
4308                 thresholds = &memcg->thresholds;
4309                 usage = mem_cgroup_usage(memcg, false);
4310         } else if (type == _MEMSWAP) {
4311                 thresholds = &memcg->memsw_thresholds;
4312                 usage = mem_cgroup_usage(memcg, true);
4313         } else
4314                 BUG();
4315
4316         if (!thresholds->primary)
4317                 goto unlock;
4318
4319         /* Check if a threshold was crossed before removing */
4320         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4321
4322         /* Calculate the new number of thresholds */
4323         size = entries = 0;
4324         for (i = 0; i < thresholds->primary->size; i++) {
4325                 if (thresholds->primary->entries[i].eventfd != eventfd)
4326                         size++;
4327                 else
4328                         entries++;
4329         }
4330
4331         new = thresholds->spare;
4332
4333         /* If no entries related to eventfd were found, nothing to do */
4334         if (!entries)
4335                 goto unlock;
4336
4337         /* Set thresholds array to NULL if we don't have thresholds */
4338         if (!size) {
4339                 kfree(new);
4340                 new = NULL;
4341                 goto swap_buffers;
4342         }
4343
4344         new->size = size;
4345
4346         /* Copy thresholds and find current threshold */
4347         new->current_threshold = -1;
4348         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4349                 if (thresholds->primary->entries[i].eventfd == eventfd)
4350                         continue;
4351
4352                 new->entries[j] = thresholds->primary->entries[i];
4353                 if (new->entries[j].threshold <= usage) {
4354                         /*
4355                          * new->current_threshold will not be used
4356                          * until rcu_assign_pointer(), so it's safe to increment
4357                          * it here.
4358                          */
4359                         ++new->current_threshold;
4360                 }
4361                 j++;
4362         }
4363
4364 swap_buffers:
4365         /* Swap primary and spare array */
4366         thresholds->spare = thresholds->primary;
4367
4368         rcu_assign_pointer(thresholds->primary, new);
4369
4370         /* To be sure that nobody uses thresholds */
4371         synchronize_rcu();
4372
4373         /* If all events are unregistered, free the spare array */
4374         if (!new) {
4375                 kfree(thresholds->spare);
4376                 thresholds->spare = NULL;
4377         }
4378 unlock:
4379         mutex_unlock(&memcg->thresholds_lock);
4380 }
4381
4382 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4383         struct eventfd_ctx *eventfd)
4384 {
4385         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4386 }
4387
4388 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4389         struct eventfd_ctx *eventfd)
4390 {
4391         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4392 }
4393
4394 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4395         struct eventfd_ctx *eventfd, const char *args)
4396 {
4397         struct mem_cgroup_eventfd_list *event;
4398
4399         event = kmalloc(sizeof(*event), GFP_KERNEL);
4400         if (!event)
4401                 return -ENOMEM;
4402
4403         spin_lock(&memcg_oom_lock);
4404
4405         event->eventfd = eventfd;
4406         list_add(&event->list, &memcg->oom_notify);
4407
4408         /* already in OOM? */
4409         if (memcg->under_oom)
4410                 eventfd_signal(eventfd, 1);
4411         spin_unlock(&memcg_oom_lock);
4412
4413         return 0;
4414 }
4415
4416 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4417         struct eventfd_ctx *eventfd)
4418 {
4419         struct mem_cgroup_eventfd_list *ev, *tmp;
4420
4421         spin_lock(&memcg_oom_lock);
4422
4423         list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4424                 if (ev->eventfd == eventfd) {
4425                         list_del(&ev->list);
4426                         kfree(ev);
4427                 }
4428         }
4429
4430         spin_unlock(&memcg_oom_lock);
4431 }
4432
4433 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4434 {
4435         struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4436
4437         seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4438         seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4439         seq_printf(sf, "oom_kill %lu\n",
4440                    atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4441         return 0;
4442 }
4443
4444 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4445         struct cftype *cft, u64 val)
4446 {
4447         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4448
4449         /* cannot be set on the root cgroup; only 0 and 1 are allowed */
4450         if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4451                 return -EINVAL;
4452
4453         memcg->oom_kill_disable = val;
4454         if (!val)
4455                 memcg_oom_recover(memcg);
4456
4457         return 0;
4458 }
4459
4460 #ifdef CONFIG_CGROUP_WRITEBACK
4461
4462 #include <trace/events/writeback.h>
4463
4464 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4465 {
4466         return wb_domain_init(&memcg->cgwb_domain, gfp);
4467 }
4468
4469 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4470 {
4471         wb_domain_exit(&memcg->cgwb_domain);
4472 }
4473
4474 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4475 {
4476         wb_domain_size_changed(&memcg->cgwb_domain);
4477 }
4478
4479 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4480 {
4481         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4482
4483         if (!memcg->css.parent)
4484                 return NULL;
4485
4486         return &memcg->cgwb_domain;
4487 }
4488
4489 /**
4490  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4491  * @wb: bdi_writeback in question
4492  * @pfilepages: out parameter for number of file pages
4493  * @pheadroom: out parameter for number of allocatable pages according to memcg
4494  * @pdirty: out parameter for number of dirty pages
4495  * @pwriteback: out parameter for number of pages under writeback
4496  *
4497  * Determine the numbers of file, headroom, dirty, and writeback pages in
4498  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4499  * is a bit more involved.
4500  *
4501  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4502  * headroom is calculated as the lowest headroom of itself and the
4503  * ancestors.  Note that this doesn't consider the actual amount of
4504  * available memory in the system.  The caller should further cap
4505  * *@pheadroom accordingly.
4506  */
4507 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4508                          unsigned long *pheadroom, unsigned long *pdirty,
4509                          unsigned long *pwriteback)
4510 {
4511         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4512         struct mem_cgroup *parent;
4513
4514         mem_cgroup_flush_stats();
4515
4516         *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4517         *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4518         *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4519                         memcg_page_state(memcg, NR_ACTIVE_FILE);
4520
4521         *pheadroom = PAGE_COUNTER_MAX;
4522         while ((parent = parent_mem_cgroup(memcg))) {
4523                 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4524                                             READ_ONCE(memcg->memory.high));
4525                 unsigned long used = page_counter_read(&memcg->memory);
4526
4527                 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4528                 memcg = parent;
4529         }
4530 }
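
/*
 * Worked example with made-up numbers: a child limited to 200M and using
 * 150M has 50M of headroom, but if its parent is limited to 1G and uses
 * 990M, the parent level contributes only 34M, so *pheadroom ends up at
 * 34M: writeback headroom is bounded by the tightest ancestor.
 */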
4531
4532 /*
4533  * Foreign dirty flushing
4534  *
4535  * There's an inherent mismatch between memcg and writeback.  The former
4536  * tracks ownership per-page while the latter per-inode.  This was a
4537  * deliberate design decision because honoring per-page ownership in the
4538  * writeback path is complicated, may lead to higher CPU and IO overheads
4539  * and deemed unnecessary given that write-sharing an inode across
4540  * different cgroups isn't a common use-case.
4541  *
4542  * Combined with inode majority-writer ownership switching, this works well
4543  * enough in most cases but there are some pathological cases.  For
4544  * example, let's say there are two cgroups A and B which keep writing to
4545  * different but confined parts of the same inode.  B owns the inode and
4546  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4547  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4548  * triggering background writeback.  A will be slowed down without a way to
4549  * make writeback of the dirty pages happen.
4550  *
4551  * Conditions like the above can lead to a cgroup getting repeatedly and
4552  * severely throttled after making some progress after each
4553  * dirty_expire_interval while the underlying IO device is almost
4554  * completely idle.
4555  *
4556  * Solving this problem completely requires matching the ownership tracking
4557  * granularities between memcg and writeback in either direction.  However,
4558  * the more egregious behaviors can be avoided by simply remembering the
4559  * most recent foreign dirtying events and initiating remote flushes on
4560  * them when local writeback isn't enough to keep the memory clean enough.
4561  *
4562  * The following two functions implement such mechanism.  When a foreign
4563  * page - a page whose memcg and writeback ownerships don't match - is
4564  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4565  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4566  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4567  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4568  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4569  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4570  * limited to MEMCG_CGWB_FRN_CNT.
4571  *
4572  * The mechanism only remembers IDs and doesn't hold any object references.
4573  * As being wrong occasionally doesn't matter, updates and accesses to the
4574  * records are lockless and racy.
4575  */
4576 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4577                                              struct bdi_writeback *wb)
4578 {
4579         struct mem_cgroup *memcg = folio_memcg(folio);
4580         struct memcg_cgwb_frn *frn;
4581         u64 now = get_jiffies_64();
4582         u64 oldest_at = now;
4583         int oldest = -1;
4584         int i;
4585
4586         trace_track_foreign_dirty(folio, wb);
4587
4588         /*
4589          * Pick the slot to use.  If there is already a slot for @wb, keep
4590          * using it.  If not, replace the oldest one which isn't being
4591          * written out.
4592          */
4593         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4594                 frn = &memcg->cgwb_frn[i];
4595                 if (frn->bdi_id == wb->bdi->id &&
4596                     frn->memcg_id == wb->memcg_css->id)
4597                         break;
4598                 if (time_before64(frn->at, oldest_at) &&
4599                     atomic_read(&frn->done.cnt) == 1) {
4600                         oldest = i;
4601                         oldest_at = frn->at;
4602                 }
4603         }
4604
4605         if (i < MEMCG_CGWB_FRN_CNT) {
4606                 /*
4607                  * Re-using an existing one.  Update timestamp lazily to
4608                  * avoid making the cacheline hot.  We want them to be
4609                  * reasonably up-to-date and significantly shorter than
4610                  * dirty_expire_interval as that's what expires the record.
4611                  * Use the shorter of 1s and dirty_expire_interval / 8.
4612                  */
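                /* dirty_expire_interval is in centiseconds; "* 10" converts to msecs */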
4613                 unsigned long update_intv =
4614                         min_t(unsigned long, HZ,
4615                               msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4616
4617                 if (time_before64(frn->at, now - update_intv))
4618                         frn->at = now;
4619         } else if (oldest >= 0) {
4620                 /* replace the oldest one that isn't being written out */
4621                 frn = &memcg->cgwb_frn[oldest];
4622                 frn->bdi_id = wb->bdi->id;
4623                 frn->memcg_id = wb->memcg_css->id;
4624                 frn->at = now;
4625         }
4626 }
4627
4628 /* issue foreign writeback flushes for recorded foreign dirtying events */
4629 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4630 {
4631         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4632         unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4633         u64 now = jiffies_64;
4634         int i;
4635
4636         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4637                 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4638
4639                 /*
4640                  * If the record is older than dirty_expire_interval,
4641                  * writeback on it has already started.  No need to kick it
4642                  * off again.  Also, don't start a new one if there's
4643                  * already one in flight.
4644                  */
4645                 if (time_after64(frn->at, now - intv) &&
4646                     atomic_read(&frn->done.cnt) == 1) {
4647                         frn->at = 0;
4648                         trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4649                         cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4650                                                WB_REASON_FOREIGN_FLUSH,
4651                                                &frn->done);
4652                 }
4653         }
4654 }
4655
4656 #else   /* CONFIG_CGROUP_WRITEBACK */
4657
4658 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4659 {
4660         return 0;
4661 }
4662
4663 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4664 {
4665 }
4666
4667 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4668 {
4669 }
4670
4671 #endif  /* CONFIG_CGROUP_WRITEBACK */
4672
4673 /*
4674  * DO NOT USE IN NEW FILES.
4675  *
4676  * "cgroup.event_control" implementation.
4677  *
4678  * This is way over-engineered.  It tries to support fully configurable
4679  * events for each user.  Such a level of flexibility is completely
4680  * unnecessary, especially in light of the planned unified hierarchy.
4681  *
4682  * Please deprecate this and replace with something simpler if at all
4683  * possible.
4684  */
4685
4686 /*
4687  * Unregister event and free resources.
4688  *
4689  * Gets called from a workqueue.
4690  */
4691 static void memcg_event_remove(struct work_struct *work)
4692 {
4693         struct mem_cgroup_event *event =
4694                 container_of(work, struct mem_cgroup_event, remove);
4695         struct mem_cgroup *memcg = event->memcg;
4696
4697         remove_wait_queue(event->wqh, &event->wait);
4698
4699         event->unregister_event(memcg, event->eventfd);
4700
4701         /* Notify userspace the event is going away. */
4702         eventfd_signal(event->eventfd, 1);
4703
4704         eventfd_ctx_put(event->eventfd);
4705         kfree(event);
4706         css_put(&memcg->css);
4707 }
4708
4709 /*
4710  * Gets called on EPOLLHUP on the eventfd when the user closes it.
4711  *
4712  * Called with wqh->lock held and interrupts disabled.
4713  */
4714 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4715                             int sync, void *key)
4716 {
4717         struct mem_cgroup_event *event =
4718                 container_of(wait, struct mem_cgroup_event, wait);
4719         struct mem_cgroup *memcg = event->memcg;
4720         __poll_t flags = key_to_poll(key);
4721
4722         if (flags & EPOLLHUP) {
4723                 /*
4724                  * If the event has been detached at cgroup removal, we
4725                  * can simply return knowing the other side will clean up
4726                  * for us.
4727                  *
4728                  * We can't race against event freeing since the other
4729                  * side will require wqh->lock via remove_wait_queue(),
4730                  * which we hold.
4731                  */
4732                 spin_lock(&memcg->event_list_lock);
4733                 if (!list_empty(&event->list)) {
4734                         list_del_init(&event->list);
4735                         /*
4736                          * We are in atomic context, but memcg_event_remove()
4737                          * may sleep, so we have to call it from a workqueue.
4738                          */
4739                         schedule_work(&event->remove);
4740                 }
4741                 spin_unlock(&memcg->event_list_lock);
4742         }
4743
4744         return 0;
4745 }
4746
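/*
 * Called back via vfs_poll() during registration; hooks @event's wait
 * entry into the eventfd's waitqueue so that memcg_event_wake() runs on
 * wakeups, including the final EPOLLHUP.
 */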
4747 static void memcg_event_ptable_queue_proc(struct file *file,
4748                 wait_queue_head_t *wqh, poll_table *pt)
4749 {
4750         struct mem_cgroup_event *event =
4751                 container_of(pt, struct mem_cgroup_event, pt);
4752
4753         event->wqh = wqh;
4754         add_wait_queue(wqh, &event->wait);
4755 }
4756
4757 /*
4758  * DO NOT USE IN NEW FILES.
4759  *
4760  * Parse input and register new cgroup event handler.
4761  *
4762  * Input must be in format '<event_fd> <control_fd> <args>'.
4763  * Interpretation of args is defined by control file implementation.
4764  */
4765 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4766                                          char *buf, size_t nbytes, loff_t off)
4767 {
4768         struct cgroup_subsys_state *css = of_css(of);
4769         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4770         struct mem_cgroup_event *event;
4771         struct cgroup_subsys_state *cfile_css;
4772         unsigned int efd, cfd;
4773         struct fd efile;
4774         struct fd cfile;
4775         const char *name;
4776         char *endp;
4777         int ret;
4778
4779         if (IS_ENABLED(CONFIG_PREEMPT_RT))
4780                 return -EOPNOTSUPP;
4781
4782         buf = strstrip(buf);
4783
4784         efd = simple_strtoul(buf, &endp, 10);
4785         if (*endp != ' ')
4786                 return -EINVAL;
4787         buf = endp + 1;
4788
4789         cfd = simple_strtoul(buf, &endp, 10);
4790         if ((*endp != ' ') && (*endp != '\0'))
4791                 return -EINVAL;
4792         buf = endp + 1;
4793
4794         event = kzalloc(sizeof(*event), GFP_KERNEL);
4795         if (!event)
4796                 return -ENOMEM;
4797
4798         event->memcg = memcg;
4799         INIT_LIST_HEAD(&event->list);
4800         init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4801         init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4802         INIT_WORK(&event->remove, memcg_event_remove);
4803
4804         efile = fdget(efd);
4805         if (!efile.file) {
4806                 ret = -EBADF;
4807                 goto out_kfree;
4808         }
4809
4810         event->eventfd = eventfd_ctx_fileget(efile.file);
4811         if (IS_ERR(event->eventfd)) {
4812                 ret = PTR_ERR(event->eventfd);
4813                 goto out_put_efile;
4814         }
4815
4816         cfile = fdget(cfd);
4817         if (!cfile.file) {
4818                 ret = -EBADF;
4819                 goto out_put_eventfd;
4820         }
4821
4822         /* the process needs read permission on the control file */
4823         /* AV: shouldn't we check that it's been opened for read instead? */
4824         ret = file_permission(cfile.file, MAY_READ);
4825         if (ret < 0)
4826                 goto out_put_cfile;
4827
4828         /*
4829          * Determine the event callbacks and set them in @event.  This used
4830          * to be done via struct cftype but cgroup core no longer knows
4831          * about these events.  The following is crude but the whole thing
4832          * is for compatibility anyway.
4833          *
4834          * DO NOT ADD NEW FILES.
4835          */
4836         name = cfile.file->f_path.dentry->d_name.name;
4837
4838         if (!strcmp(name, "memory.usage_in_bytes")) {
4839                 event->register_event = mem_cgroup_usage_register_event;
4840                 event->unregister_event = mem_cgroup_usage_unregister_event;
4841         } else if (!strcmp(name, "memory.oom_control")) {
4842                 event->register_event = mem_cgroup_oom_register_event;
4843                 event->unregister_event = mem_cgroup_oom_unregister_event;
4844         } else if (!strcmp(name, "memory.pressure_level")) {
4845                 event->register_event = vmpressure_register_event;
4846                 event->unregister_event = vmpressure_unregister_event;
4847         } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4848                 event->register_event = memsw_cgroup_usage_register_event;
4849                 event->unregister_event = memsw_cgroup_usage_unregister_event;
4850         } else {
4851                 ret = -EINVAL;
4852                 goto out_put_cfile;
4853         }
4854
4855         /*
4856          * Verify that @cfile belongs to @css.  Also, remaining events are
4857          * automatically removed on cgroup destruction but the removal is
4858          * asynchronous, so take an extra ref on @css.
4859          */
4860         cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4861                                                &memory_cgrp_subsys);
4862         ret = -EINVAL;
4863         if (IS_ERR(cfile_css))
4864                 goto out_put_cfile;
4865         if (cfile_css != css) {
4866                 css_put(cfile_css);
4867                 goto out_put_cfile;
4868         }
4869
4870         ret = event->register_event(memcg, event->eventfd, buf);
4871         if (ret)
4872                 goto out_put_css;
4873
4874         vfs_poll(efile.file, &event->pt);
4875
4876         spin_lock_irq(&memcg->event_list_lock);
4877         list_add(&event->list, &memcg->event_list);
4878         spin_unlock_irq(&memcg->event_list_lock);
4879
4880         fdput(cfile);
4881         fdput(efile);
4882
4883         return nbytes;
4884
4885 out_put_css:
4886         css_put(css);
4887 out_put_cfile:
4888         fdput(cfile);
4889 out_put_eventfd:
4890         eventfd_ctx_put(event->eventfd);
4891 out_put_efile:
4892         fdput(efile);
4893 out_kfree:
4894         kfree(event);
4895
4896         return ret;
4897 }
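
/*
 * Illustrative userspace sequence (a hedged sketch; the paths, group name
 * and 50M threshold are made-up examples, not part of the kernel API):
 *
 *	uint64_t ticks;
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int wfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *		       O_WRONLY);
 *
 *	dprintf(wfd, "%d %d 52428800", efd, cfd);
 *	read(efd, &ticks, sizeof(ticks));	(blocks until 50M is crossed)
 */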
4898
4899 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4900 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
4901 {
4902         /*
4903          * Deprecated.
4904          * Please, take a look at tools/cgroup/memcg_slabinfo.py .
4905          */
4906         return 0;
4907 }
4908 #endif
4909
4910 static struct cftype mem_cgroup_legacy_files[] = {
4911         {
4912                 .name = "usage_in_bytes",
4913                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4914                 .read_u64 = mem_cgroup_read_u64,
4915         },
4916         {
4917                 .name = "max_usage_in_bytes",
4918                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4919                 .write = mem_cgroup_reset,
4920                 .read_u64 = mem_cgroup_read_u64,
4921         },
4922         {
4923                 .name = "limit_in_bytes",
4924                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4925                 .write = mem_cgroup_write,
4926                 .read_u64 = mem_cgroup_read_u64,
4927         },
4928         {
4929                 .name = "soft_limit_in_bytes",
4930                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4931                 .write = mem_cgroup_write,
4932                 .read_u64 = mem_cgroup_read_u64,
4933         },
4934         {
4935                 .name = "failcnt",
4936                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4937                 .write = mem_cgroup_reset,
4938                 .read_u64 = mem_cgroup_read_u64,
4939         },
4940         {
4941                 .name = "stat",
4942                 .seq_show = memcg_stat_show,
4943         },
4944         {
4945                 .name = "force_empty",
4946                 .write = mem_cgroup_force_empty_write,
4947         },
4948         {
4949                 .name = "use_hierarchy",
4950                 .write_u64 = mem_cgroup_hierarchy_write,
4951                 .read_u64 = mem_cgroup_hierarchy_read,
4952         },
4953         {
4954                 .name = "cgroup.event_control",         /* XXX: for compat */
4955                 .write = memcg_write_event_control,
4956                 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4957         },
4958         {
4959                 .name = "swappiness",
4960                 .read_u64 = mem_cgroup_swappiness_read,
4961                 .write_u64 = mem_cgroup_swappiness_write,
4962         },
4963         {
4964                 .name = "move_charge_at_immigrate",
4965                 .read_u64 = mem_cgroup_move_charge_read,
4966                 .write_u64 = mem_cgroup_move_charge_write,
4967         },
4968         {
4969                 .name = "oom_control",
4970                 .seq_show = mem_cgroup_oom_control_read,
4971                 .write_u64 = mem_cgroup_oom_control_write,
4972         },
4973         {
4974                 .name = "pressure_level",
4975         },
4976 #ifdef CONFIG_NUMA
4977         {
4978                 .name = "numa_stat",
4979                 .seq_show = memcg_numa_stat_show,
4980         },
4981 #endif
4982         {
4983                 .name = "kmem.limit_in_bytes",
4984                 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4985                 .write = mem_cgroup_write,
4986                 .read_u64 = mem_cgroup_read_u64,
4987         },
4988         {
4989                 .name = "kmem.usage_in_bytes",
4990                 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4991                 .read_u64 = mem_cgroup_read_u64,
4992         },
4993         {
4994                 .name = "kmem.failcnt",
4995                 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4996                 .write = mem_cgroup_reset,
4997                 .read_u64 = mem_cgroup_read_u64,
4998         },
4999         {
5000                 .name = "kmem.max_usage_in_bytes",
5001                 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5002                 .write = mem_cgroup_reset,
5003                 .read_u64 = mem_cgroup_read_u64,
5004         },
5005 #if defined(CONFIG_MEMCG_KMEM) && \
5006         (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5007         {
5008                 .name = "kmem.slabinfo",
5009                 .seq_show = mem_cgroup_slab_show,
5010         },
5011 #endif
5012         {
5013                 .name = "kmem.tcp.limit_in_bytes",
5014                 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5015                 .write = mem_cgroup_write,
5016                 .read_u64 = mem_cgroup_read_u64,
5017         },
5018         {
5019                 .name = "kmem.tcp.usage_in_bytes",
5020                 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5021                 .read_u64 = mem_cgroup_read_u64,
5022         },
5023         {
5024                 .name = "kmem.tcp.failcnt",
5025                 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5026                 .write = mem_cgroup_reset,
5027                 .read_u64 = mem_cgroup_read_u64,
5028         },
5029         {
5030                 .name = "kmem.tcp.max_usage_in_bytes",
5031                 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5032                 .write = mem_cgroup_reset,
5033                 .read_u64 = mem_cgroup_read_u64,
5034         },
5035         { },    /* terminate */
5036 };
5037
5038 /*
5039  * Private memory cgroup IDR
5040  *
5041  * Swap-out records and page cache shadow entries need to store memcg
5042  * references in constrained space, so we maintain an ID space that is
5043  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5044  * memory-controlled cgroups to 64k.
5045  *
5046  * However, there usually are many references to the offline CSS after
5047  * the cgroup has been destroyed, such as page cache or reclaimable
5048  * slab objects, that don't need to hang on to the ID. We want to keep
5049  * those dead CSS from occupying IDs, or we might quickly exhaust the
5050  * relatively small ID space and prevent the creation of new cgroups
5051  * even when there are much fewer than 64k cgroups - possibly none.
5052  *
5053  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5054  * be freed and recycled when it's no longer needed, which is usually
5055  * when the CSS is offlined.
5056  *
5057  * The only exception to that are records of swapped out tmpfs/shmem
5058  * pages that need to be attributed to live ancestors on swapin. But
5059  * those references are manageable from userspace.
5060  */
5061
5062 static DEFINE_IDR(mem_cgroup_idr);
5063
5064 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5065 {
5066         if (memcg->id.id > 0) {
5067                 idr_remove(&mem_cgroup_idr, memcg->id.id);
5068                 memcg->id.id = 0;
5069         }
5070 }
5071
5072 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5073                                                   unsigned int n)
5074 {
5075         refcount_add(n, &memcg->id.ref);
5076 }
5077
5078 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5079 {
5080         if (refcount_sub_and_test(n, &memcg->id.ref)) {
5081                 mem_cgroup_id_remove(memcg);
5082
5083                 /* Memcg ID pins CSS */
5084                 css_put(&memcg->css);
5085         }
5086 }
5087
5088 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5089 {
5090         mem_cgroup_id_put_many(memcg, 1);
5091 }
5092
5093 /**
5094  * mem_cgroup_from_id - look up a memcg from a memcg id
5095  * @id: the memcg id to look up
5096  *
5097  * Caller must hold rcu_read_lock().
5098  */
5099 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5100 {
5101         WARN_ON_ONCE(!rcu_read_lock_held());
5102         return idr_find(&mem_cgroup_idr, id);
5103 }
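
/*
 * A hedged sketch of the expected caller pattern: look the ID up under
 * RCU and pin the css before using the memcg outside the read section:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */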
5104
5105 #ifdef CONFIG_SHRINKER_DEBUG
5106 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5107 {
5108         struct cgroup *cgrp;
5109         struct cgroup_subsys_state *css;
5110         struct mem_cgroup *memcg;
5111
5112         cgrp = cgroup_get_from_id(ino);
5113         if (!cgrp)
5114                 return ERR_PTR(-ENOENT);
5115
5116         css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5117         if (css)
5118                 memcg = container_of(css, struct mem_cgroup, css);
5119         else
5120                 memcg = ERR_PTR(-ENOENT);
5121
5122         cgroup_put(cgrp);
5123
5124         return memcg;
5125 }
5126 #endif
5127
5128 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5129 {
5130         struct mem_cgroup_per_node *pn;
5131
5132         pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5133         if (!pn)
5134                 return 1;
5135
5136         pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5137                                                    GFP_KERNEL_ACCOUNT);
5138         if (!pn->lruvec_stats_percpu) {
5139                 kfree(pn);
5140                 return 1;
5141         }
5142
5143         lruvec_init(&pn->lruvec);
5144         pn->memcg = memcg;
5145
5146         memcg->nodeinfo[node] = pn;
5147         return 0;
5148 }
5149
5150 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5151 {
5152         struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5153
5154         if (!pn)
5155                 return;
5156
5157         free_percpu(pn->lruvec_stats_percpu);
5158         kfree(pn);
5159 }
5160
5161 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5162 {
5163         int node;
5164
5165         for_each_node(node)
5166                 free_mem_cgroup_per_node_info(memcg, node);
5167         free_percpu(memcg->vmstats_percpu);
5168         kfree(memcg);
5169 }
5170
5171 static void mem_cgroup_free(struct mem_cgroup *memcg)
5172 {
5173         memcg_wb_domain_exit(memcg);
5174         __mem_cgroup_free(memcg);
5175 }
5176
5177 static struct mem_cgroup *mem_cgroup_alloc(void)
5178 {
5179         struct mem_cgroup *memcg;
5180         int node;
5181         int __maybe_unused i;
5182         long error = -ENOMEM;
5183
5184         memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5185         if (!memcg)
5186                 return ERR_PTR(error);
5187
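        /*
         * Reserve the ID with a NULL pointer for now; the memcg is only
         * published via idr_replace() at the end of this function, once
         * fully initialized, so a concurrent mem_cgroup_from_id() cannot
         * observe a half-constructed memcg.
         */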
5188         memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5189                                  1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5190         if (memcg->id.id < 0) {
5191                 error = memcg->id.id;
5192                 goto fail;
5193         }
5194
5195         memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5196                                                  GFP_KERNEL_ACCOUNT);
5197         if (!memcg->vmstats_percpu)
5198                 goto fail;
5199
5200         for_each_node(node)
5201                 if (alloc_mem_cgroup_per_node_info(memcg, node))
5202                         goto fail;
5203
5204         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5205                 goto fail;
5206
5207         INIT_WORK(&memcg->high_work, high_work_func);
5208         INIT_LIST_HEAD(&memcg->oom_notify);
5209         mutex_init(&memcg->thresholds_lock);
5210         spin_lock_init(&memcg->move_lock);
5211         vmpressure_init(&memcg->vmpressure);
5212         INIT_LIST_HEAD(&memcg->event_list);
5213         spin_lock_init(&memcg->event_list_lock);
5214         memcg->socket_pressure = jiffies;
5215 #ifdef CONFIG_MEMCG_KMEM
5216         memcg->kmemcg_id = -1;
5217         INIT_LIST_HEAD(&memcg->objcg_list);
5218 #endif
5219 #ifdef CONFIG_CGROUP_WRITEBACK
5220         INIT_LIST_HEAD(&memcg->cgwb_list);
5221         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5222                 memcg->cgwb_frn[i].done =
5223                         __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5224 #endif
5225 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5226         spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5227         INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5228         memcg->deferred_split_queue.split_queue_len = 0;
5229 #endif
5230         idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5231         return memcg;
5232 fail:
5233         mem_cgroup_id_remove(memcg);
5234         __mem_cgroup_free(memcg);
5235         return ERR_PTR(error);
5236 }
5237
5238 static struct cgroup_subsys_state * __ref
5239 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5240 {
5241         struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5242         struct mem_cgroup *memcg, *old_memcg;
5243
5244         old_memcg = set_active_memcg(parent);
5245         memcg = mem_cgroup_alloc();
5246         set_active_memcg(old_memcg);
5247         if (IS_ERR(memcg))
5248                 return ERR_CAST(memcg);
5249
5250         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5251         memcg->soft_limit = PAGE_COUNTER_MAX;
5252 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5253         memcg->zswap_max = PAGE_COUNTER_MAX;
5254 #endif
5255         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5256         if (parent) {
5257                 memcg->swappiness = mem_cgroup_swappiness(parent);
5258                 memcg->oom_kill_disable = parent->oom_kill_disable;
5259
5260                 page_counter_init(&memcg->memory, &parent->memory);
5261                 page_counter_init(&memcg->swap, &parent->swap);
5262                 page_counter_init(&memcg->kmem, &parent->kmem);
5263                 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5264         } else {
5265                 page_counter_init(&memcg->memory, NULL);
5266                 page_counter_init(&memcg->swap, NULL);
5267                 page_counter_init(&memcg->kmem, NULL);
5268                 page_counter_init(&memcg->tcpmem, NULL);
5269
5270                 root_mem_cgroup = memcg;
5271                 return &memcg->css;
5272         }
5273
5274         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5275                 static_branch_inc(&memcg_sockets_enabled_key);
5276
5277         return &memcg->css;
5278 }
5279
5280 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5281 {
5282         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5283
5284         if (memcg_online_kmem(memcg))
5285                 goto remove_id;
5286
5287         /*
5288          * A memcg must be visible to expand_shrinker_info()
5289          * by the time the shrinker maps are allocated.  So allocate
5290          * the maps here, once for_each_mem_cgroup() can't skip it.
5291          */
5292         if (alloc_shrinker_info(memcg))
5293                 goto offline_kmem;
5294
5295         /* Online state pins memcg ID, memcg ID pins CSS */
5296         refcount_set(&memcg->id.ref, 1);
5297         css_get(css);
5298
5299         if (unlikely(mem_cgroup_is_root(memcg)))
5300                 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5301                                    2UL*HZ);
5302         return 0;
5303 offline_kmem:
5304         memcg_offline_kmem(memcg);
5305 remove_id:
5306         mem_cgroup_id_remove(memcg);
5307         return -ENOMEM;
5308 }
5309
5310 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5311 {
5312         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5313         struct mem_cgroup_event *event, *tmp;
5314
5315         /*
5316          * Unregister events and notify userspace.
5317          * Notify userspace about cgroup removal only after rmdir of the
5318          * cgroup directory, to avoid races between userspace and kernelspace.
5319          */
5320         spin_lock_irq(&memcg->event_list_lock);
5321         list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5322                 list_del_init(&event->list);
5323                 schedule_work(&event->remove);
5324         }
5325         spin_unlock_irq(&memcg->event_list_lock);
5326
5327         page_counter_set_min(&memcg->memory, 0);
5328         page_counter_set_low(&memcg->memory, 0);
5329
5330         memcg_offline_kmem(memcg);
5331         reparent_shrinker_deferred(memcg);
5332         wb_memcg_offline(memcg);
5333
5334         drain_all_stock(memcg);
5335
5336         mem_cgroup_id_put(memcg);
5337 }
5338
5339 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5340 {
5341         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5342
5343         invalidate_reclaim_iterators(memcg);
5344 }
5345
5346 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5347 {
5348         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5349         int __maybe_unused i;
5350
5351 #ifdef CONFIG_CGROUP_WRITEBACK
5352         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5353                 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5354 #endif
5355         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5356                 static_branch_dec(&memcg_sockets_enabled_key);
5357
5358         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5359                 static_branch_dec(&memcg_sockets_enabled_key);
5360
5361         vmpressure_cleanup(&memcg->vmpressure);
5362         cancel_work_sync(&memcg->high_work);
5363         mem_cgroup_remove_from_trees(memcg);
5364         free_shrinker_info(memcg);
5365         mem_cgroup_free(memcg);
5366 }
5367
5368 /**
5369  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5370  * @css: the target css
5371  *
5372  * Reset the states of the mem_cgroup associated with @css.  This is
5373  * invoked when the userland requests disabling on the default hierarchy
5374  * but the memcg is pinned through dependency.  The memcg should stop
5375  * applying policies and should revert to the vanilla state as it may be
5376  * made visible again.
5377  *
5378  * The current implementation only resets the essential configurations.
5379  * This needs to be expanded to cover all the visible parts.
5380  */
5381 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5382 {
5383         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5384
5385         page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5386         page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5387         page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5388         page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5389         page_counter_set_min(&memcg->memory, 0);
5390         page_counter_set_low(&memcg->memory, 0);
5391         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5392         memcg->soft_limit = PAGE_COUNTER_MAX;
5393         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5394         memcg_wb_domain_size_changed(memcg);
5395 }
5396
5397 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5398 {
5399         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5400         struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5401         struct memcg_vmstats_percpu *statc;
5402         long delta, v;
5403         int i, nid;
5404
5405         statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5406
5407         for (i = 0; i < MEMCG_NR_STAT; i++) {
5408                 /*
5409                  * Collect the aggregated propagation counts of groups
5410                  * below us. We're in a per-cpu loop here and this is
5411                  * a global counter, so the first cycle will get them.
5412                  */
5413                 delta = memcg->vmstats.state_pending[i];
5414                 if (delta)
5415                         memcg->vmstats.state_pending[i] = 0;
5416
5417                 /* Add CPU changes on this level since the last flush */
5418                 v = READ_ONCE(statc->state[i]);
5419                 if (v != statc->state_prev[i]) {
5420                         delta += v - statc->state_prev[i];
5421                         statc->state_prev[i] = v;
5422                 }
5423
5424                 if (!delta)
5425                         continue;
5426
5427                 /* Aggregate counts on this level and propagate upwards */
5428                 memcg->vmstats.state[i] += delta;
5429                 if (parent)
5430                         parent->vmstats.state_pending[i] += delta;
5431         }
5432
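        /* Now do the same two-step aggregation for the vm event counters. */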
5433         for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5434                 delta = memcg->vmstats.events_pending[i];
5435                 if (delta)
5436                         memcg->vmstats.events_pending[i] = 0;
5437
5438                 v = READ_ONCE(statc->events[i]);
5439                 if (v != statc->events_prev[i]) {
5440                         delta += v - statc->events_prev[i];
5441                         statc->events_prev[i] = v;
5442                 }
5443
5444                 if (!delta)
5445                         continue;
5446
5447                 memcg->vmstats.events[i] += delta;
5448                 if (parent)
5449                         parent->vmstats.events_pending[i] += delta;
5450         }
5451
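        /* Finally, aggregate the per-node lruvec stats the same way. */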
5452         for_each_node_state(nid, N_MEMORY) {
5453                 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5454                 struct mem_cgroup_per_node *ppn = NULL;
5455                 struct lruvec_stats_percpu *lstatc;
5456
5457                 if (parent)
5458                         ppn = parent->nodeinfo[nid];
5459
5460                 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5461
5462                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5463                         delta = pn->lruvec_stats.state_pending[i];
5464                         if (delta)
5465                                 pn->lruvec_stats.state_pending[i] = 0;
5466
5467                         v = READ_ONCE(lstatc->state[i]);
5468                         if (v != lstatc->state_prev[i]) {
5469                                 delta += v - lstatc->state_prev[i];
5470                                 lstatc->state_prev[i] = v;
5471                         }
5472
5473                         if (!delta)
5474                                 continue;
5475
5476                         pn->lruvec_stats.state[i] += delta;
5477                         if (ppn)
5478                                 ppn->lruvec_stats.state_pending[i] += delta;
5479                 }
5480         }
5481 }
5482
5483 #ifdef CONFIG_MMU
5484 /* Handlers for move charge at task migration. */
5485 static int mem_cgroup_do_precharge(unsigned long count)
5486 {
5487         int ret;
5488
5489         /* Try a single bulk charge without reclaim first, kswapd may wake */
5490         ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5491         if (!ret) {
5492                 mc.precharge += count;
5493                 return ret;
5494         }
5495
5496         /* Try charges one by one with reclaim, but do not retry */
5497         while (count--) {
5498                 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5499                 if (ret)
5500                         return ret;
5501                 mc.precharge++;
5502                 cond_resched();
5503         }
5504         return 0;
5505 }
5506
5507 union mc_target {
5508         struct page     *page;
5509         swp_entry_t     ent;
5510 };
5511
5512 enum mc_target_type {
5513         MC_TARGET_NONE = 0,
5514         MC_TARGET_PAGE,
5515         MC_TARGET_SWAP,
5516         MC_TARGET_DEVICE,
5517 };
5518
5519 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5520                                                 unsigned long addr, pte_t ptent)
5521 {
5522         struct page *page = vm_normal_page(vma, addr, ptent);
5523
5524         if (!page || !page_mapped(page))
5525                 return NULL;
5526         if (PageAnon(page)) {
5527                 if (!(mc.flags & MOVE_ANON))
5528                         return NULL;
5529         } else {
5530                 if (!(mc.flags & MOVE_FILE))
5531                         return NULL;
5532         }
5533         if (!get_page_unless_zero(page))
5534                 return NULL;
5535
5536         return page;
5537 }
5538
5539 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5540 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5541                         pte_t ptent, swp_entry_t *entry)
5542 {
5543         struct page *page = NULL;
5544         swp_entry_t ent = pte_to_swp_entry(ptent);
5545
5546         if (!(mc.flags & MOVE_ANON))
5547                 return NULL;
5548
5549         /*
5550          * Handle device private pages that are not accessible by the CPU, but
5551          * stored as special swap entries in the page table.
5552          */
5553         if (is_device_private_entry(ent)) {
5554                 page = pfn_swap_entry_to_page(ent);
5555                 if (!get_page_unless_zero(page))
5556                         return NULL;
5557                 return page;
5558         }
5559
5560         if (non_swap_entry(ent))
5561                 return NULL;
5562
5563         /*
5564          * Because lookup_swap_cache() updates some statistics counters,
5565          * we call find_get_page() on the swap address space directly.
5566          */
5567         page = find_get_page(swap_address_space(ent), swp_offset(ent));
5568         entry->val = ent.val;
5569
5570         return page;
5571 }
5572 #else
5573 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5574                         pte_t ptent, swp_entry_t *entry)
5575 {
5576         return NULL;
5577 }
5578 #endif
5579
5580 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5581                         unsigned long addr, pte_t ptent)
5582 {
5583         if (!vma->vm_file) /* anonymous vma */
5584                 return NULL;
5585         if (!(mc.flags & MOVE_FILE))
5586                 return NULL;
5587
5588         /* page is moved even if it's not RSS of this task (page-faulted). */
5589         /* shmem/tmpfs may report the page as swapped out; account for that too. */
5590         return find_get_incore_page(vma->vm_file->f_mapping,
5591                         linear_page_index(vma, addr));
5592 }
5593
5594 /**
5595  * mem_cgroup_move_account - move account of the page
5596  * @page: the page
5597  * @compound: charge the page as compound or small page
5598  * @from: mem_cgroup which the page is moved from.
5599  * @to: mem_cgroup which the page is moved to. @from != @to.
5600  *
5601  * The caller must make sure the page is not on the LRU
5602  * (isolate_lru_page() is useful).
5603  *
5604  * This function doesn't "charge" the new cgroup or "uncharge" the old one.
5605  */
5606 static int mem_cgroup_move_account(struct page *page,
5607                                    bool compound,
5608                                    struct mem_cgroup *from,
5609                                    struct mem_cgroup *to)
5610 {
5611         struct folio *folio = page_folio(page);
5612         struct lruvec *from_vec, *to_vec;
5613         struct pglist_data *pgdat;
5614         unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5615         int nid, ret;
5616
5617         VM_BUG_ON(from == to);
5618         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5619         VM_BUG_ON(compound && !folio_test_large(folio));
5620
5621         /*
5622          * Prevent mem_cgroup_migrate() from looking at
5623          * page's memory cgroup of its source page while we change it.
5624          */
5625         ret = -EBUSY;
5626         if (!folio_trylock(folio))
5627                 goto out;
5628
5629         ret = -EINVAL;
5630         if (folio_memcg(folio) != from)
5631                 goto out_unlock;
5632
5633         pgdat = folio_pgdat(folio);
5634         from_vec = mem_cgroup_lruvec(from, pgdat);
5635         to_vec = mem_cgroup_lruvec(to, pgdat);
5636
5637         folio_memcg_lock(folio);
5638
5639         if (folio_test_anon(folio)) {
5640                 if (folio_mapped(folio)) {
5641                         __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5642                         __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5643                         if (folio_test_transhuge(folio)) {
5644                                 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5645                                                    -nr_pages);
5646                                 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5647                                                    nr_pages);
5648                         }
5649                 }
5650         } else {
5651                 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5652                 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5653
5654                 if (folio_test_swapbacked(folio)) {
5655                         __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5656                         __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5657                 }
5658
5659                 if (folio_mapped(folio)) {
5660                         __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5661                         __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5662                 }
5663
5664                 if (folio_test_dirty(folio)) {
5665                         struct address_space *mapping = folio_mapping(folio);
5666
5667                         if (mapping_can_writeback(mapping)) {
5668                                 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5669                                                    -nr_pages);
5670                                 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5671                                                    nr_pages);
5672                         }
5673                 }
5674         }
5675
5676         if (folio_test_writeback(folio)) {
5677                 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5678                 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5679         }
5680
5681         /*
5682          * All state has been migrated, let's switch to the new memcg.
5683          *
5684          * It is safe to change page's memcg here because the page
5685          * is referenced, charged, isolated, and locked: we can't race
5686          * with (un)charging, migration, LRU putback, or anything else
5687          * that would rely on a stable page's memory cgroup.
5688          *
5689          * Note that lock_page_memcg is a memcg lock, not a page lock,
5690          * to save space. As soon as we switch page's memory cgroup to a
5691          * new memcg that isn't locked, the above state can change
5692          * concurrently again. Make sure we're truly done with it.
5693          */
5694         smp_mb();
5695
5696         css_get(&to->css);
5697         css_put(&from->css);
5698
5699         folio->memcg_data = (unsigned long)to;
5700
5701         __folio_memcg_unlock(from);
5702
5703         ret = 0;
5704         nid = folio_nid(folio);
5705
5706         local_irq_disable();
5707         mem_cgroup_charge_statistics(to, nr_pages);
5708         memcg_check_events(to, nid);
5709         mem_cgroup_charge_statistics(from, -nr_pages);
5710         memcg_check_events(from, nid);
5711         local_irq_enable();
5712 out_unlock:
5713         folio_unlock(folio);
5714 out:
5715         return ret;
5716 }
5717
5718 /**
5719  * get_mctgt_type - get target type of moving charge
5720  * @vma: the vma the pte to be checked belongs
5721  * @addr: the address corresponding to the pte to be checked
5722  * @ptent: the pte to be checked
5723  * @target: the pointer the target page or swap ent will be stored(can be NULL)
5724  *
5725  * Returns
5726  *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5727  *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target
5728  *     for move charge.  If @target is not NULL, the page is stored in
5729  *     target->page with an extra refcount taken (callers should handle it).
5730  *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5731  *     target for charge migration.  If @target is not NULL, the entry is
5732  *     stored in target->ent.
5733  *   3 (MC_TARGET_DEVICE): like MC_TARGET_PAGE, but the page is device
5734  *     memory and thus not on the LRU.
5735  *     For now such a page is charged like a regular page would be, as
5736  *     for all intents and purposes it is just special memory taking the
5737  *     place of a regular page.
5738  *
5739  *     See Documentation/vm/hmm.rst and include/linux/hmm.h.
5740  *
5741  * Called with pte lock held.
5742  */
5743
5744 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5745                 unsigned long addr, pte_t ptent, union mc_target *target)
5746 {
5747         struct page *page = NULL;
5748         enum mc_target_type ret = MC_TARGET_NONE;
5749         swp_entry_t ent = { .val = 0 };
5750
5751         if (pte_present(ptent))
5752                 page = mc_handle_present_pte(vma, addr, ptent);
5753         else if (pte_none_mostly(ptent))
5754                 /*
5755                  * PTE markers should be treated as a none pte here, separated
5756                  * from other swap handling below.
5757                  */
5758                 page = mc_handle_file_pte(vma, addr, ptent);
5759         else if (is_swap_pte(ptent))
5760                 page = mc_handle_swap_pte(vma, ptent, &ent);
5761
5762         if (!page && !ent.val)
5763                 return ret;
5764         if (page) {
5765                 /*
5766                  * Do only a loose check w/o serialization.
5767                  * mem_cgroup_move_account() checks whether the page is
5768                  * valid under LRU exclusion.
5769                  */
5770                 if (page_memcg(page) == mc.from) {
5771                         ret = MC_TARGET_PAGE;
5772                         if (is_device_private_page(page) ||
5773                             is_device_coherent_page(page))
5774                                 ret = MC_TARGET_DEVICE;
5775                         if (target)
5776                                 target->page = page;
5777                 }
5778                 if (!ret || !target)
5779                         put_page(page);
5780         }
5781         /*
5782          * There is a swap entry and a page doesn't exist or isn't charged.
5783          * But we cannot move a tail page of a THP.
5784          */
5785         if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5786             mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5787                 ret = MC_TARGET_SWAP;
5788                 if (target)
5789                         target->ent = ent;
5790         }
5791         return ret;
5792 }
5793
5794 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5795 /*
5796  * We don't consider PMD mapped swapping or file mapped pages because THP does
5797  * not support them for now.
5798  * Caller should make sure that pmd_trans_huge(pmd) is true.
5799  */
5800 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5801                 unsigned long addr, pmd_t pmd, union mc_target *target)
5802 {
5803         struct page *page = NULL;
5804         enum mc_target_type ret = MC_TARGET_NONE;
5805
5806         if (unlikely(is_swap_pmd(pmd))) {
5807                 VM_BUG_ON(thp_migration_supported() &&
5808                                   !is_pmd_migration_entry(pmd));
5809                 return ret;
5810         }
5811         page = pmd_page(pmd);
5812         VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5813         if (!(mc.flags & MOVE_ANON))
5814                 return ret;
5815         if (page_memcg(page) == mc.from) {
5816                 ret = MC_TARGET_PAGE;
5817                 if (target) {
5818                         get_page(page);
5819                         target->page = page;
5820                 }
5821         }
5822         return ret;
5823 }
5824 #else
5825 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5826                 unsigned long addr, pmd_t pmd, union mc_target *target)
5827 {
5828         return MC_TARGET_NONE;
5829 }
5830 #endif
5831
5832 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5833                                         unsigned long addr, unsigned long end,
5834                                         struct mm_walk *walk)
5835 {
5836         struct vm_area_struct *vma = walk->vma;
5837         pte_t *pte;
5838         spinlock_t *ptl;
5839
5840         ptl = pmd_trans_huge_lock(pmd, vma);
5841         if (ptl) {
5842                 /*
5843                  * Note there can not be MC_TARGET_DEVICE for now, as we do not
5844                  * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
5845                  * this might change.
5846                  */
5847                 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5848                         mc.precharge += HPAGE_PMD_NR;
5849                 spin_unlock(ptl);
5850                 return 0;
5851         }
5852
5853         if (pmd_trans_unstable(pmd))
5854                 return 0;
5855         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5856         for (; addr != end; pte++, addr += PAGE_SIZE)
5857                 if (get_mctgt_type(vma, addr, *pte, NULL))
5858                         mc.precharge++; /* increment precharge temporarily */
5859         pte_unmap_unlock(pte - 1, ptl);
5860         cond_resched();
5861
5862         return 0;
5863 }
5864
5865 static const struct mm_walk_ops precharge_walk_ops = {
5866         .pmd_entry      = mem_cgroup_count_precharge_pte_range,
5867 };
5868
5869 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5870 {
5871         unsigned long precharge;
5872
5873         mmap_read_lock(mm);
5874         walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5875         mmap_read_unlock(mm);
5876
5877         precharge = mc.precharge;
5878         mc.precharge = 0;
5879
5880         return precharge;
5881 }
5882
5883 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5884 {
5885         unsigned long precharge = mem_cgroup_count_precharge(mm);
5886
5887         VM_BUG_ON(mc.moving_task);
5888         mc.moving_task = current;
5889         return mem_cgroup_do_precharge(precharge);
5890 }
5891
5892 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5893 static void __mem_cgroup_clear_mc(void)
5894 {
5895         struct mem_cgroup *from = mc.from;
5896         struct mem_cgroup *to = mc.to;
5897
5898         /* we must uncharge all the leftover precharges from mc.to */
5899         if (mc.precharge) {
5900                 cancel_charge(mc.to, mc.precharge);
5901                 mc.precharge = 0;
5902         }
5903         /*
5904          * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5905          * we must uncharge here.
5906          */
5907         if (mc.moved_charge) {
5908                 cancel_charge(mc.from, mc.moved_charge);
5909                 mc.moved_charge = 0;
5910         }
5911         /* we must fixup refcnts and charges */
5912         if (mc.moved_swap) {
5913                 /* uncharge swap account from the old cgroup */
5914                 if (!mem_cgroup_is_root(mc.from))
5915                         page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5916
5917                 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5918
5919                 /*
5920                  * we charged both to->memory and to->memsw, so we
5921                  * should uncharge to->memory.
5922                  */
5923                 if (!mem_cgroup_is_root(mc.to))
5924                         page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5925
5926                 mc.moved_swap = 0;
5927         }
5928         memcg_oom_recover(from);
5929         memcg_oom_recover(to);
5930         wake_up_all(&mc.waitq);
5931 }
5932
5933 static void mem_cgroup_clear_mc(void)
5934 {
5935         struct mm_struct *mm = mc.mm;
5936
5937         /*
5938          * we must clear moving_task before waking up waiters at the end of
5939          * task migration.
5940          */
5941         mc.moving_task = NULL;
5942         __mem_cgroup_clear_mc();
5943         spin_lock(&mc.lock);
5944         mc.from = NULL;
5945         mc.to = NULL;
5946         mc.mm = NULL;
5947         spin_unlock(&mc.lock);
5948
5949         mmput(mm);
5950 }
5951
5952 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5953 {
5954         struct cgroup_subsys_state *css;
5955         struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5956         struct mem_cgroup *from;
5957         struct task_struct *leader, *p;
5958         struct mm_struct *mm;
5959         unsigned long move_flags;
5960         int ret = 0;
5961
5962         /* charge immigration isn't supported on the default hierarchy */
5963         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5964                 return 0;
5965
5966         /*
5967          * Multi-process migrations only happen on the default hierarchy
5968          * where charge immigration is not used.  Perform charge
5969          * immigration if @tset contains a leader and whine if there are
5970          * multiple.
5971          */
5972         p = NULL;
5973         cgroup_taskset_for_each_leader(leader, css, tset) {
5974                 WARN_ON_ONCE(p);
5975                 p = leader;
5976                 memcg = mem_cgroup_from_css(css);
5977         }
5978         if (!p)
5979                 return 0;
5980
5981         /*
5982          * We are now committed to this value whatever it is. Changes in this
5983          * tunable will only affect upcoming migrations, not the current one.
5984          * So we save it now and stick with it for the rest of this migration.
5985          */
5986         move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5987         if (!move_flags)
5988                 return 0;
5989
5990         from = mem_cgroup_from_task(p);
5991
5992         VM_BUG_ON(from == memcg);
5993
5994         mm = get_task_mm(p);
5995         if (!mm)
5996                 return 0;
5997         /* We move charges only when we move an owner of the mm */
5998         if (mm->owner == p) {
5999                 VM_BUG_ON(mc.from);
6000                 VM_BUG_ON(mc.to);
6001                 VM_BUG_ON(mc.precharge);
6002                 VM_BUG_ON(mc.moved_charge);
6003                 VM_BUG_ON(mc.moved_swap);
6004
6005                 spin_lock(&mc.lock);
6006                 mc.mm = mm;
6007                 mc.from = from;
6008                 mc.to = memcg;
6009                 mc.flags = move_flags;
6010                 spin_unlock(&mc.lock);
6011                 /* We set mc.moving_task later */
6012
6013                 ret = mem_cgroup_precharge_mc(mm);
6014                 if (ret)
6015                         mem_cgroup_clear_mc();
6016         } else {
6017                 mmput(mm);
6018         }
6019         return ret;
6020 }
6021
6022 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6023 {
6024         if (mc.to)
6025                 mem_cgroup_clear_mc();
6026 }
6027
6028 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6029                                 unsigned long addr, unsigned long end,
6030                                 struct mm_walk *walk)
6031 {
6032         int ret = 0;
6033         struct vm_area_struct *vma = walk->vma;
6034         pte_t *pte;
6035         spinlock_t *ptl;
6036         enum mc_target_type target_type;
6037         union mc_target target;
6038         struct page *page;
6039
6040         ptl = pmd_trans_huge_lock(pmd, vma);
6041         if (ptl) {
6042                 if (mc.precharge < HPAGE_PMD_NR) {
6043                         spin_unlock(ptl);
6044                         return 0;
6045                 }
6046                 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6047                 if (target_type == MC_TARGET_PAGE) {
6048                         page = target.page;
6049                         if (!isolate_lru_page(page)) {
6050                                 if (!mem_cgroup_move_account(page, true,
6051                                                              mc.from, mc.to)) {
6052                                         mc.precharge -= HPAGE_PMD_NR;
6053                                         mc.moved_charge += HPAGE_PMD_NR;
6054                                 }
6055                                 putback_lru_page(page);
6056                         }
6057                         put_page(page);
6058                 } else if (target_type == MC_TARGET_DEVICE) {
6059                         page = target.page;
6060                         if (!mem_cgroup_move_account(page, true,
6061                                                      mc.from, mc.to)) {
6062                                 mc.precharge -= HPAGE_PMD_NR;
6063                                 mc.moved_charge += HPAGE_PMD_NR;
6064                         }
6065                         put_page(page);
6066                 }
6067                 spin_unlock(ptl);
6068                 return 0;
6069         }
6070
6071         if (pmd_trans_unstable(pmd))
6072                 return 0;
6073 retry:
6074         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6075         for (; addr != end; addr += PAGE_SIZE) {
6076                 pte_t ptent = *(pte++);
6077                 bool device = false;
6078                 swp_entry_t ent;
6079
6080                 if (!mc.precharge)
6081                         break;
6082
6083                 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6084                 case MC_TARGET_DEVICE:
6085                         device = true;
6086                         fallthrough;
6087                 case MC_TARGET_PAGE:
6088                         page = target.page;
6089                         /*
6090                          * We can have part of a split pmd here. Moving it could
6091                          * be done, but it would be too convoluted, so simply
6092                          * ignore such a partial THP and keep it in the original
6093                          * memcg. There should be somebody mapping the head.
6094                          */
6095                         if (PageTransCompound(page))
6096                                 goto put;
6097                         if (!device && isolate_lru_page(page))
6098                                 goto put;
6099                         if (!mem_cgroup_move_account(page, false,
6100                                                 mc.from, mc.to)) {
6101                                 mc.precharge--;
6102                                 /* we uncharge from mc.from later. */
6103                                 mc.moved_charge++;
6104                         }
6105                         if (!device)
6106                                 putback_lru_page(page);
6107 put:                    /* get_mctgt_type() gets the page */
6108                         put_page(page);
6109                         break;
6110                 case MC_TARGET_SWAP:
6111                         ent = target.ent;
6112                         if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6113                                 mc.precharge--;
6114                                 mem_cgroup_id_get_many(mc.to, 1);
6115                                 /* we fixup other refcnts and charges later. */
6116                                 mc.moved_swap++;
6117                         }
6118                         break;
6119                 default:
6120                         break;
6121                 }
6122         }
6123         pte_unmap_unlock(pte - 1, ptl);
6124         cond_resched();
6125
6126         if (addr != end) {
6127                 /*
6128                  * We have consumed all the precharges we got in can_attach().
6129                  * We now try to charge one page at a time, but we stop making
6130                  * any further charges to mc.to once a single charge has failed
6131                  * during the attach() phase.
6132                  */
6133                 ret = mem_cgroup_do_precharge(1);
6134                 if (!ret)
6135                         goto retry;
6136         }
6137
6138         return ret;
6139 }
6140
6141 static const struct mm_walk_ops charge_walk_ops = {
6142         .pmd_entry      = mem_cgroup_move_charge_pte_range,
6143 };
6144
6145 static void mem_cgroup_move_charge(void)
6146 {
6147         lru_add_drain_all();
6148         /*
6149          * Signal lock_page_memcg() to take the memcg's move_lock
6150          * while we're moving its pages to another memcg. Then wait
6151          * for already started RCU-only updates to finish.
6152          */
6153         atomic_inc(&mc.from->moving_account);
6154         synchronize_rcu();
6155 retry:
6156         if (unlikely(!mmap_read_trylock(mc.mm))) {
6157                 /*
6158                  * Someone holding the mmap_lock might be waiting on our
6159                  * waitq. So we cancel all extra charges, wake up all waiters,
6160                  * and retry. Because we cancel the precharges, we might not be
6161                  * able to move enough charges, but moving charge is a
6162                  * best-effort feature anyway, so it wouldn't be a big problem.
6163                  */
6164                 __mem_cgroup_clear_mc();
6165                 cond_resched();
6166                 goto retry;
6167         }
6168         /*
6169          * Once we have consumed all the precharges and failed to charge
6170          * any more, the page walk simply aborts.
6171          */
6172         walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6173                         NULL);
6174
6175         mmap_read_unlock(mc.mm);
6176         atomic_dec(&mc.from->moving_account);
6177 }
6178
6179 static void mem_cgroup_move_task(void)
6180 {
6181         if (mc.to) {
6182                 mem_cgroup_move_charge();
6183                 mem_cgroup_clear_mc();
6184         }
6185 }
6186 #else   /* !CONFIG_MMU */
6187 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6188 {
6189         return 0;
6190 }
6191 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6192 {
6193 }
6194 static void mem_cgroup_move_task(void)
6195 {
6196 }
6197 #endif
6198
6199 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6200 {
6201         if (value == PAGE_COUNTER_MAX)
6202                 seq_puts(m, "max\n");
6203         else
6204                 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6205
6206         return 0;
6207 }
6208
6209 static u64 memory_current_read(struct cgroup_subsys_state *css,
6210                                struct cftype *cft)
6211 {
6212         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6213
6214         return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6215 }
6216
6217 static u64 memory_peak_read(struct cgroup_subsys_state *css,
6218                             struct cftype *cft)
6219 {
6220         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6221
6222         return (u64)memcg->memory.watermark * PAGE_SIZE;
6223 }
6224
6225 static int memory_min_show(struct seq_file *m, void *v)
6226 {
6227         return seq_puts_memcg_tunable(m,
6228                 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6229 }
6230
6231 static ssize_t memory_min_write(struct kernfs_open_file *of,
6232                                 char *buf, size_t nbytes, loff_t off)
6233 {
6234         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6235         unsigned long min;
6236         int err;
6237
6238         buf = strstrip(buf);
6239         err = page_counter_memparse(buf, "max", &min);
6240         if (err)
6241                 return err;
6242
6243         page_counter_set_min(&memcg->memory, min);
6244
6245         return nbytes;
6246 }
6247
6248 static int memory_low_show(struct seq_file *m, void *v)
6249 {
6250         return seq_puts_memcg_tunable(m,
6251                 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6252 }
6253
6254 static ssize_t memory_low_write(struct kernfs_open_file *of,
6255                                 char *buf, size_t nbytes, loff_t off)
6256 {
6257         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6258         unsigned long low;
6259         int err;
6260
6261         buf = strstrip(buf);
6262         err = page_counter_memparse(buf, "max", &low);
6263         if (err)
6264                 return err;
6265
6266         page_counter_set_low(&memcg->memory, low);
6267
6268         return nbytes;
6269 }
6270
6271 static int memory_high_show(struct seq_file *m, void *v)
6272 {
6273         return seq_puts_memcg_tunable(m,
6274                 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6275 }
6276
6277 static ssize_t memory_high_write(struct kernfs_open_file *of,
6278                                  char *buf, size_t nbytes, loff_t off)
6279 {
6280         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6281         unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6282         bool drained = false;
6283         unsigned long high;
6284         int err;
6285
6286         buf = strstrip(buf);
6287         err = page_counter_memparse(buf, "max", &high);
6288         if (err)
6289                 return err;
6290
6291         page_counter_set_high(&memcg->memory, high);
6292
6293         for (;;) {
6294                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6295                 unsigned long reclaimed;
6296
6297                 if (nr_pages <= high)
6298                         break;
6299
6300                 if (signal_pending(current))
6301                         break;
6302
6303                 if (!drained) {
6304                         drain_all_stock(memcg);
6305                         drained = true;
6306                         continue;
6307                 }
6308
6309                 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6310                                         GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6311
6312                 if (!reclaimed && !nr_retries--)
6313                         break;
6314         }
6315
6316         memcg_wb_domain_size_changed(memcg);
6317         return nbytes;
6318 }
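
/*
 * Illustrative usage of memory.high from userspace; the cgroup path is
 * an assumption, not part of this file:
 *
 *   echo 512M > /sys/fs/cgroup/workload/memory.high
 *
 * The write above only returns once usage has been reclaimed below the
 * new boundary, a signal is pending, or the retry budget is exhausted.
 * Unlike memory.max below, it never invokes the OOM killer.
 */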
6319
6320 static int memory_max_show(struct seq_file *m, void *v)
6321 {
6322         return seq_puts_memcg_tunable(m,
6323                 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6324 }
6325
6326 static ssize_t memory_max_write(struct kernfs_open_file *of,
6327                                 char *buf, size_t nbytes, loff_t off)
6328 {
6329         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6330         unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6331         bool drained = false;
6332         unsigned long max;
6333         int err;
6334
6335         buf = strstrip(buf);
6336         err = page_counter_memparse(buf, "max", &max);
6337         if (err)
6338                 return err;
6339
6340         xchg(&memcg->memory.max, max);
6341
6342         for (;;) {
6343                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6344
6345                 if (nr_pages <= max)
6346                         break;
6347
6348                 if (signal_pending(current))
6349                         break;
6350
6351                 if (!drained) {
6352                         drain_all_stock(memcg);
6353                         drained = true;
6354                         continue;
6355                 }
6356
6357                 if (nr_reclaims) {
6358                         if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6359                                         GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6360                                 nr_reclaims--;
6361                         continue;
6362                 }
6363
6364                 memcg_memory_event(memcg, MEMCG_OOM);
6365                 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6366                         break;
6367         }
6368
6369         memcg_wb_domain_size_changed(memcg);
6370         return nbytes;
6371 }
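
/*
 * Illustrative usage of memory.max; the path is an assumption:
 *
 *   echo 256M > /sys/fs/cgroup/workload/memory.max
 *
 * In contrast to memory.high, lowering memory.max may invoke the OOM
 * killer: the loop above drains stocks, retries direct reclaim, and
 * then OOM-kills tasks in the group until usage fits, a kill fails, or
 * a signal is pending.
 */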
6372
6373 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6374 {
6375         seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6376         seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6377         seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6378         seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6379         seq_printf(m, "oom_kill %lu\n",
6380                    atomic_long_read(&events[MEMCG_OOM_KILL]));
6381         seq_printf(m, "oom_group_kill %lu\n",
6382                    atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6383 }
6384
6385 static int memory_events_show(struct seq_file *m, void *v)
6386 {
6387         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6388
6389         __memory_events_show(m, memcg->memory_events);
6390         return 0;
6391 }
6392
6393 static int memory_events_local_show(struct seq_file *m, void *v)
6394 {
6395         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6396
6397         __memory_events_show(m, memcg->memory_events_local);
6398         return 0;
6399 }
6400
6401 static int memory_stat_show(struct seq_file *m, void *v)
6402 {
6403         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6404         char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6405
6406         if (!buf)
6407                 return -ENOMEM;
6408         memory_stat_format(memcg, buf, PAGE_SIZE);
6409         seq_puts(m, buf);
6410         kfree(buf);
6411         return 0;
6412 }
6413
6414 #ifdef CONFIG_NUMA
6415 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6416                                                      int item)
6417 {
6418         return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6419 }
6420
6421 static int memory_numa_stat_show(struct seq_file *m, void *v)
6422 {
6423         int i;
6424         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6425
6426         mem_cgroup_flush_stats();
6427
6428         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6429                 int nid;
6430
6431                 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6432                         continue;
6433
6434                 seq_printf(m, "%s", memory_stats[i].name);
6435                 for_each_node_state(nid, N_MEMORY) {
6436                         u64 size;
6437                         struct lruvec *lruvec;
6438
6439                         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6440                         size = lruvec_page_state_output(lruvec,
6441                                                         memory_stats[i].idx);
6442                         seq_printf(m, " N%d=%llu", nid, size);
6443                 }
6444                 seq_putc(m, '\n');
6445         }
6446
6447         return 0;
6448 }
6449 #endif
6450
6451 static int memory_oom_group_show(struct seq_file *m, void *v)
6452 {
6453         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6454
6455         seq_printf(m, "%d\n", memcg->oom_group);
6456
6457         return 0;
6458 }
6459
6460 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6461                                       char *buf, size_t nbytes, loff_t off)
6462 {
6463         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6464         int ret, oom_group;
6465
6466         buf = strstrip(buf);
6467         if (!buf)
6468                 return -EINVAL;
6469
6470         ret = kstrtoint(buf, 0, &oom_group);
6471         if (ret)
6472                 return ret;
6473
6474         if (oom_group != 0 && oom_group != 1)
6475                 return -EINVAL;
6476
6477         memcg->oom_group = oom_group;
6478
6479         return nbytes;
6480 }
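
/*
 * Illustrative usage of memory.oom.group; the path is an assumption:
 *
 *   echo 1 > /sys/fs/cgroup/workload/memory.oom.group
 *
 * With oom_group set, an OOM kill that picks a victim in this cgroup
 * is expanded to the whole group, so the workload is torn down as an
 * indivisible unit instead of losing a single task.
 */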
6481
6482 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6483                               size_t nbytes, loff_t off)
6484 {
6485         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6486         unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6487         unsigned long nr_to_reclaim, nr_reclaimed = 0;
6488         unsigned int reclaim_options;
6489         int err;
6490
6491         buf = strstrip(buf);
6492         err = page_counter_memparse(buf, "", &nr_to_reclaim);
6493         if (err)
6494                 return err;
6495
6496         reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6497         while (nr_reclaimed < nr_to_reclaim) {
6498                 unsigned long reclaimed;
6499
6500                 if (signal_pending(current))
6501                         return -EINTR;
6502
6503                 /*
6504                  * This is the final attempt; drain the percpu lru caches in the
6505                  * hope of introducing more evictable pages for
6506                  * try_to_free_mem_cgroup_pages().
6507                  */
6508                 if (!nr_retries)
6509                         lru_add_drain_all();
6510
6511                 reclaimed = try_to_free_mem_cgroup_pages(memcg,
6512                                                 nr_to_reclaim - nr_reclaimed,
6513                                                 GFP_KERNEL, reclaim_options);
6514
6515                 if (!reclaimed && !nr_retries--)
6516                         return -EAGAIN;
6517
6518                 nr_reclaimed += reclaimed;
6519         }
6520
6521         return nbytes;
6522 }
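
/*
 * Illustrative usage of the proactive reclaim interface above; the
 * path is an assumption:
 *
 *   echo 1G > /sys/fs/cgroup/workload/memory.reclaim
 *
 * This requests that 1G be reclaimed from the group regardless of its
 * limits; the write returns -EAGAIN if the retries are used up before
 * the target is met, and -EINTR if a signal interrupts it.
 */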
6523
6524 static struct cftype memory_files[] = {
6525         {
6526                 .name = "current",
6527                 .flags = CFTYPE_NOT_ON_ROOT,
6528                 .read_u64 = memory_current_read,
6529         },
6530         {
6531                 .name = "peak",
6532                 .flags = CFTYPE_NOT_ON_ROOT,
6533                 .read_u64 = memory_peak_read,
6534         },
6535         {
6536                 .name = "min",
6537                 .flags = CFTYPE_NOT_ON_ROOT,
6538                 .seq_show = memory_min_show,
6539                 .write = memory_min_write,
6540         },
6541         {
6542                 .name = "low",
6543                 .flags = CFTYPE_NOT_ON_ROOT,
6544                 .seq_show = memory_low_show,
6545                 .write = memory_low_write,
6546         },
6547         {
6548                 .name = "high",
6549                 .flags = CFTYPE_NOT_ON_ROOT,
6550                 .seq_show = memory_high_show,
6551                 .write = memory_high_write,
6552         },
6553         {
6554                 .name = "max",
6555                 .flags = CFTYPE_NOT_ON_ROOT,
6556                 .seq_show = memory_max_show,
6557                 .write = memory_max_write,
6558         },
6559         {
6560                 .name = "events",
6561                 .flags = CFTYPE_NOT_ON_ROOT,
6562                 .file_offset = offsetof(struct mem_cgroup, events_file),
6563                 .seq_show = memory_events_show,
6564         },
6565         {
6566                 .name = "events.local",
6567                 .flags = CFTYPE_NOT_ON_ROOT,
6568                 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6569                 .seq_show = memory_events_local_show,
6570         },
6571         {
6572                 .name = "stat",
6573                 .seq_show = memory_stat_show,
6574         },
6575 #ifdef CONFIG_NUMA
6576         {
6577                 .name = "numa_stat",
6578                 .seq_show = memory_numa_stat_show,
6579         },
6580 #endif
6581         {
6582                 .name = "oom.group",
6583                 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6584                 .seq_show = memory_oom_group_show,
6585                 .write = memory_oom_group_write,
6586         },
6587         {
6588                 .name = "reclaim",
6589                 .flags = CFTYPE_NS_DELEGATABLE,
6590                 .write = memory_reclaim,
6591         },
6592         { }     /* terminate */
6593 };
6594
6595 struct cgroup_subsys memory_cgrp_subsys = {
6596         .css_alloc = mem_cgroup_css_alloc,
6597         .css_online = mem_cgroup_css_online,
6598         .css_offline = mem_cgroup_css_offline,
6599         .css_released = mem_cgroup_css_released,
6600         .css_free = mem_cgroup_css_free,
6601         .css_reset = mem_cgroup_css_reset,
6602         .css_rstat_flush = mem_cgroup_css_rstat_flush,
6603         .can_attach = mem_cgroup_can_attach,
6604         .cancel_attach = mem_cgroup_cancel_attach,
6605         .post_attach = mem_cgroup_move_task,
6606         .dfl_cftypes = memory_files,
6607         .legacy_cftypes = mem_cgroup_legacy_files,
6608         .early_init = 0,
6609 };
6610
6611 /*
6612  * This function calculates an individual cgroup's effective
6613  * protection which is derived from its own memory.min/low, its
6614  * parent's and siblings' settings, as well as the actual memory
6615  * distribution in the tree.
6616  *
6617  * The following rules apply to the effective protection values:
6618  *
6619  * 1. At the first level of reclaim, effective protection is equal to
6620  *    the declared protection in memory.min and memory.low.
6621  *
6622  * 2. To enable safe delegation of the protection configuration, at
6623  *    subsequent levels the effective protection is capped to the
6624  *    parent's effective protection.
6625  *
6626  * 3. To make complex and dynamic subtrees easier to configure, the
6627  *    user is allowed to overcommit the declared protection at a given
6628  *    level. If that is the case, the parent's effective protection is
6629  *    distributed to the children in proportion to how much protection
6630  *    they have declared and how much of it they are utilizing.
6631  *
6632  *    This makes distribution proportional, but also work-conserving:
6633          *    if one cgroup claims much more protection than the memory it uses,
6634  *    the unused remainder is available to its siblings.
6635  *
6636  * 4. Conversely, when the declared protection is undercommitted at a
6637  *    given level, the distribution of the larger parental protection
6638  *    budget is NOT proportional. A cgroup's protection from a sibling
6639  *    is capped to its own memory.min/low setting.
6640  *
6641  * 5. However, to allow protecting recursive subtrees from each other
6642  *    without having to declare each individual cgroup's fixed share
6643  *    of the ancestor's claim to protection, any unutilized -
6644  *    "floating" - protection from up the tree is distributed in
6645  *    proportion to each cgroup's *usage*. This makes the protection
6646  *    neutral wrt sibling cgroups and lets them compete freely over
6647  *    the shared parental protection budget, but it protects the
6648  *    subtree as a whole from neighboring subtrees.
6649  *
6650  * Note that 4. and 5. are not in conflict: 4. is about protecting
6651  * against immediate siblings whereas 5. is about protecting against
6652  * neighboring subtrees.
6653  */
6654 static unsigned long effective_protection(unsigned long usage,
6655                                           unsigned long parent_usage,
6656                                           unsigned long setting,
6657                                           unsigned long parent_effective,
6658                                           unsigned long siblings_protected)
6659 {
6660         unsigned long protected;
6661         unsigned long ep;
6662
6663         protected = min(usage, setting);
6664         /*
6665          * If all cgroups at this level combined claim and use more
6666          * protection than what the parent affords them, distribute
6667          * shares in proportion to utilization.
6668          *
6669          * We are using actual utilization rather than the statically
6670          * claimed protection in order to be work-conserving: claimed
6671          * but unused protection is available to siblings that would
6672          * otherwise get a smaller chunk than what they claimed.
6673          */
6674         if (siblings_protected > parent_effective)
6675                 return protected * parent_effective / siblings_protected;
6676
6677         /*
6678          * Ok, utilized protection of all children is within what the
6679          * parent affords them, so we know whatever this child claims
6680          * and utilizes is effectively protected.
6681          *
6682          * If there is unprotected usage beyond this value, reclaim
6683          * will apply pressure in proportion to that amount.
6684          *
6685          * If there is unutilized protection, the cgroup will be fully
6686          * shielded from reclaim, but we do return a smaller value for
6687          * protection than what the group could enjoy in theory. This
6688          * is okay. With the overcommit distribution above, effective
6689          * protection is always dependent on how memory is actually
6690          * consumed among the siblings anyway.
6691          */
6692         ep = protected;
6693
6694         /*
6695          * If the children aren't claiming (all of) the protection
6696          * afforded to them by the parent, distribute the remainder in
6697          * proportion to the (unprotected) memory of each cgroup. That
6698          * way, cgroups that aren't explicitly prioritized wrt each
6699          * other compete freely over the allowance, but they are
6700          * collectively protected from neighboring trees.
6701          *
6702          * We're using unprotected memory for the weight so that if
6703          * some cgroups DO claim explicit protection, we don't protect
6704          * the same bytes twice.
6705          *
6706          * Check both usage and parent_usage against the respective
6707          * protected values. One should imply the other, but they
6708          * aren't read atomically - make sure the division is sane.
6709          */
6710         if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6711                 return ep;
6712         if (parent_effective > siblings_protected &&
6713             parent_usage > siblings_protected &&
6714             usage > protected) {
6715                 unsigned long unclaimed;
6716
6717                 unclaimed = parent_effective - siblings_protected;
6718                 unclaimed *= usage - protected;
6719                 unclaimed /= parent_usage - siblings_protected;
6720
6721                 ep += unclaimed;
6722         }
6723
6724         return ep;
6725 }
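
/*
 * Worked example for the overcommit case above (the numbers are purely
 * illustrative): suppose parent_effective is 8G and two siblings each
 * declare memory.low = 6G while using 6G. Then siblings_protected is
 * 12G > 8G, and each child ends up with 6G * 8G / 12G = 4G of
 * effective protection. Under recursive protection, any protection the
 * children leave unclaimed is instead handed out in proportion to each
 * child's unprotected usage.
 */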
6726
6727 /**
6728  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6729  * @root: the top ancestor of the sub-tree being checked
6730  * @memcg: the memory cgroup to check
6731  *
6732  * WARNING: This function is not stateless! It can only be used as part
6733  *          of a top-down tree iteration, not for isolated queries.
6734  */
6735 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6736                                      struct mem_cgroup *memcg)
6737 {
6738         unsigned long usage, parent_usage;
6739         struct mem_cgroup *parent;
6740
6741         if (mem_cgroup_disabled())
6742                 return;
6743
6744         if (!root)
6745                 root = root_mem_cgroup;
6746
6747         /*
6748          * Effective values of the reclaim targets are ignored so they
6749          * can be stale. Have a look at mem_cgroup_protection for more
6750          * details.
6751          * TODO: calculation should be more robust so that we do not need
6752          * that special casing.
6753          */
6754         if (memcg == root)
6755                 return;
6756
6757         usage = page_counter_read(&memcg->memory);
6758         if (!usage)
6759                 return;
6760
6761         parent = parent_mem_cgroup(memcg);
6762
6763         if (parent == root) {
6764                 memcg->memory.emin = READ_ONCE(memcg->memory.min);
6765                 memcg->memory.elow = READ_ONCE(memcg->memory.low);
6766                 return;
6767         }
6768
6769         parent_usage = page_counter_read(&parent->memory);
6770
6771         WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6772                         READ_ONCE(memcg->memory.min),
6773                         READ_ONCE(parent->memory.emin),
6774                         atomic_long_read(&parent->memory.children_min_usage)));
6775
6776         WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6777                         READ_ONCE(memcg->memory.low),
6778                         READ_ONCE(parent->memory.elow),
6779                         atomic_long_read(&parent->memory.children_low_usage)));
6780 }
6781
6782 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6783                         gfp_t gfp)
6784 {
6785         long nr_pages = folio_nr_pages(folio);
6786         int ret;
6787
6788         ret = try_charge(memcg, gfp, nr_pages);
6789         if (ret)
6790                 goto out;
6791
6792         css_get(&memcg->css);
6793         commit_charge(folio, memcg);
6794
6795         local_irq_disable();
6796         mem_cgroup_charge_statistics(memcg, nr_pages);
6797         memcg_check_events(memcg, folio_nid(folio));
6798         local_irq_enable();
6799 out:
6800         return ret;
6801 }
6802
6803 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
6804 {
6805         struct mem_cgroup *memcg;
6806         int ret;
6807
6808         memcg = get_mem_cgroup_from_mm(mm);
6809         ret = charge_memcg(folio, memcg, gfp);
6810         css_put(&memcg->css);
6811
6812         return ret;
6813 }
6814
6815 /**
6816  * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6817  * @page: page to charge
6818  * @mm: mm context of the victim
6819  * @gfp: reclaim mode
6820  * @entry: swap entry for which the page is allocated
6821  *
6822  * This function charges a page allocated for swapin. Please call this before
6823  * adding the page to the swapcache.
6824  *
6825  * Returns 0 on success. Otherwise, an error code is returned.
6826  */
6827 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
6828                                   gfp_t gfp, swp_entry_t entry)
6829 {
6830         struct folio *folio = page_folio(page);
6831         struct mem_cgroup *memcg;
6832         unsigned short id;
6833         int ret;
6834
6835         if (mem_cgroup_disabled())
6836                 return 0;
6837
6838         id = lookup_swap_cgroup_id(entry);
6839         rcu_read_lock();
6840         memcg = mem_cgroup_from_id(id);
6841         if (!memcg || !css_tryget_online(&memcg->css))
6842                 memcg = get_mem_cgroup_from_mm(mm);
6843         rcu_read_unlock();
6844
6845         ret = charge_memcg(folio, memcg, gfp);
6846
6847         css_put(&memcg->css);
6848         return ret;
6849 }
6850
6851 /*
6852  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6853  * @entry: swap entry for which the page is charged
6854  *
6855  * Call this function after successfully adding the charged page to swapcache.
6856  *
6857  * Note: This function assumes that the page whose swap slot is being
6858  * uncharged is an order-0 page.
6859  */
6860 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6861 {
6862         /*
6863          * Cgroup1's unified memory+swap counter has been charged with the
6864          * new swapcache page; finish the transfer by uncharging the swap
6865          * slot. The swap slot would also get uncharged when it dies, but
6866          * it can stick around indefinitely and we'd count the page twice
6867          * the entire time.
6868          *
6869          * Cgroup2 has separate resource counters for memory and swap,
6870          * so this is a non-issue here. Memory and swap charge lifetimes
6871          * correspond 1:1 to page and swap slot lifetimes: we charge the
6872          * page to memory here, and uncharge swap when the slot is freed.
6873          */
6874         if (!mem_cgroup_disabled() && do_memsw_account()) {
6875                 /*
6876                  * The swap entry might not get freed for a long time;
6877                  * let's not wait for it.  The page already received a
6878                  * memory+swap charge, drop the swap entry duplicate.
6879                  */
6880                 mem_cgroup_uncharge_swap(entry, 1);
6881         }
6882 }
6883
6884 struct uncharge_gather {
6885         struct mem_cgroup *memcg;
6886         unsigned long nr_memory;
6887         unsigned long pgpgout;
6888         unsigned long nr_kmem;
6889         int nid;
6890 };
6891
6892 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6893 {
6894         memset(ug, 0, sizeof(*ug));
6895 }
6896
6897 static void uncharge_batch(const struct uncharge_gather *ug)
6898 {
6899         unsigned long flags;
6900
6901         if (ug->nr_memory) {
6902                 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
6903                 if (do_memsw_account())
6904                         page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
6905                 if (ug->nr_kmem)
6906                         memcg_account_kmem(ug->memcg, -ug->nr_kmem);
6907                 memcg_oom_recover(ug->memcg);
6908         }
6909
6910         local_irq_save(flags);
6911         __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6912         __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
6913         memcg_check_events(ug->memcg, ug->nid);
6914         local_irq_restore(flags);
6915
6916         /* drop reference from uncharge_folio */
6917         css_put(&ug->memcg->css);
6918 }
6919
6920 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
6921 {
6922         long nr_pages;
6923         struct mem_cgroup *memcg;
6924         struct obj_cgroup *objcg;
6925
6926         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
6927
6928         /*
6929          * Nobody should be changing or seriously looking at
6930          * folio memcg or objcg at this point; we have fully
6931          * exclusive access to the folio.
6932          */
6933         if (folio_memcg_kmem(folio)) {
6934                 objcg = __folio_objcg(folio);
6935                 /*
6936                  * This get matches the put at the end of the function and
6937                  * kmem pages do not hold memcg references anymore.
6938                  */
6939                 memcg = get_mem_cgroup_from_objcg(objcg);
6940         } else {
6941                 memcg = __folio_memcg(folio);
6942         }
6943
6944         if (!memcg)
6945                 return;
6946
6947         if (ug->memcg != memcg) {
6948                 if (ug->memcg) {
6949                         uncharge_batch(ug);
6950                         uncharge_gather_clear(ug);
6951                 }
6952                 ug->memcg = memcg;
6953                 ug->nid = folio_nid(folio);
6954
6955                 /* pairs with css_put in uncharge_batch */
6956                 css_get(&memcg->css);
6957         }
6958
6959         nr_pages = folio_nr_pages(folio);
6960
6961         if (folio_memcg_kmem(folio)) {
6962                 ug->nr_memory += nr_pages;
6963                 ug->nr_kmem += nr_pages;
6964
6965                 folio->memcg_data = 0;
6966                 obj_cgroup_put(objcg);
6967         } else {
6968                 /* LRU pages aren't accounted at the root level */
6969                 if (!mem_cgroup_is_root(memcg))
6970                         ug->nr_memory += nr_pages;
6971                 ug->pgpgout++;
6972
6973                 folio->memcg_data = 0;
6974         }
6975
6976         css_put(&memcg->css);
6977 }
6978
6979 void __mem_cgroup_uncharge(struct folio *folio)
6980 {
6981         struct uncharge_gather ug;
6982
6983         /* Don't touch the folio->lru of any random page; pre-check: */
6984         if (!folio_memcg(folio))
6985                 return;
6986
6987         uncharge_gather_clear(&ug);
6988         uncharge_folio(folio, &ug);
6989         uncharge_batch(&ug);
6990 }
6991
6992 /**
6993  * __mem_cgroup_uncharge_list - uncharge a list of pages
6994  * @page_list: list of pages to uncharge
6995  *
6996  * Uncharge a list of pages previously charged with
6997  * __mem_cgroup_charge().
6998  */
6999 void __mem_cgroup_uncharge_list(struct list_head *page_list)
7000 {
7001         struct uncharge_gather ug;
7002         struct folio *folio;
7003
7004         uncharge_gather_clear(&ug);
7005         list_for_each_entry(folio, page_list, lru)
7006                 uncharge_folio(folio, &ug);
7007         if (ug.memcg)
7008                 uncharge_batch(&ug);
7009 }
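
/*
 * A note on the batching above: uncharge_folio() only gathers, and the
 * counters are touched in uncharge_batch(). The batch is flushed every
 * time the list walk crosses into a folio owned by a different memcg,
 * so a list belonging to a single memcg is uncharged with exactly one
 * flush, while a list alternating between two memcgs flushes per folio.
 */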
7010
7011 /**
7012  * mem_cgroup_migrate - Charge a folio's replacement.
7013  * @old: Currently circulating folio.
7014  * @new: Replacement folio.
7015  *
7016  * Charge @new as a replacement folio for @old. @old will
7017  * be uncharged upon free.
7018  *
7019  * Both folios must be locked, @new->mapping must be set up.
7020  */
7021 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7022 {
7023         struct mem_cgroup *memcg;
7024         long nr_pages = folio_nr_pages(new);
7025         unsigned long flags;
7026
7027         VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7028         VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7029         VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7030         VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7031
7032         if (mem_cgroup_disabled())
7033                 return;
7034
7035         /* Page cache replacement: new folio already charged? */
7036         if (folio_memcg(new))
7037                 return;
7038
7039         memcg = folio_memcg(old);
7040         VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7041         if (!memcg)
7042                 return;
7043
7044         /* Force-charge the new page. The old one will be freed soon */
7045         if (!mem_cgroup_is_root(memcg)) {
7046                 page_counter_charge(&memcg->memory, nr_pages);
7047                 if (do_memsw_account())
7048                         page_counter_charge(&memcg->memsw, nr_pages);
7049         }
7050
7051         css_get(&memcg->css);
7052         commit_charge(new, memcg);
7053
7054         local_irq_save(flags);
7055         mem_cgroup_charge_statistics(memcg, nr_pages);
7056         memcg_check_events(memcg, folio_nid(new));
7057         local_irq_restore(flags);
7058 }
7059
7060 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7061 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7062
7063 void mem_cgroup_sk_alloc(struct sock *sk)
7064 {
7065         struct mem_cgroup *memcg;
7066
7067         if (!mem_cgroup_sockets_enabled)
7068                 return;
7069
7070         /* Do not associate the sock with an unrelated interrupted task's memcg. */
7071         if (!in_task())
7072                 return;
7073
7074         rcu_read_lock();
7075         memcg = mem_cgroup_from_task(current);
7076         if (memcg == root_mem_cgroup)
7077                 goto out;
7078         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7079                 goto out;
7080         if (css_tryget(&memcg->css))
7081                 sk->sk_memcg = memcg;
7082 out:
7083         rcu_read_unlock();
7084 }
7085
7086 void mem_cgroup_sk_free(struct sock *sk)
7087 {
7088         if (sk->sk_memcg)
7089                 css_put(&sk->sk_memcg->css);
7090 }
7091
7092 /**
7093  * mem_cgroup_charge_skmem - charge socket memory
7094  * @memcg: memcg to charge
7095  * @nr_pages: number of pages to charge
7096  * @gfp_mask: reclaim mode
7097  *
7098  * Charges @nr_pages to @memcg. Returns %true if the charge fits within
7099  * @memcg's configured limit, %false if it doesn't.
7100  */
7101 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7102                              gfp_t gfp_mask)
7103 {
7104         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7105                 struct page_counter *fail;
7106
7107                 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7108                         memcg->tcpmem_pressure = 0;
7109                         return true;
7110                 }
7111                 memcg->tcpmem_pressure = 1;
7112                 if (gfp_mask & __GFP_NOFAIL) {
7113                         page_counter_charge(&memcg->tcpmem, nr_pages);
7114                         return true;
7115                 }
7116                 return false;
7117         }
7118
7119         if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7120                 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7121                 return true;
7122         }
7123
7124         return false;
7125 }
7126
7127 /**
7128  * mem_cgroup_uncharge_skmem - uncharge socket memory
7129  * @memcg: memcg to uncharge
7130  * @nr_pages: number of pages to uncharge
7131  */
7132 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7133 {
7134         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7135                 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7136                 return;
7137         }
7138
7139         mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7140
7141         refill_stock(memcg, nr_pages);
7142 }
7143
7144 static int __init cgroup_memory(char *s)
7145 {
7146         char *token;
7147
7148         while ((token = strsep(&s, ",")) != NULL) {
7149                 if (!*token)
7150                         continue;
7151                 if (!strcmp(token, "nosocket"))
7152                         cgroup_memory_nosocket = true;
7153                 if (!strcmp(token, "nokmem"))
7154                         cgroup_memory_nokmem = true;
7155         }
7156         return 1;
7157 }
7158 __setup("cgroup.memory=", cgroup_memory);
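
/*
 * Example kernel command line using the parameter above (the
 * combination shown is just an illustration):
 *
 *   cgroup.memory=nosocket,nokmem
 *
 * which disables socket memory accounting and kernel memory accounting
 * respectively. Unknown tokens are silently ignored by the loop above.
 */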
7159
7160 /*
7161  * subsys_initcall() for memory controller.
7162  *
7163  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7164  * context because of lock dependencies (cgroup_lock -> cpu hotplug), but
7165  * basically everything that doesn't depend on a specific mem_cgroup structure
7166  * should be initialized from here.
7167  */
7168 static int __init mem_cgroup_init(void)
7169 {
7170         int cpu, node;
7171
7172         /*
7173          * An s32 type (see struct batched_lruvec_stat) is currently used
7174          * for per-memcg-per-cpu caching of per-node statistics. For this
7175          * to work correctly, we must make sure that the overfill threshold
7176          * can't exceed S32_MAX / PAGE_SIZE.
7177          */
7178         BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7179
7180         cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7181                                   memcg_hotplug_cpu_dead);
7182
7183         for_each_possible_cpu(cpu)
7184                 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7185                           drain_local_stock);
7186
7187         for_each_node(node) {
7188                 struct mem_cgroup_tree_per_node *rtpn;
7189
7190                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7191                                     node_online(node) ? node : NUMA_NO_NODE);
7192
7193                 rtpn->rb_root = RB_ROOT;
7194                 rtpn->rb_rightmost = NULL;
7195                 spin_lock_init(&rtpn->lock);
7196                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7197         }
7198
7199         return 0;
7200 }
7201 subsys_initcall(mem_cgroup_init);
7202
7203 #ifdef CONFIG_MEMCG_SWAP
7204 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7205 {
7206         while (!refcount_inc_not_zero(&memcg->id.ref)) {
7207                 /*
7208                  * The root cgroup cannot be destroyed, so its refcount must
7209                  * always be >= 1.
7210                  */
7211                 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7212                         VM_BUG_ON(1);
7213                         break;
7214                 }
7215                 memcg = parent_mem_cgroup(memcg);
7216                 if (!memcg)
7217                         memcg = root_mem_cgroup;
7218         }
7219         return memcg;
7220 }
7221
7222 /**
7223  * mem_cgroup_swapout - transfer a memsw charge to swap
7224  * @folio: folio whose memsw charge to transfer
7225  * @entry: swap entry to move the charge to
7226  *
7227  * Transfer the memsw charge of @folio to @entry.
7228  */
7229 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7230 {
7231         struct mem_cgroup *memcg, *swap_memcg;
7232         unsigned int nr_entries;
7233         unsigned short oldid;
7234
7235         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7236         VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7237
7238         if (mem_cgroup_disabled())
7239                 return;
7240
7241         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7242                 return;
7243
7244         memcg = folio_memcg(folio);
7245
7246         VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7247         if (!memcg)
7248                 return;
7249
7250         /*
7251          * In case the memcg owning these pages has been offlined and doesn't
7252          * have an ID allocated to it anymore, charge the closest online
7253          * ancestor for the swap instead and transfer the memory+swap charge.
7254          */
7255         swap_memcg = mem_cgroup_id_get_online(memcg);
7256         nr_entries = folio_nr_pages(folio);
7257         /* Get references for the tail pages, too */
7258         if (nr_entries > 1)
7259                 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7260         oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7261                                    nr_entries);
7262         VM_BUG_ON_FOLIO(oldid, folio);
7263         mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7264
7265         folio->memcg_data = 0;
7266
7267         if (!mem_cgroup_is_root(memcg))
7268                 page_counter_uncharge(&memcg->memory, nr_entries);
7269
7270         if (!cgroup_memory_noswap && memcg != swap_memcg) {
7271                 if (!mem_cgroup_is_root(swap_memcg))
7272                         page_counter_charge(&swap_memcg->memsw, nr_entries);
7273                 page_counter_uncharge(&memcg->memsw, nr_entries);
7274         }
7275
7276         /*
7277          * Interrupts should be disabled here because the caller holds the
7278          * i_pages lock, which is taken with interrupts off. Keeping
7279          * interrupts disabled matters because it is the only
7280          * synchronisation we have for updating the per-CPU variables.
7281          */
7282         memcg_stats_lock();
7283         mem_cgroup_charge_statistics(memcg, -nr_entries);
7284         memcg_stats_unlock();
7285         memcg_check_events(memcg, folio_nid(folio));
7286
7287         css_put(&memcg->css);
7288 }
7289
7290 /**
7291  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7292  * @folio: folio being added to swap
7293  * @entry: swap entry to charge
7294  *
7295  * Try to charge @folio's memcg for the swap space at @entry.
7296  *
7297  * Returns 0 on success, -ENOMEM on failure.
7298  */
7299 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7300 {
7301         unsigned int nr_pages = folio_nr_pages(folio);
7302         struct page_counter *counter;
7303         struct mem_cgroup *memcg;
7304         unsigned short oldid;
7305
7306         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7307                 return 0;
7308
7309         memcg = folio_memcg(folio);
7310
7311         VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7312         if (!memcg)
7313                 return 0;
7314
7315         if (!entry.val) {
7316                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7317                 return 0;
7318         }
7319
7320         memcg = mem_cgroup_id_get_online(memcg);
7321
7322         if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7323             !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7324                 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7325                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7326                 mem_cgroup_id_put(memcg);
7327                 return -ENOMEM;
7328         }
7329
7330         /* Get references for the tail pages, too */
7331         if (nr_pages > 1)
7332                 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7333         oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7334         VM_BUG_ON_FOLIO(oldid, folio);
7335         mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7336
7337         return 0;
7338 }
7339
7340 /**
7341  * __mem_cgroup_uncharge_swap - uncharge swap space
7342  * @entry: swap entry to uncharge
7343  * @nr_pages: the amount of swap space to uncharge
7344  */
7345 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7346 {
7347         struct mem_cgroup *memcg;
7348         unsigned short id;
7349
7350         id = swap_cgroup_record(entry, 0, nr_pages);
7351         rcu_read_lock();
7352         memcg = mem_cgroup_from_id(id);
7353         if (memcg) {
7354                 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7355                         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7356                                 page_counter_uncharge(&memcg->swap, nr_pages);
7357                         else
7358                                 page_counter_uncharge(&memcg->memsw, nr_pages);
7359                 }
7360                 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7361                 mem_cgroup_id_put_many(memcg, nr_pages);
7362         }
7363         rcu_read_unlock();
7364 }
7365
7366 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7367 {
7368         long nr_swap_pages = get_nr_swap_pages();
7369
7370         if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7371                 return nr_swap_pages;
7372         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7373                 nr_swap_pages = min_t(long, nr_swap_pages,
7374                                       READ_ONCE(memcg->swap.max) -
7375                                       page_counter_read(&memcg->swap));
7376         return nr_swap_pages;
7377 }
7378
7379 bool mem_cgroup_swap_full(struct page *page)
7380 {
7381         struct mem_cgroup *memcg;
7382
7383         VM_BUG_ON_PAGE(!PageLocked(page), page);
7384
7385         if (vm_swap_full())
7386                 return true;
7387         if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7388                 return false;
7389
7390         memcg = page_memcg(page);
7391         if (!memcg)
7392                 return false;
7393
7394         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7395                 unsigned long usage = page_counter_read(&memcg->swap);
7396
7397                 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7398                     usage * 2 >= READ_ONCE(memcg->swap.max))
7399                         return true;
7400         }
7401
7402         return false;
7403 }
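
/*
 * Worked example for the check above (numbers are illustrative): with
 * swap.max set to 1G on some ancestor, usage * 2 >= max becomes true
 * once that ancestor's swap usage reaches 512M, i.e. the hierarchy is
 * treated as "swap full" at 50% of the configured limit (and likewise
 * for swap.high).
 */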
7404
7405 static int __init setup_swap_account(char *s)
7406 {
7407         if (!strcmp(s, "1"))
7408                 cgroup_memory_noswap = false;
7409         else if (!strcmp(s, "0"))
7410                 cgroup_memory_noswap = true;
7411         return 1;
7412 }
7413 __setup("swapaccount=", setup_swap_account);
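
/*
 * Example: booting with "swapaccount=0" disables swap accounting and
 * "swapaccount=1" enables it, overriding the build-time default; any
 * other value leaves the setting untouched.
 */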
7414
7415 static u64 swap_current_read(struct cgroup_subsys_state *css,
7416                              struct cftype *cft)
7417 {
7418         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7419
7420         return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7421 }
7422
7423 static int swap_high_show(struct seq_file *m, void *v)
7424 {
7425         return seq_puts_memcg_tunable(m,
7426                 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7427 }
7428
7429 static ssize_t swap_high_write(struct kernfs_open_file *of,
7430                                char *buf, size_t nbytes, loff_t off)
7431 {
7432         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7433         unsigned long high;
7434         int err;
7435
7436         buf = strstrip(buf);
7437         err = page_counter_memparse(buf, "max", &high);
7438         if (err)
7439                 return err;
7440
7441         page_counter_set_high(&memcg->swap, high);
7442
7443         return nbytes;
7444 }
7445
7446 static int swap_max_show(struct seq_file *m, void *v)
7447 {
7448         return seq_puts_memcg_tunable(m,
7449                 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7450 }
7451
7452 static ssize_t swap_max_write(struct kernfs_open_file *of,
7453                               char *buf, size_t nbytes, loff_t off)
7454 {
7455         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7456         unsigned long max;
7457         int err;
7458
7459         buf = strstrip(buf);
7460         err = page_counter_memparse(buf, "max", &max);
7461         if (err)
7462                 return err;
7463
7464         xchg(&memcg->swap.max, max);
7465
7466         return nbytes;
7467 }
7468
7469 static int swap_events_show(struct seq_file *m, void *v)
7470 {
7471         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7472
7473         seq_printf(m, "high %lu\n",
7474                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7475         seq_printf(m, "max %lu\n",
7476                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7477         seq_printf(m, "fail %lu\n",
7478                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7479
7480         return 0;
7481 }
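
/*
 * Summary of the counters above as exposed in memory.swap.events (see
 * Documentation/admin-guide/cgroup-v2.rst for the authoritative
 * wording): "high" counts swap usage exceeding swap.high, "max" counts
 * allocations stopped at the swap.max boundary, and "fail" counts swap
 * allocation failures, whether from the limit or from running out of
 * swap system-wide.
 */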
7482
7483 static struct cftype swap_files[] = {
7484         {
7485                 .name = "swap.current",
7486                 .flags = CFTYPE_NOT_ON_ROOT,
7487                 .read_u64 = swap_current_read,
7488         },
7489         {
7490                 .name = "swap.high",
7491                 .flags = CFTYPE_NOT_ON_ROOT,
7492                 .seq_show = swap_high_show,
7493                 .write = swap_high_write,
7494         },
7495         {
7496                 .name = "swap.max",
7497                 .flags = CFTYPE_NOT_ON_ROOT,
7498                 .seq_show = swap_max_show,
7499                 .write = swap_max_write,
7500         },
7501         {
7502                 .name = "swap.events",
7503                 .flags = CFTYPE_NOT_ON_ROOT,
7504                 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7505                 .seq_show = swap_events_show,
7506         },
7507         { }     /* terminate */
7508 };
7509
7510 static struct cftype memsw_files[] = {
7511         {
7512                 .name = "memsw.usage_in_bytes",
7513                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7514                 .read_u64 = mem_cgroup_read_u64,
7515         },
7516         {
7517                 .name = "memsw.max_usage_in_bytes",
7518                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7519                 .write = mem_cgroup_reset,
7520                 .read_u64 = mem_cgroup_read_u64,
7521         },
7522         {
7523                 .name = "memsw.limit_in_bytes",
7524                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7525                 .write = mem_cgroup_write,
7526                 .read_u64 = mem_cgroup_read_u64,
7527         },
7528         {
7529                 .name = "memsw.failcnt",
7530                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7531                 .write = mem_cgroup_reset,
7532                 .read_u64 = mem_cgroup_read_u64,
7533         },
7534         { },    /* terminate */
7535 };
7536
7537 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7538 /**
7539  * obj_cgroup_may_zswap - check if this cgroup can zswap
7540  * @objcg: the object cgroup
7541  *
7542  * Check if the hierarchical zswap limit has been reached.
7543  *
7544  * This doesn't check for specific headroom, and it is not atomic
7545  * either. But with zswap, the size of the allocation is only known
7546  * once compression has occurred, and this optimistic pre-check avoids
7547  * spending cycles on compression when there is already no room left
7548  * or zswap is disabled altogether somewhere in the hierarchy.
7549  */
7550 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
7551 {
7552         struct mem_cgroup *memcg, *original_memcg;
7553         bool ret = true;
7554
7555         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7556                 return true;
7557
7558         original_memcg = get_mem_cgroup_from_objcg(objcg);
7559         for (memcg = original_memcg; memcg != root_mem_cgroup;
7560              memcg = parent_mem_cgroup(memcg)) {
7561                 unsigned long max = READ_ONCE(memcg->zswap_max);
7562                 unsigned long pages;
7563
7564                 if (max == PAGE_COUNTER_MAX)
7565                         continue;
7566                 if (max == 0) {
7567                         ret = false;
7568                         break;
7569                 }
7570
7571                 cgroup_rstat_flush(memcg->css.cgroup);
7572                 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
7573                 if (pages < max)
7574                         continue;
7575                 ret = false;
7576                 break;
7577         }
7578         mem_cgroup_put(original_memcg);
7579         return ret;
7580 }
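
/*
 * Sketch of the intended caller pattern (hypothetical names; the real
 * user is the zswap store path):
 *
 *	if (!obj_cgroup_may_zswap(objcg))
 *		return -ENOMEM;			// skip compression entirely
 *	clen = my_compress(page, dst);		// size known only here
 *	obj_cgroup_charge_zswap(objcg, clen);
 *
 * The pre-check is optimistic, so concurrent stores may overshoot the
 * limit slightly before their charges land.
 */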
7581
7582 /**
7583  * obj_cgroup_charge_zswap - charge compression backend memory
7584  * @objcg: the object cgroup
7585  * @size: size of compressed object
7586  *
7587  * This forces the charge after obj_cgroup_may_zswap() allowed
7588  * compression and storage in zswap for this cgroup to go ahead.
7589  */
7590 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
7591 {
7592         struct mem_cgroup *memcg;
7593
7594         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7595                 return;
7596
7597         VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
7598
7599         /* PF_MEMALLOC context, charging must succeed */
7600         if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
7601                 VM_WARN_ON_ONCE(1);
7602
7603         rcu_read_lock();
7604         memcg = obj_cgroup_memcg(objcg);
7605         mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
7606         mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
7607         rcu_read_unlock();
7608 }
7609
7610 /**
7611  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7612  * @objcg: the object cgroup
7613  * @size: size of compressed object
7614  *
7615  * Uncharges zswap memory on page-in.
7616  */
7617 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
7618 {
7619         struct mem_cgroup *memcg;
7620
7621         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7622                 return;
7623
7624         obj_cgroup_uncharge(objcg, size);
7625
7626         rcu_read_lock();
7627         memcg = obj_cgroup_memcg(objcg);
7628         mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
7629         mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
7630         rcu_read_unlock();
7631 }
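
/*
 * This mirrors obj_cgroup_charge_zswap(): every charge is expected to
 * be balanced by exactly one uncharge of the same compressed size once
 * the object leaves zswap.
 */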
7632
7633 static u64 zswap_current_read(struct cgroup_subsys_state *css,
7634                               struct cftype *cft)
7635 {
7636         cgroup_rstat_flush(css->cgroup);
7637         return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
7638 }
7639
7640 static int zswap_max_show(struct seq_file *m, void *v)
7641 {
7642         return seq_puts_memcg_tunable(m,
7643                 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
7644 }
7645
7646 static ssize_t zswap_max_write(struct kernfs_open_file *of,
7647                                char *buf, size_t nbytes, loff_t off)
7648 {
7649         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7650         unsigned long max;
7651         int err;
7652
7653         buf = strstrip(buf);
7654         err = page_counter_memparse(buf, "max", &max);
7655         if (err)
7656                 return err;
7657
7658         xchg(&memcg->zswap_max, max);
7659
7660         return nbytes;
7661 }
7662
7663 static struct cftype zswap_files[] = {
7664         {
7665                 .name = "zswap.current",
7666                 .flags = CFTYPE_NOT_ON_ROOT,
7667                 .read_u64 = zswap_current_read,
7668         },
7669         {
7670                 .name = "zswap.max",
7671                 .flags = CFTYPE_NOT_ON_ROOT,
7672                 .seq_show = zswap_max_show,
7673                 .write = zswap_max_write,
7674         },
7675         { }     /* terminate */
7676 };
7677 #endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
7678
7679 /*
7680  * If mem_cgroup_swap_init() were a subsys_initcall() instead of a
7681  * core_initcall(), cgroup_memory_noswap could still be false when memcg
7682  * is disabled via the "cgroup_disable=memory" boot parameter, which may
7683  * result in a premature OOPS inside mem_cgroup_get_nr_swap_pages()
7684  * in corner cases.
7685  */
7686 static int __init mem_cgroup_swap_init(void)
7687 {
7688         /* No memory control -> no swap control */
7689         if (mem_cgroup_disabled())
7690                 cgroup_memory_noswap = true;
7691
7692         if (cgroup_memory_noswap)
7693                 return 0;
7694
7695         WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7696         WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7697 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7698         WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
7699 #endif
7700         return 0;
7701 }
7702 core_initcall(mem_cgroup_swap_init);
7703
7704 #endif /* CONFIG_MEMCG_SWAP */