// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap            1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
        return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
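
/*
 * Illustrative meaning of the targets above: both are per-cpu page-event
 * counts between checks (see mem_cgroup_event_ratelimit() below), so
 * memcg thresholds are re-evaluated roughly every 128 page events and
 * the soft-limit tree roughly every 1024.
 */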

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
        struct rb_root rb_root;
        struct rb_node *rb_rightmost;
        spinlock_t lock;
};

struct mem_cgroup_tree {
        struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
        struct list_head list;
        struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
        /*
         * memcg which the event belongs to.
         */
        struct mem_cgroup *memcg;
        /*
         * eventfd to signal userspace about the event.
         */
        struct eventfd_ctx *eventfd;
        /*
         * Each of these is stored in a list by the cgroup.
         */
        struct list_head list;
        /*
         * register_event() callback will be used to add a new userspace
         * waiter for changes related to this event.  Use eventfd_signal()
         * on eventfd to send notification to userspace.
         */
        int (*register_event)(struct mem_cgroup *memcg,
                              struct eventfd_ctx *eventfd, const char *args);
        /*
         * unregister_event() callback will be called when userspace closes
         * the eventfd or when the cgroup is removed.  This callback must be
         * set if you want to provide notification functionality.
         */
        void (*unregister_event)(struct mem_cgroup *memcg,
                                 struct eventfd_ctx *eventfd);
        /*
         * All fields below are needed to unregister the event when
         * userspace closes the eventfd.
         */
        poll_table pt;
        wait_queue_head_t *wqh;
        wait_queue_entry_t wait;
        struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON       0x1U
#define MOVE_FILE       0x2U
#define MOVE_MASK       (MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
        spinlock_t        lock; /* for from, to */
        struct mm_struct  *mm;
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        unsigned long flags;
        unsigned long precharge;
        unsigned long moved_charge;
        unsigned long moved_swap;
        struct task_struct *moving_task;        /* a task moving charges */
        wait_queue_head_t waitq;                /* a waitq for other context */
} mc = {
        .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2

enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_ANON,
        MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
        MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
        NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
        _MEM,
        _MEMSWAP,
        _OOM_TYPE,
        _KMEM,
        _TCP,
};
#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
#define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)
/* Used for the OOM notifier */
#define OOM_CONTROL             (0)
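
/*
 * Illustrative round-trip of the encoding above: the private value packs
 * a res_type into the high 16 bits and an attribute into the low 16, so
 *
 *   MEMFILE_TYPE(MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL)) == _OOM_TYPE
 *   MEMFILE_ATTR(MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL)) == OOM_CONTROL
 */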

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)            \
        for (iter = mem_cgroup_iter(root, NULL, NULL);  \
             iter != NULL;                              \
             iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)                       \
        for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
             iter != NULL;                              \
             iter = mem_cgroup_iter(NULL, iter, NULL))
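
/*
 * Illustrative use (a sketch): aborting such a walk early requires
 * dropping the iterator's css reference, e.g.
 *
 *   for_each_mem_cgroup_tree(iter, root) {
 *           if (some_condition(iter)) {        // hypothetical predicate
 *                   mem_cgroup_iter_break(root, iter);
 *                   break;
 *           }
 *   }
 */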

static inline bool should_force_charge(void)
{
        return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
                (current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
        if (!memcg)
                memcg = root_mem_cgroup;
        return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
        return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_release(struct percpu_ref *ref)
{
        struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
        struct mem_cgroup *memcg;
        unsigned int nr_bytes;
        unsigned int nr_pages;
        unsigned long flags;

        /*
         * At this point all allocated objects are freed, and
         * objcg->nr_charged_bytes can't have an arbitrary byte value.
         * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
         *
         * The following sequence can lead to it:
         * 1) CPU0: objcg == stock->cached_objcg
         * 2) CPU1: we do a small allocation (e.g. 92 bytes),
         *          PAGE_SIZE bytes are charged
         * 3) CPU1: a process from another memcg is allocating something,
         *          the stock is flushed,
         *          objcg->nr_charged_bytes = PAGE_SIZE - 92
         * 4) CPU0: we release this object,
         *          92 bytes are added to stock->nr_bytes
         * 5) CPU0: stock is flushed,
         *          92 bytes are added to objcg->nr_charged_bytes
         *
         * As a result, nr_charged_bytes == PAGE_SIZE.
         * This page will be uncharged in obj_cgroup_release().
         */
        nr_bytes = atomic_read(&objcg->nr_charged_bytes);
        WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
        nr_pages = nr_bytes >> PAGE_SHIFT;

        spin_lock_irqsave(&css_set_lock, flags);
        memcg = obj_cgroup_memcg(objcg);
        if (nr_pages)
                __memcg_kmem_uncharge(memcg, nr_pages);
        list_del(&objcg->list);
        mem_cgroup_put(memcg);
        spin_unlock_irqrestore(&css_set_lock, flags);

        percpu_ref_exit(ref);
        kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
        struct obj_cgroup *objcg;
        int ret;

        objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
        if (!objcg)
                return NULL;

        ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
                              GFP_KERNEL);
        if (ret) {
                kfree(objcg);
                return NULL;
        }
        INIT_LIST_HEAD(&objcg->list);
        return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
                                  struct mem_cgroup *parent)
{
        struct obj_cgroup *objcg, *iter;

        objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

        spin_lock_irq(&css_set_lock);

        /* Move active objcg to the parent's list */
        xchg(&objcg->memcg, parent);
        css_get(&parent->css);
        list_add(&objcg->list, &parent->objcg_list);

        /* Move already reparented objcgs to the parent's list */
        list_for_each_entry(iter, &memcg->objcg_list, list) {
                css_get(&parent->css);
                xchg(&iter->memcg, parent);
                css_put(&memcg->css);
        }
        list_splice(&memcg->objcg_list, &parent->objcg_list);

        spin_unlock_irq(&css_set_lock);

        percpu_ref_kill(&objcg->refcnt);
}

/*
 * This will be used as a shrinker list's index.
 * The main reason for not using the cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few are kmem-limited. If we had, for instance, 200 memcgs,
 *  and none but the 200th was kmem-limited, we would still need a
 *  200-entry array.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
        down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
        up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
        kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
                                         int size, int old_size)
{
        struct memcg_shrinker_map *new, *old;
        int nid;

        lockdep_assert_held(&memcg_shrinker_map_mutex);

        for_each_node(nid) {
                old = rcu_dereference_protected(
                        mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
                /* Not yet online memcg */
                if (!old)
                        return 0;

                new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
                if (!new)
                        return -ENOMEM;

                /* Set all old bits, clear all new bits */
                memset(new->map, (int)0xff, old_size);
                memset((void *)new->map + old_size, 0, size - old_size);

                rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
                call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
        }

        return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
        struct mem_cgroup_per_node *pn;
        struct memcg_shrinker_map *map;
        int nid;

        if (mem_cgroup_is_root(memcg))
                return;

        for_each_node(nid) {
                pn = mem_cgroup_nodeinfo(memcg, nid);
                map = rcu_dereference_protected(pn->shrinker_map, true);
                if (map)
                        kvfree(map);
                rcu_assign_pointer(pn->shrinker_map, NULL);
        }
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
        struct memcg_shrinker_map *map;
        int nid, size, ret = 0;

        if (mem_cgroup_is_root(memcg))
                return 0;

        mutex_lock(&memcg_shrinker_map_mutex);
        size = memcg_shrinker_map_size;
        for_each_node(nid) {
                map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
                if (!map) {
                        memcg_free_shrinker_maps(memcg);
                        ret = -ENOMEM;
                        break;
                }
                rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
        }
        mutex_unlock(&memcg_shrinker_map_mutex);

        return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
        int size, old_size, ret = 0;
        struct mem_cgroup *memcg;

        size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
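        /*
         * Illustrative sizing, assuming 64-bit longs: new_id == 64 needs
         * bits 0..64, so DIV_ROUND_UP(65, 64) == 2 longs, i.e. 16 bytes.
         */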
        old_size = memcg_shrinker_map_size;
        if (size <= old_size)
                return 0;

        mutex_lock(&memcg_shrinker_map_mutex);
        if (!root_mem_cgroup)
                goto unlock;

        for_each_mem_cgroup(memcg) {
                if (mem_cgroup_is_root(memcg))
                        continue;
                ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
                if (ret) {
                        mem_cgroup_iter_break(NULL, memcg);
                        goto unlock;
                }
        }
unlock:
        if (!ret)
                memcg_shrinker_map_size = size;
        mutex_unlock(&memcg_shrinker_map_mutex);
        return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
        if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
                struct memcg_shrinker_map *map;

                rcu_read_lock();
                map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
                /* Pairs with smp mb in shrink_slab() */
                smp_mb__before_atomic();
                set_bit(shrinker_id, map->map);
                rcu_read_unlock();
        }
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
        struct mem_cgroup *memcg;

        memcg = page->mem_cgroup;

        if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                memcg = root_mem_cgroup;

        return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
        struct mem_cgroup *memcg;
        unsigned long ino = 0;

        rcu_read_lock();
        memcg = page->mem_cgroup;

        /*
         * The lowest bit set means that memcg isn't a valid
         * memcg pointer, but an obj_cgroups pointer.
         * In this case the page is shared and doesn't belong
         * to any specific memory cgroup.
         */
        if ((unsigned long) memcg & 0x1UL)
                memcg = NULL;

        while (memcg && !(memcg->css.flags & CSS_ONLINE))
                memcg = parent_mem_cgroup(memcg);
        if (memcg)
                ino = cgroup_ino(memcg->css.cgroup);
        rcu_read_unlock();
        return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
        int nid = page_to_nid(page);

        return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
        return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
        int nid = page_to_nid(page);

        return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
                                         struct mem_cgroup_tree_per_node *mctz,
                                         unsigned long new_usage_in_excess)
{
        struct rb_node **p = &mctz->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct mem_cgroup_per_node *mz_node;
        bool rightmost = true;

        if (mz->on_tree)
                return;

        mz->usage_in_excess = new_usage_in_excess;
        if (!mz->usage_in_excess)
                return;
        while (*p) {
                parent = *p;
                mz_node = rb_entry(parent, struct mem_cgroup_per_node,
                                        tree_node);
                if (mz->usage_in_excess < mz_node->usage_in_excess) {
                        p = &(*p)->rb_left;
                        rightmost = false;
                }

                /*
                 * We can't avoid mem cgroups that are over their soft
                 * limit by the same amount
                 */
                else if (mz->usage_in_excess >= mz_node->usage_in_excess)
                        p = &(*p)->rb_right;
        }

        if (rightmost)
                mctz->rb_rightmost = &mz->tree_node;

        rb_link_node(&mz->tree_node, parent, p);
        rb_insert_color(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
                                         struct mem_cgroup_tree_per_node *mctz)
{
        if (!mz->on_tree)
                return;

        if (&mz->tree_node == mctz->rb_rightmost)
                mctz->rb_rightmost = rb_prev(&mz->tree_node);

        rb_erase(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
                                       struct mem_cgroup_tree_per_node *mctz)
{
        unsigned long flags;

        spin_lock_irqsave(&mctz->lock, flags);
        __mem_cgroup_remove_exceeded(mz, mctz);
        spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
        unsigned long nr_pages = page_counter_read(&memcg->memory);
        unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
        unsigned long excess = 0;

        if (nr_pages > soft_limit)
                excess = nr_pages - soft_limit;

        return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
        unsigned long excess;
        struct mem_cgroup_per_node *mz;
        struct mem_cgroup_tree_per_node *mctz;

        mctz = soft_limit_tree_from_page(page);
        if (!mctz)
                return;
        /*
         * We have to update all ancestors when the hierarchy is used,
         * because their event counter is not touched.
         */
        for (; memcg; memcg = parent_mem_cgroup(memcg)) {
                mz = mem_cgroup_page_nodeinfo(memcg, page);
                excess = soft_limit_excess(memcg);
                /*
                 * We have to update the tree if mz is on the RB-tree or
                 * mem is over its soft limit.
                 */
                if (excess || mz->on_tree) {
                        unsigned long flags;

                        spin_lock_irqsave(&mctz->lock, flags);
                        /* if on-tree, remove it */
                        if (mz->on_tree)
                                __mem_cgroup_remove_exceeded(mz, mctz);
                        /*
                         * Insert again. mz->usage_in_excess will be updated.
                         * If excess is 0, no tree ops.
                         */
                        __mem_cgroup_insert_exceeded(mz, mctz, excess);
                        spin_unlock_irqrestore(&mctz->lock, flags);
                }
        }
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
        struct mem_cgroup_tree_per_node *mctz;
        struct mem_cgroup_per_node *mz;
        int nid;

        for_each_node(nid) {
                mz = mem_cgroup_nodeinfo(memcg, nid);
                mctz = soft_limit_tree_node(nid);
                if (mctz)
                        mem_cgroup_remove_exceeded(mz, mctz);
        }
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
        struct mem_cgroup_per_node *mz;

retry:
        mz = NULL;
        if (!mctz->rb_rightmost)
                goto done;              /* Nothing to reclaim from */

        mz = rb_entry(mctz->rb_rightmost,
                      struct mem_cgroup_per_node, tree_node);
        /*
         * Remove the node now but someone else can add it back;
         * we will add it back at the end of reclaim to its correct
         * position in the tree.
         */
        __mem_cgroup_remove_exceeded(mz, mctz);
        if (!soft_limit_excess(mz->memcg) ||
            !css_tryget(&mz->memcg->css))
                goto retry;
done:
        return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
        struct mem_cgroup_per_node *mz;

        spin_lock_irq(&mctz->lock);
        mz = __mem_cgroup_largest_soft_limit_node(mctz);
        spin_unlock_irq(&mctz->lock);
        return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
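 *
 * Illustrative behavior (a sketch of the batching below): deltas
 * accumulate in a per-cpu counter and are only flushed into the local
 * and hierarchical atomic counters once their magnitude exceeds
 * MEMCG_CHARGE_BATCH (scaled to bytes for byte-sized stat items).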
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
        long x, threshold = MEMCG_CHARGE_BATCH;

        if (mem_cgroup_disabled())
                return;

        if (memcg_stat_item_in_bytes(idx))
                threshold <<= PAGE_SHIFT;

        x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
        if (unlikely(abs(x) > threshold)) {
                struct mem_cgroup *mi;

                /*
                 * Batch local counters to keep them in sync with
                 * the hierarchical ones.
                 */
                __this_cpu_add(memcg->vmstats_local->stat[idx], x);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmstats[idx]);
                x = 0;
        }
        __this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
        struct mem_cgroup *parent;

        parent = parent_mem_cgroup(pn->memcg);
        if (!parent)
                return NULL;
        return mem_cgroup_nodeinfo(parent, nid);
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                              int val)
{
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup *memcg;
        long x, threshold = MEMCG_CHARGE_BATCH;

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        memcg = pn->memcg;

        /* Update memcg */
        __mod_memcg_state(memcg, idx, val);

        /* Update lruvec */
        __this_cpu_add(pn->lruvec_stat_local->count[idx], val);

        if (vmstat_item_in_bytes(idx))
                threshold <<= PAGE_SHIFT;

        x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
        if (unlikely(abs(x) > threshold)) {
                pg_data_t *pgdat = lruvec_pgdat(lruvec);
                struct mem_cgroup_per_node *pi;

                for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
                        atomic_long_add(x, &pi->lruvec_stat[idx]);
                x = 0;
        }
        __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val)
{
        /* Update node */
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

        /* Update memcg and lruvec */
        if (!mem_cgroup_disabled())
                __mod_memcg_lruvec_state(lruvec, idx, val);
}

void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
{
        pg_data_t *pgdat = page_pgdat(virt_to_page(p));
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = mem_cgroup_from_obj(p);

        /* Untracked pages have no memcg, no lruvec. Update only the node */
        if (!memcg || memcg == root_mem_cgroup) {
                __mod_node_page_state(pgdat, idx, val);
        } else {
                lruvec = mem_cgroup_lruvec(memcg, pgdat);
                __mod_lruvec_state(lruvec, idx, val);
        }
        rcu_read_unlock();
}

void mod_memcg_obj_state(void *p, int idx, int val)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = mem_cgroup_from_obj(p);
        if (memcg)
                mod_memcg_state(memcg, idx, val);
        rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                          unsigned long count)
{
        unsigned long x;

        if (mem_cgroup_disabled())
                return;

        x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
        if (unlikely(x > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup *mi;

                /*
                 * Batch local counters to keep them in sync with
                 * the hierarchical ones.
                 */
                __this_cpu_add(memcg->vmstats_local->events[idx], x);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmevents[idx]);
                x = 0;
        }
        __this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
        return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
        long x = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                x += per_cpu(memcg->vmstats_local->events[event], cpu);
        return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                                         struct page *page,
                                         int nr_pages)
{
        /* A pagein of a large page is one event, so ignore the page size */
        if (nr_pages > 0)
                __count_memcg_events(memcg, PGPGIN, 1);
        else {
                __count_memcg_events(memcg, PGPGOUT, 1);
                nr_pages = -nr_pages; /* for event */
        }

        __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
                                       enum mem_cgroup_events_target target)
{
        unsigned long val, next;

        val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
        next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
        /* from time_after() in jiffies.h */
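        /*
         * Illustrative wraparound case: with val == ULONG_MAX and
         * next == 2 (a target that wrapped), (long)(next - val) == 3 > 0,
         * so the target is correctly treated as not yet reached.
         */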
        if ((long)(next - val) < 0) {
                switch (target) {
                case MEM_CGROUP_TARGET_THRESH:
                        next = val + THRESHOLDS_EVENTS_TARGET;
                        break;
                case MEM_CGROUP_TARGET_SOFTLIMIT:
                        next = val + SOFTLIMIT_EVENTS_TARGET;
                        break;
                default:
                        break;
                }
                __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
                return true;
        }
        return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_THRESH))) {
                bool do_softlimit;

                do_softlimit = mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_SOFTLIMIT);
                mem_cgroup_threshold(memcg);
                if (unlikely(do_softlimit))
                        mem_cgroup_update_tree(memcg, page);
        }
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        /*
         * mm_update_next_owner() may clear mm->owner to NULL
         * if it races with swapoff, page migration, etc.
         * So this can be called with p == NULL.
         */
        if (unlikely(!p))
                return NULL;

        return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on a given mm_struct's memcg.
 * @mm: mm from which the memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned. However, if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return NULL;

        rcu_read_lock();
        do {
                /*
                 * Page cache insertions can happen without an
                 * actual mm context, e.g. during disk probing
                 * on boot, loopback IO, acct() writes etc.
                 */
                if (unlikely(!mm))
                        memcg = root_mem_cgroup;
                else {
                        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
                        if (unlikely(!memcg))
                                memcg = root_mem_cgroup;
                }
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on a given page's memcg.
 * @page: page from which the memcg should be extracted.
 *
 * Obtain a reference on page->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
        struct mem_cgroup *memcg = page->mem_cgroup;

        if (mem_cgroup_disabled())
                return NULL;

        rcu_read_lock();
        /* The page's memcg should not be uncharged and freed under us. */
        if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
                memcg = root_mem_cgroup;
        rcu_read_unlock();
        return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);
1071
1072 /**
1073  * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg.
1074  */
1075 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1076 {
1077         if (unlikely(current->active_memcg)) {
1078                 struct mem_cgroup *memcg;
1079
1080                 rcu_read_lock();
1081                 /* current->active_memcg must hold a ref. */
1082                 if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
1083                         memcg = root_mem_cgroup;
1084                 else
1085                         memcg = current->active_memcg;
1086                 rcu_read_unlock();
1087                 return memcg;
1088         }
1089         return get_mem_cgroup_from_mm(current->mm);
1090 }
1091

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
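 *
 * Illustrative full walk (a sketch): visiting every memcg below @root
 * once, passing each result back in as @prev:
 *
 *   memcg = mem_cgroup_iter(root, NULL, NULL);
 *   while (memcg) {
 *           do_something(memcg);               // hypothetical helper
 *           memcg = mem_cgroup_iter(root, memcg, NULL);
 *   }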
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                   struct mem_cgroup *prev,
                                   struct mem_cgroup_reclaim_cookie *reclaim)
{
        struct mem_cgroup_reclaim_iter *iter;
        struct cgroup_subsys_state *css = NULL;
        struct mem_cgroup *memcg = NULL;
        struct mem_cgroup *pos = NULL;

        if (mem_cgroup_disabled())
                return NULL;

        if (!root)
                root = root_mem_cgroup;

        if (prev && !reclaim)
                pos = prev;

        if (!root->use_hierarchy && root != root_mem_cgroup) {
                if (prev)
                        goto out;
                return root;
        }

        rcu_read_lock();

        if (reclaim) {
                struct mem_cgroup_per_node *mz;

                mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
                iter = &mz->iter;

                if (prev && reclaim->generation != iter->generation)
                        goto out_unlock;

                while (1) {
                        pos = READ_ONCE(iter->position);
                        if (!pos || css_tryget(&pos->css))
                                break;
                        /*
                         * css reference reached zero, so iter->position will
                         * be cleared by ->css_released. However, we should not
                         * rely on this happening soon, because ->css_released
                         * is called from a work queue, and by busy-waiting we
                         * might block it. So we clear iter->position right
                         * away.
                         */
                        (void)cmpxchg(&iter->position, pos, NULL);
                }
        }

        if (pos)
                css = &pos->css;

        for (;;) {
                css = css_next_descendant_pre(css, &root->css);
                if (!css) {
                        /*
                         * Reclaimers share the hierarchy walk, and a
                         * new one might jump in right at the end of
                         * the hierarchy - make sure they see at least
                         * one group and restart from the beginning.
                         */
                        if (!prev)
                                continue;
                        break;
                }

                /*
                 * Verify the css and acquire a reference.  The root
                 * is provided by the caller, so we know it's alive
                 * and kicking, and don't take an extra reference.
                 */
                memcg = mem_cgroup_from_css(css);

                if (css == &root->css)
                        break;

                if (css_tryget(css))
                        break;

                memcg = NULL;
        }

        if (reclaim) {
                /*
                 * The position could have already been updated by a competing
                 * thread, so check that the value hasn't changed since we read
                 * it to avoid reclaiming from the same cgroup twice.
                 */
                (void)cmpxchg(&iter->position, pos, memcg);

                if (pos)
                        css_put(&pos->css);

                if (!memcg)
                        iter->generation++;
                else if (!prev)
                        reclaim->generation = iter->generation;
        }

out_unlock:
        rcu_read_unlock();
out:
        if (prev && prev != root)
                css_put(&prev->css);

        return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
                           struct mem_cgroup *prev)
{
        if (!root)
                root = root_mem_cgroup;
        if (prev && prev != root)
                css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
                                        struct mem_cgroup *dead_memcg)
{
        struct mem_cgroup_reclaim_iter *iter;
        struct mem_cgroup_per_node *mz;
        int nid;

        for_each_node(nid) {
                mz = mem_cgroup_nodeinfo(from, nid);
                iter = &mz->iter;
                cmpxchg(&iter->position, dead_memcg, NULL);
        }
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
        struct mem_cgroup *memcg = dead_memcg;
        struct mem_cgroup *last;

        do {
                __invalidate_reclaim_iterators(memcg, dead_memcg);
                last = memcg;
        } while ((memcg = parent_mem_cgroup(memcg)));

        /*
         * When cgroup1 non-hierarchy mode is used,
         * parent_mem_cgroup() does not walk all the way up to the
         * cgroup root (root_mem_cgroup). So we have to handle
         * dead_memcg from the cgroup root separately.
         */
        if (last != root_mem_cgroup)
                __invalidate_reclaim_iterators(root_mem_cgroup,
                                                dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
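 *
 * Illustrative callback (a sketch, using a hypothetical helper) counting
 * all tasks in the hierarchy:
 *
 *   static int count_task(struct task_struct *task, void *arg)
 *   {
 *           (*(unsigned long *)arg)++;
 *           return 0;       // keep iterating
 *   }
 *
 *   mem_cgroup_scan_tasks(memcg, count_task, &nr_tasks);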
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                          int (*fn)(struct task_struct *, void *), void *arg)
{
        struct mem_cgroup *iter;
        int ret = 0;

        BUG_ON(memcg == root_mem_cgroup);

        for_each_mem_cgroup_tree(iter, memcg) {
                struct css_task_iter it;
                struct task_struct *task;

                css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
                while (!ret && (task = css_task_iter_next(&it)))
                        ret = fn(task, arg);
                css_task_iter_end(&it);
                if (ret) {
                        mem_cgroup_iter_break(memcg, iter);
                        break;
                }
        }
        return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function relies on page->mem_cgroup being stable - see the
 * access rules in commit_charge().
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
        struct mem_cgroup_per_node *mz;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = &pgdat->__lruvec;
                goto out;
        }

        memcg = page->mem_cgroup;
        /*
         * Swapcache readahead pages are added to the LRU - and
         * possibly migrated - before they are charged.
         */
        if (!memcg)
                memcg = root_mem_cgroup;

        mz = mem_cgroup_page_nodeinfo(memcg, page);
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then reonlined, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages)
{
        struct mem_cgroup_per_node *mz;
        unsigned long *lru_size;
        long size;

        if (mem_cgroup_disabled())
                return;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        lru_size = &mz->lru_zone_size[zid][lru];

        if (nr_pages < 0)
                *lru_size += nr_pages;

        size = *lru_size;
        if (WARN_ONCE(size < 0,
                "%s(%p, %d, %d): lru_size %ld\n",
                __func__, lruvec, lru, nr_pages, size)) {
                VM_BUG_ON(1);
                *lru_size = 0;
        }

        if (nr_pages > 0)
                *lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
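 *
 * Illustrative figures: with a 100-page limit and 70 pages charged, the
 * margin is 30 pages; if memsw is additionally limited to 80 pages with
 * 75 charged, the combined margin is min(30, 5) == 5 pages.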
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
        unsigned long margin = 0;
        unsigned long count;
        unsigned long limit;

        count = page_counter_read(&memcg->memory);
        limit = READ_ONCE(memcg->memory.max);
        if (count < limit)
                margin = limit - count;

        if (do_memsw_account()) {
                count = page_counter_read(&memcg->memsw);
                limit = READ_ONCE(memcg->memsw.max);
                if (count < limit)
                        margin = min(margin, limit - count);
                else
                        margin = 0;
        }

        return margin;
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * either moving cgroup. This is used for waiting out high memory pressure
 * caused by a "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        bool ret = false;
        /*
         * Unlike task_move routines, we access mc.to and mc.from not under
         * mutual exclusion by cgroup_mutex. Here, we take the spinlock
         * instead.
         */
        spin_lock(&mc.lock);
        from = mc.from;
        to = mc.to;
        if (!from)
                goto unlock;

        ret = mem_cgroup_is_descendant(from, memcg) ||
                mem_cgroup_is_descendant(to, memcg);
unlock:
        spin_unlock(&mc.lock);
        return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
        if (mc.moving_task && current != mc.moving_task) {
                if (mem_cgroup_under_move(memcg)) {
                        DEFINE_WAIT(wait);
                        prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
                        /* moving charge context might have finished. */
                        if (mc.moving_task)
                                schedule();
                        finish_wait(&mc.waitq, &wait);
                        return true;
                }
        }
        return false;
}

static char *memory_stat_format(struct mem_cgroup *memcg)
{
        struct seq_buf s;
        int i;

        seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
        if (!s.buffer)
                return NULL;

        /*
         * Provide statistics on the state of the memory subsystem as
         * well as cumulative event counters that show past behavior.
         *
         * This list is ordered following a combination of these gradients:
         * 1) generic big picture -> specifics and details
         * 2) reflecting userspace activity -> reflecting kernel heuristics
         *
         * Current memory state:
         */

        seq_buf_printf(&s, "anon %llu\n",
                       (u64)memcg_page_state(memcg, NR_ANON_MAPPED) *
                       PAGE_SIZE);
        seq_buf_printf(&s, "file %llu\n",
                       (u64)memcg_page_state(memcg, NR_FILE_PAGES) *
                       PAGE_SIZE);
        seq_buf_printf(&s, "kernel_stack %llu\n",
                       (u64)memcg_page_state(memcg, NR_KERNEL_STACK_KB) *
                       1024);
        seq_buf_printf(&s, "slab %llu\n",
                       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
                             memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B)));
        seq_buf_printf(&s, "percpu %llu\n",
                       (u64)memcg_page_state(memcg, MEMCG_PERCPU_B));
        seq_buf_printf(&s, "sock %llu\n",
                       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
                       PAGE_SIZE);

        seq_buf_printf(&s, "shmem %llu\n",
                       (u64)memcg_page_state(memcg, NR_SHMEM) *
                       PAGE_SIZE);
        seq_buf_printf(&s, "file_mapped %llu\n",
                       (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
                       PAGE_SIZE);
        seq_buf_printf(&s, "file_dirty %llu\n",
                       (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
                       PAGE_SIZE);
        seq_buf_printf(&s, "file_writeback %llu\n",
                       (u64)memcg_page_state(memcg, NR_WRITEBACK) *
                       PAGE_SIZE);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        seq_buf_printf(&s, "anon_thp %llu\n",
                       (u64)memcg_page_state(memcg, NR_ANON_THPS) *
                       HPAGE_PMD_SIZE);
#endif

        for (i = 0; i < NR_LRU_LISTS; i++)
                seq_buf_printf(&s, "%s %llu\n", lru_list_name(i),
                               (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
                               PAGE_SIZE);

        seq_buf_printf(&s, "slab_reclaimable %llu\n",
                       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B));
        seq_buf_printf(&s, "slab_unreclaimable %llu\n",
                       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B));

        /* Accumulated memory events */

        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
                       memcg_events(memcg, PGFAULT));
        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
                       memcg_events(memcg, PGMAJFAULT));

        seq_buf_printf(&s, "workingset_refault_anon %lu\n",
                       memcg_page_state(memcg, WORKINGSET_REFAULT_ANON));
        seq_buf_printf(&s, "workingset_refault_file %lu\n",
                       memcg_page_state(memcg, WORKINGSET_REFAULT_FILE));
        seq_buf_printf(&s, "workingset_activate_anon %lu\n",
                       memcg_page_state(memcg, WORKINGSET_ACTIVATE_ANON));
        seq_buf_printf(&s, "workingset_activate_file %lu\n",
                       memcg_page_state(memcg, WORKINGSET_ACTIVATE_FILE));
1541         seq_buf_printf(&s, "workingset_restore_anon %lu\n",
1542                        memcg_page_state(memcg, WORKINGSET_RESTORE_ANON));
1543         seq_buf_printf(&s, "workingset_restore_file %lu\n",
1544                        memcg_page_state(memcg, WORKINGSET_RESTORE_FILE));
1545         seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
1546                        memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
1547
1548         seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1549                        memcg_events(memcg, PGREFILL));
1550         seq_buf_printf(&s, "pgscan %lu\n",
1551                        memcg_events(memcg, PGSCAN_KSWAPD) +
1552                        memcg_events(memcg, PGSCAN_DIRECT));
1553         seq_buf_printf(&s, "pgsteal %lu\n",
1554                        memcg_events(memcg, PGSTEAL_KSWAPD) +
1555                        memcg_events(memcg, PGSTEAL_DIRECT));
1556         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1557                        memcg_events(memcg, PGACTIVATE));
1558         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1559                        memcg_events(memcg, PGDEACTIVATE));
1560         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1561                        memcg_events(memcg, PGLAZYFREE));
1562         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1563                        memcg_events(memcg, PGLAZYFREED));
1564
1565 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1566         seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1567                        memcg_events(memcg, THP_FAULT_ALLOC));
1568         seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1569                        memcg_events(memcg, THP_COLLAPSE_ALLOC));
1570 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1571
1572         /* The above should easily fit into one page */
1573         WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1574
1575         return s.buffer;
1576 }
1577
1578 #define K(x) ((x) << (PAGE_SHIFT-10))
1579 /**
1580  * mem_cgroup_print_oom_context: Print OOM information relevant to
1581  * the memory controller.
1582  * @memcg: The memory cgroup that went over limit
1583  * @p: Task that is going to be killed
1584  *
1585  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1586  * enabled
1587  */
1588 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1589 {
1590         rcu_read_lock();
1591
1592         if (memcg) {
1593                 pr_cont(",oom_memcg=");
1594                 pr_cont_cgroup_path(memcg->css.cgroup);
1595         } else
1596                 pr_cont(",global_oom");
1597         if (p) {
1598                 pr_cont(",task_memcg=");
1599                 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1600         }
1601         rcu_read_unlock();
1602 }
1603
1604 /**
1605  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1606  * the memory controller.
1607  * @memcg: The memory cgroup that went over limit
1608  */
1609 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1610 {
1611         char *buf;
1612
1613         pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1614                 K((u64)page_counter_read(&memcg->memory)),
1615                 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1616         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1617                 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1618                         K((u64)page_counter_read(&memcg->swap)),
1619                         K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1620         else {
1621                 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1622                         K((u64)page_counter_read(&memcg->memsw)),
1623                         K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1624                 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1625                         K((u64)page_counter_read(&memcg->kmem)),
1626                         K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1627         }
1628
1629         pr_info("Memory cgroup stats for ");
1630         pr_cont_cgroup_path(memcg->css.cgroup);
1631         pr_cont(":");
1632         buf = memory_stat_format(memcg);
1633         if (!buf)
1634                 return;
1635         pr_info("%s", buf);
1636         kfree(buf);
1637 }
1638
1639 /*
1640  * Return the memory (and swap, if configured) limit for a memcg.
1641  */
1642 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1643 {
1644         unsigned long max;
1645
1646         max = READ_ONCE(memcg->memory.max);
1647         if (mem_cgroup_swappiness(memcg)) {
1648                 unsigned long memsw_max;
1649                 unsigned long swap_max;
1650
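                /*
                 * cgroup2 limits memory and swap separately, so the
                 * effective maximum is memory.max plus usable swap;
                 * cgroup1 caps the combined footprint at memsw.max.
                 */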
1651                 memsw_max = READ_ONCE(memcg->memsw.max);
1652                 swap_max = READ_ONCE(memcg->swap.max);
1653                 swap_max = min(swap_max, (unsigned long)total_swap_pages);
1654                 max = min(max + swap_max, memsw_max);
1655         }
1656         return max;
1657 }
1658
1659 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1660 {
1661         return page_counter_read(&memcg->memory);
1662 }
1663
1664 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1665                                      int order)
1666 {
1667         struct oom_control oc = {
1668                 .zonelist = NULL,
1669                 .nodemask = NULL,
1670                 .memcg = memcg,
1671                 .gfp_mask = gfp_mask,
1672                 .order = order,
1673         };
1674         bool ret = true;
1675
1676         if (mutex_lock_killable(&oom_lock))
1677                 return true;
1678
1679         if (mem_cgroup_margin(memcg) >= (1 << order))
1680                 goto unlock;
1681
1682         /*
1683          * A few threads which were not waiting at mutex_lock_killable() can
1684          * fail to bail out. Therefore, check again after holding oom_lock.
1685          */
1686         ret = should_force_charge() || out_of_memory(&oc);
1687
1688 unlock:
1689         mutex_unlock(&oom_lock);
1690         return ret;
1691 }
1692
1693 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1694                                    pg_data_t *pgdat,
1695                                    gfp_t gfp_mask,
1696                                    unsigned long *total_scanned)
1697 {
1698         struct mem_cgroup *victim = NULL;
1699         int total = 0;
1700         int loop = 0;
1701         unsigned long excess;
1702         unsigned long nr_scanned;
1703         struct mem_cgroup_reclaim_cookie reclaim = {
1704                 .pgdat = pgdat,
1705         };
1706
1707         excess = soft_limit_excess(root_memcg);
1708
1709         while (1) {
1710                 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1711                 if (!victim) {
1712                         loop++;
1713                         if (loop >= 2) {
1714                                 /*
1715                                  * If we have not been able to reclaim
1716                                  * anything, it might be because there are
1717                                  * no reclaimable pages under this hierarchy.
1718                                  */
1719                                 if (!total)
1720                                         break;
1721                                 /*
1722                                  * We want to do more targeted reclaim.
1723                                  * excess >> 2 is not too large, so we won't
1724                                  * reclaim too much, nor too small, so we won't
1725                                  * keep coming back to reclaim from this cgroup.
1726                                  */
1727                                 if (total >= (excess >> 2) ||
1728                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1729                                         break;
1730                         }
1731                         continue;
1732                 }
1733                 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1734                                         pgdat, &nr_scanned);
1735                 *total_scanned += nr_scanned;
1736                 if (!soft_limit_excess(root_memcg))
1737                         break;
1738         }
1739         mem_cgroup_iter_break(root_memcg, victim);
1740         return total;
1741 }
1742
1743 #ifdef CONFIG_LOCKDEP
1744 static struct lockdep_map memcg_oom_lock_dep_map = {
1745         .name = "memcg_oom_lock",
1746 };
1747 #endif
1748
1749 static DEFINE_SPINLOCK(memcg_oom_lock);
1750
1751 /*
1752  * Check whether the OOM killer is already running under our hierarchy.
1753  * If someone else is running it, return false.
1754  */
1755 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1756 {
1757         struct mem_cgroup *iter, *failed = NULL;
1758
1759         spin_lock(&memcg_oom_lock);
1760
1761         for_each_mem_cgroup_tree(iter, memcg) {
1762                 if (iter->oom_lock) {
1763                         /*
1764                          * This subtree of our hierarchy is already locked,
1765                          * so we cannot take the lock.
1766                          */
1767                         failed = iter;
1768                         mem_cgroup_iter_break(memcg, iter);
1769                         break;
1770                 } else
1771                         iter->oom_lock = true;
1772         }
1773
1774         if (failed) {
1775                 /*
1776                  * OK, we failed to lock the whole subtree, so we have
1777                  * to clean up what we set up on the subtrees we did lock.
1778                  */
1779                 for_each_mem_cgroup_tree(iter, memcg) {
1780                         if (iter == failed) {
1781                                 mem_cgroup_iter_break(memcg, iter);
1782                                 break;
1783                         }
1784                         iter->oom_lock = false;
1785                 }
1786         } else
1787                 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1788
1789         spin_unlock(&memcg_oom_lock);
1790
1791         return !failed;
1792 }
1793
1794 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1795 {
1796         struct mem_cgroup *iter;
1797
1798         spin_lock(&memcg_oom_lock);
1799         mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1800         for_each_mem_cgroup_tree(iter, memcg)
1801                 iter->oom_lock = false;
1802         spin_unlock(&memcg_oom_lock);
1803 }
1804
1805 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1806 {
1807         struct mem_cgroup *iter;
1808
1809         spin_lock(&memcg_oom_lock);
1810         for_each_mem_cgroup_tree(iter, memcg)
1811                 iter->under_oom++;
1812         spin_unlock(&memcg_oom_lock);
1813 }
1814
1815 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1816 {
1817         struct mem_cgroup *iter;
1818
1819         /*
1820          * When a new child is created while the hierarchy is under oom,
1821          * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1822          */
1823         spin_lock(&memcg_oom_lock);
1824         for_each_mem_cgroup_tree(iter, memcg)
1825                 if (iter->under_oom > 0)
1826                         iter->under_oom--;
1827         spin_unlock(&memcg_oom_lock);
1828 }
1829
1830 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1831
1832 struct oom_wait_info {
1833         struct mem_cgroup *memcg;
1834         wait_queue_entry_t      wait;
1835 };
1836
1837 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1838         unsigned mode, int sync, void *arg)
1839 {
1840         struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1841         struct mem_cgroup *oom_wait_memcg;
1842         struct oom_wait_info *oom_wait_info;
1843
1844         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1845         oom_wait_memcg = oom_wait_info->memcg;
1846
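        /*
         * Only wake this waiter if the OOMing memcg and the memcg it is
         * waiting on belong to the same hierarchy branch; unrelated
         * cgroups keep sleeping.
         */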
1847         if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1848             !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1849                 return 0;
1850         return autoremove_wake_function(wait, mode, sync, arg);
1851 }
1852
1853 static void memcg_oom_recover(struct mem_cgroup *memcg)
1854 {
1855         /*
1856          * For the following lockless ->under_oom test, the only required
1857          * guarantee is that it must see the state asserted by an OOM when
1858          * this function is called as a result of userland actions
1859          * triggered by the notification of the OOM.  This is trivially
1860          * achieved by invoking mem_cgroup_mark_under_oom() before
1861          * triggering notification.
1862          */
1863         if (memcg && memcg->under_oom)
1864                 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1865 }
1866
1867 enum oom_status {
1868         OOM_SUCCESS,
1869         OOM_FAILED,
1870         OOM_ASYNC,
1871         OOM_SKIPPED
1872 };
1873
1874 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1875 {
1876         enum oom_status ret;
1877         bool locked;
1878
1879         if (order > PAGE_ALLOC_COSTLY_ORDER)
1880                 return OOM_SKIPPED;
1881
1882         memcg_memory_event(memcg, MEMCG_OOM);
1883
1884         /*
1885          * We are in the middle of the charge context here, so we
1886          * don't want to block when potentially sitting on a callstack
1887          * that holds all kinds of filesystem and mm locks.
1888          *
1889          * cgroup1 allows disabling the OOM killer and waiting for outside
1890          * handling until the charge can succeed; remember the context and put
1891          * the task to sleep at the end of the page fault when all locks are
1892          * released.
1893          *
1894          * On the other hand, the in-kernel OOM killer allows for async victim
1895          * memory reclaim (oom_reaper), which means that we are not solely
1896          * relying on the OOM victim to make forward progress, so we can
1897          * invoke the oom killer here.
1898          *
1899          * Please note that mem_cgroup_out_of_memory might fail to find a
1900          * victim and then we have to bail out from the charge path.
1901          */
1902         if (memcg->oom_kill_disable) {
1903                 if (!current->in_user_fault)
1904                         return OOM_SKIPPED;
1905                 css_get(&memcg->css);
1906                 current->memcg_in_oom = memcg;
1907                 current->memcg_oom_gfp_mask = mask;
1908                 current->memcg_oom_order = order;
1909
1910                 return OOM_ASYNC;
1911         }
1912
1913         mem_cgroup_mark_under_oom(memcg);
1914
1915         locked = mem_cgroup_oom_trylock(memcg);
1916
1917         if (locked)
1918                 mem_cgroup_oom_notify(memcg);
1919
1920         mem_cgroup_unmark_under_oom(memcg);
1921         if (mem_cgroup_out_of_memory(memcg, mask, order))
1922                 ret = OOM_SUCCESS;
1923         else
1924                 ret = OOM_FAILED;
1925
1926         if (locked)
1927                 mem_cgroup_oom_unlock(memcg);
1928
1929         return ret;
1930 }
1931
1932 /**
1933  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1934  * @handle: actually kill/wait or just clean up the OOM state
1935  *
1936  * This has to be called at the end of a page fault if the memcg OOM
1937  * handler was enabled.
1938  *
1939  * Memcg supports userspace OOM handling where failed allocations must
1940  * sleep on a waitqueue until the userspace task resolves the
1941  * situation.  Sleeping directly in the charge context with all kinds
1942  * of locks held is not a good idea, instead we remember an OOM state
1943  * in the task and mem_cgroup_oom_synchronize() has to be called at
1944  * the end of the page fault to complete the OOM handling.
1945  *
1946  * Returns %true if an ongoing memcg OOM situation was detected and
1947  * completed, %false otherwise.
1948  */
1949 bool mem_cgroup_oom_synchronize(bool handle)
1950 {
1951         struct mem_cgroup *memcg = current->memcg_in_oom;
1952         struct oom_wait_info owait;
1953         bool locked;
1954
1955         /* OOM is global, do not handle */
1956         if (!memcg)
1957                 return false;
1958
1959         if (!handle)
1960                 goto cleanup;
1961
1962         owait.memcg = memcg;
1963         owait.wait.flags = 0;
1964         owait.wait.func = memcg_oom_wake_function;
1965         owait.wait.private = current;
1966         INIT_LIST_HEAD(&owait.wait.entry);
1967
1968         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1969         mem_cgroup_mark_under_oom(memcg);
1970
1971         locked = mem_cgroup_oom_trylock(memcg);
1972
1973         if (locked)
1974                 mem_cgroup_oom_notify(memcg);
1975
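        /*
         * Holding the OOM lock with the kernel OOM killer enabled means
         * we can kill directly from here; otherwise sleep until the
         * userspace handler or a concurrent OOM kill wakes us up.
         */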
1976         if (locked && !memcg->oom_kill_disable) {
1977                 mem_cgroup_unmark_under_oom(memcg);
1978                 finish_wait(&memcg_oom_waitq, &owait.wait);
1979                 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1980                                          current->memcg_oom_order);
1981         } else {
1982                 schedule();
1983                 mem_cgroup_unmark_under_oom(memcg);
1984                 finish_wait(&memcg_oom_waitq, &owait.wait);
1985         }
1986
1987         if (locked) {
1988                 mem_cgroup_oom_unlock(memcg);
1989                 /*
1990                  * There is no guarantee that an OOM-lock contender
1991                  * sees the wakeups triggered by the OOM kill
1992                  * uncharges.  Wake any sleepers explicitly.
1993                  */
1994                 memcg_oom_recover(memcg);
1995         }
1996 cleanup:
1997         current->memcg_in_oom = NULL;
1998         css_put(&memcg->css);
1999         return true;
2000 }
2001
2002 /**
2003  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2004  * @victim: task to be killed by the OOM killer
2005  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2006  *
2007  * Returns a pointer to a memory cgroup, which has to be cleaned up
2008  * by killing all belonging OOM-killable tasks.
2009  * by killing all of its OOM-killable tasks.
2010  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2011  */
2012 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2013                                             struct mem_cgroup *oom_domain)
2014 {
2015         struct mem_cgroup *oom_group = NULL;
2016         struct mem_cgroup *memcg;
2017
2018         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2019                 return NULL;
2020
2021         if (!oom_domain)
2022                 oom_domain = root_mem_cgroup;
2023
2024         rcu_read_lock();
2025
2026         memcg = mem_cgroup_from_task(victim);
2027         if (memcg == root_mem_cgroup)
2028                 goto out;
2029
2030         /*
2031          * If the victim task has been asynchronously moved to a different
2032          * memory cgroup, we might end up killing tasks outside oom_domain.
2033          * In this case it's better to ignore memory.group.oom.
2034          */
2035         if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2036                 goto out;
2037
2038         /*
2039          * Traverse the memory cgroup hierarchy from the victim task's
2040          * cgroup up to the OOMing cgroup (or root) to find the
2041          * highest-level memory cgroup with oom.group set.
2042          */
2043         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2044                 if (memcg->oom_group)
2045                         oom_group = memcg;
2046
2047                 if (memcg == oom_domain)
2048                         break;
2049         }
2050
2051         if (oom_group)
2052                 css_get(&oom_group->css);
2053 out:
2054         rcu_read_unlock();
2055
2056         return oom_group;
2057 }
2058
2059 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2060 {
2061         pr_info("Tasks in ");
2062         pr_cont_cgroup_path(memcg->css.cgroup);
2063         pr_cont(" are going to be killed due to memory.oom.group set\n");
2064 }
2065
2066 /**
2067  * lock_page_memcg - lock a page->mem_cgroup binding
2068  * @page: the page
2069  *
2070  * This function protects unlocked LRU pages from being moved to
2071  * another cgroup.
2072  *
2073  * It ensures the lifetime of the returned memcg. The caller is responsible
2074  * for the lifetime of the page; __unlock_page_memcg() is available
2075  * when @page might get freed inside the locked section.
2076  */
2077 struct mem_cgroup *lock_page_memcg(struct page *page)
2078 {
2079         struct page *head = compound_head(page); /* rmap on tail pages */
2080         struct mem_cgroup *memcg;
2081         unsigned long flags;
2082
2083         /*
2084          * The RCU lock is held throughout the transaction.  The fast
2085          * path can get away without acquiring the memcg->move_lock
2086          * because page moving starts with an RCU grace period.
2087          *
2088          * The RCU lock also protects the memcg from being freed when
2089          * the page state that is going to change is the only thing
2090          * preventing the page itself from being freed. E.g. writeback
2091          * doesn't hold a page reference and relies on PG_writeback to
2092          * keep off truncation, migration and so forth.
2093          */
2094         rcu_read_lock();
2095
2096         if (mem_cgroup_disabled())
2097                 return NULL;
2098 again:
2099         memcg = head->mem_cgroup;
2100         if (unlikely(!memcg))
2101                 return NULL;
2102
2103         if (atomic_read(&memcg->moving_account) <= 0)
2104                 return memcg;
2105
2106         spin_lock_irqsave(&memcg->move_lock, flags);
2107         if (memcg != head->mem_cgroup) {
2108                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2109                 goto again;
2110         }
2111
2112         /*
2113          * When charge migration first begins, we can have locked and
2114          * unlocked page stat updates happening concurrently.  Track
2115          * the task that holds the lock for unlock_page_memcg().
2116          */
2117         memcg->move_lock_task = current;
2118         memcg->move_lock_flags = flags;
2119
2120         return memcg;
2121 }
2122 EXPORT_SYMBOL(lock_page_memcg);
2123
2124 /**
2125  * __unlock_page_memcg - unlock and unpin a memcg
2126  * @memcg: the memcg
2127  *
2128  * Unlock and unpin a memcg returned by lock_page_memcg().
2129  */
2130 void __unlock_page_memcg(struct mem_cgroup *memcg)
2131 {
2132         if (memcg && memcg->move_lock_task == current) {
2133                 unsigned long flags = memcg->move_lock_flags;
2134
2135                 memcg->move_lock_task = NULL;
2136                 memcg->move_lock_flags = 0;
2137
2138                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2139         }
2140
2141         rcu_read_unlock();
2142 }
2143
2144 /**
2145  * unlock_page_memcg - unlock a page->mem_cgroup binding
2146  * @page: the page
2147  */
2148 void unlock_page_memcg(struct page *page)
2149 {
2150         struct page *head = compound_head(page);
2151
2152         __unlock_page_memcg(head->mem_cgroup);
2153 }
2154 EXPORT_SYMBOL(unlock_page_memcg);
2155
2156 struct memcg_stock_pcp {
2157         struct mem_cgroup *cached; /* this is never the root cgroup */
2158         unsigned int nr_pages;
2159
2160 #ifdef CONFIG_MEMCG_KMEM
2161         struct obj_cgroup *cached_objcg;
2162         unsigned int nr_bytes;
2163 #endif
2164
2165         struct work_struct work;
2166         unsigned long flags;
2167 #define FLUSHING_CACHED_CHARGE  0
2168 };
2169 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2170 static DEFINE_MUTEX(percpu_charge_mutex);
2171
2172 #ifdef CONFIG_MEMCG_KMEM
2173 static void drain_obj_stock(struct memcg_stock_pcp *stock);
2174 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2175                                      struct mem_cgroup *root_memcg);
2176
2177 #else
2178 static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
2179 {
2180 }
2181 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2182                                      struct mem_cgroup *root_memcg)
2183 {
2184         return false;
2185 }
2186 #endif
2187
2188 /**
2189  * consume_stock: Try to consume stocked charge on this cpu.
2190  * @memcg: memcg to consume from.
2191  * @nr_pages: how many pages to charge.
2192  *
2193  * The charge will only happen if @memcg matches the current cpu's memcg
2194  * stock, and at least @nr_pages are available in that stock.  On failure,
2195  * the slow charge path will refill the stock.
2196  *
2197  * returns true if successful, false otherwise.
2198  */
2199 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2200 {
2201         struct memcg_stock_pcp *stock;
2202         unsigned long flags;
2203         bool ret = false;
2204
2205         if (nr_pages > MEMCG_CHARGE_BATCH)
2206                 return ret;
2207
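        /*
         * Disable IRQs to protect the per-cpu stock: charges can also
         * arrive from interrupt context (e.g. socket memory).
         */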
2208         local_irq_save(flags);
2209
2210         stock = this_cpu_ptr(&memcg_stock);
2211         if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2212                 stock->nr_pages -= nr_pages;
2213                 ret = true;
2214         }
2215
2216         local_irq_restore(flags);
2217
2218         return ret;
2219 }
2220
2221 /*
2222  * Return stocked charges to the counters and reset the cached information.
2223  */
2224 static void drain_stock(struct memcg_stock_pcp *stock)
2225 {
2226         struct mem_cgroup *old = stock->cached;
2227
2228         if (!old)
2229                 return;
2230
2231         if (stock->nr_pages) {
2232                 page_counter_uncharge(&old->memory, stock->nr_pages);
2233                 if (do_memsw_account())
2234                         page_counter_uncharge(&old->memsw, stock->nr_pages);
2235                 stock->nr_pages = 0;
2236         }
2237
2238         css_put(&old->css);
2239         stock->cached = NULL;
2240 }
2241
2242 static void drain_local_stock(struct work_struct *dummy)
2243 {
2244         struct memcg_stock_pcp *stock;
2245         unsigned long flags;
2246
2247         /*
2248          * The only protection from cpu hotplug vs. drain_stock races is
2249          * that we always operate on the local CPU stock here with IRQ disabled.
2250          */
2251         local_irq_save(flags);
2252
2253         stock = this_cpu_ptr(&memcg_stock);
2254         drain_obj_stock(stock);
2255         drain_stock(stock);
2256         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2257
2258         local_irq_restore(flags);
2259 }
2260
2261 /*
2262  * Cache charges (nr_pages) in the local per-cpu area.
2263  * They will be consumed by consume_stock() later.
2264  */
2265 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2266 {
2267         struct memcg_stock_pcp *stock;
2268         unsigned long flags;
2269
2270         local_irq_save(flags);
2271
2272         stock = this_cpu_ptr(&memcg_stock);
2273         if (stock->cached != memcg) { /* reset if necessary */
2274                 drain_stock(stock);
2275                 css_get(&memcg->css);
2276                 stock->cached = memcg;
2277         }
2278         stock->nr_pages += nr_pages;
2279
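        /*
         * Don't let a CPU hoard more than the charge batch: once the
         * stock grows past it, drain it all back to the page counters.
         */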
2280         if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2281                 drain_stock(stock);
2282
2283         local_irq_restore(flags);
2284 }
2285
2286 /*
2287  * Drain all per-CPU charge caches for the given root_memcg and the
2288  * subtree of the hierarchy under it.
2289  */
2290 static void drain_all_stock(struct mem_cgroup *root_memcg)
2291 {
2292         int cpu, curcpu;
2293
2294         /* If someone's already draining, avoid adding more workers. */
2295         if (!mutex_trylock(&percpu_charge_mutex))
2296                 return;
2297         /*
2298          * Notify other cpus that the system-wide "drain" is running.
2299          * We do not care about races with the cpu hotplug because cpu down
2300          * as well as workers from this path always operate on the local
2301          * per-cpu data. CPU up doesn't touch memcg_stock at all.
2302          */
2303         curcpu = get_cpu();
2304         for_each_online_cpu(cpu) {
2305                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2306                 struct mem_cgroup *memcg;
2307                 bool flush = false;
2308
2309                 rcu_read_lock();
2310                 memcg = stock->cached;
2311                 if (memcg && stock->nr_pages &&
2312                     mem_cgroup_is_descendant(memcg, root_memcg))
2313                         flush = true;
2314                 if (obj_stock_flush_required(stock, root_memcg))
2315                         flush = true;
2316                 rcu_read_unlock();
2317
2318                 if (flush &&
2319                     !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2320                         if (cpu == curcpu)
2321                                 drain_local_stock(&stock->work);
2322                         else
2323                                 schedule_work_on(cpu, &stock->work);
2324                 }
2325         }
2326         put_cpu();
2327         mutex_unlock(&percpu_charge_mutex);
2328 }
2329
2330 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2331 {
2332         struct memcg_stock_pcp *stock;
2333         struct mem_cgroup *memcg, *mi;
2334
2335         stock = &per_cpu(memcg_stock, cpu);
2336         drain_stock(stock);
2337
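        /*
         * Flush the dead CPU's remaining percpu stat, lruvec and event
         * deltas into the atomic hierarchy-wide counters.
         */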
2338         for_each_mem_cgroup(memcg) {
2339                 int i;
2340
2341                 for (i = 0; i < MEMCG_NR_STAT; i++) {
2342                         int nid;
2343                         long x;
2344
2345                         x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2346                         if (x)
2347                                 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2348                                         atomic_long_add(x, &mi->vmstats[i]);
2349
2350                         if (i >= NR_VM_NODE_STAT_ITEMS)
2351                                 continue;
2352
2353                         for_each_node(nid) {
2354                                 struct mem_cgroup_per_node *pn;
2355
2356                                 pn = mem_cgroup_nodeinfo(memcg, nid);
2357                                 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2358                                 if (x)
2359                                         do {
2360                                                 atomic_long_add(x, &pn->lruvec_stat[i]);
2361                                         } while ((pn = parent_nodeinfo(pn, nid)));
2362                         }
2363                 }
2364
2365                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2366                         long x;
2367
2368                         x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2369                         if (x)
2370                                 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2371                                         atomic_long_add(x, &mi->vmevents[i]);
2372                 }
2373         }
2374
2375         return 0;
2376 }
2377
2378 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2379                                   unsigned int nr_pages,
2380                                   gfp_t gfp_mask)
2381 {
2382         unsigned long nr_reclaimed = 0;
2383
2384         do {
2385                 unsigned long pflags;
2386
2387                 if (page_counter_read(&memcg->memory) <=
2388                     READ_ONCE(memcg->memory.high))
2389                         continue;
2390
2391                 memcg_memory_event(memcg, MEMCG_HIGH);
2392
2393                 psi_memstall_enter(&pflags);
2394                 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2395                                                              gfp_mask, true);
2396                 psi_memstall_leave(&pflags);
2397         } while ((memcg = parent_mem_cgroup(memcg)) &&
2398                  !mem_cgroup_is_root(memcg));
2399
2400         return nr_reclaimed;
2401 }
2402
2403 static void high_work_func(struct work_struct *work)
2404 {
2405         struct mem_cgroup *memcg;
2406
2407         memcg = container_of(work, struct mem_cgroup, high_work);
2408         reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2409 }
2410
2411 /*
2412  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2413  * enough to cause a significant slowdown in most cases, while still
2414  * allowing diagnostics and tracing to proceed without becoming stuck.
2415  */
2416 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2417
2418 /*
2419  * When calculating the delay, we use these on either side of the
2420  * exponentiation to maintain precision and scale to a reasonable number of
2421  * jiffies (see the table below).
2422  *
2423  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2424  *   overage ratio to a delay.
2425  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2426  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2427  *   to produce a reasonable delay curve.
2428  *
2429  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2430  * reasonable delay curve compared to precision-adjusted overage, not
2431  * penalising heavily at first, but still making sure that growth beyond the
2432  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2433  * example, with a high of 100 megabytes:
2434  *
2435  *  +-------+------------------------+
2436  *  | usage | time to allocate in ms |
2437  *  +-------+------------------------+
2438  *  | 100M  |                      0 |
2439  *  | 101M  |                      6 |
2440  *  | 102M  |                     25 |
2441  *  | 103M  |                     57 |
2442  *  | 104M  |                    102 |
2443  *  | 105M  |                    159 |
2444  *  | 106M  |                    230 |
2445  *  | 107M  |                    313 |
2446  *  | 108M  |                    409 |
2447  *  | 109M  |                    518 |
2448  *  | 110M  |                    639 |
2449  *  | 111M  |                    774 |
2450  *  | 112M  |                    921 |
2451  *  | 113M  |                   1081 |
2452  *  | 114M  |                   1254 |
2453  *  | 115M  |                   1439 |
2454  *  | 116M  |                   1638 |
2455  *  | 117M  |                   1849 |
2456  *  | 118M  |                   2000 |
2457  *  | 119M  |                   2000 |
2458  *  | 120M  |                   2000 |
2459  *  +-------+------------------------+
2460  */
2461 #define MEMCG_DELAY_PRECISION_SHIFT 20
2462 #define MEMCG_DELAY_SCALING_SHIFT 14
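
/*
 * Worked example for one row of the table above, assuming HZ=1000 and a
 * full MEMCG_CHARGE_BATCH-sized allocation: at 101M usage with a 100M high,
 * overage = (1M << 20) / 100M ~= 10485, and the delay comes out to
 * 10485 * 10485 * 1000 >> 20 >> 14 ~= 6 jiffies, i.e. the ~6ms row.
 */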
2463
2464 static u64 calculate_overage(unsigned long usage, unsigned long high)
2465 {
2466         u64 overage;
2467
2468         if (usage <= high)
2469                 return 0;
2470
2471         /*
2472          * Prevent division by 0 in the overage calculation by acting as if
2473          * it were a threshold of 1 page.
2474          */
2475         high = max(high, 1UL);
2476
2477         overage = usage - high;
2478         overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2479         return div64_u64(overage, high);
2480 }
2481
2482 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2483 {
2484         u64 overage, max_overage = 0;
2485
2486         do {
2487                 overage = calculate_overage(page_counter_read(&memcg->memory),
2488                                             READ_ONCE(memcg->memory.high));
2489                 max_overage = max(overage, max_overage);
2490         } while ((memcg = parent_mem_cgroup(memcg)) &&
2491                  !mem_cgroup_is_root(memcg));
2492
2493         return max_overage;
2494 }
2495
2496 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2497 {
2498         u64 overage, max_overage = 0;
2499
2500         do {
2501                 overage = calculate_overage(page_counter_read(&memcg->swap),
2502                                             READ_ONCE(memcg->swap.high));
2503                 if (overage)
2504                         memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2505                 max_overage = max(overage, max_overage);
2506         } while ((memcg = parent_mem_cgroup(memcg)) &&
2507                  !mem_cgroup_is_root(memcg));
2508
2509         return max_overage;
2510 }
2511
2512 /*
2513  * Get the number of jiffies that we should penalise a mischievous cgroup which
2514  * is exceeding its memory.high by checking both it and its ancestors.
2515  */
2516 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2517                                           unsigned int nr_pages,
2518                                           u64 max_overage)
2519 {
2520         unsigned long penalty_jiffies;
2521
2522         if (!max_overage)
2523                 return 0;
2524
2525         /*
2526          * We use overage compared to memory.high to calculate the number of
2527          * jiffies to sleep (penalty_jiffies). Ideally this value should be
2528          * fairly lenient on small overages, and increasingly harsh when the
2529          * memcg in question makes it clear that it has no intention of stopping
2530          * its crazy behaviour, so we exponentially increase the delay based on
2531          * overage amount.
2532          */
2533         penalty_jiffies = max_overage * max_overage * HZ;
2534         penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2535         penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2536
2537         /*
2538          * Factor in the task's own contribution to the overage, such that four
2539          * N-sized allocations are throttled approximately the same as one
2540          * 4N-sized allocation.
2541          *
2542          * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2543          * larger the current charge batch is than that.
2544          */
2545         return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2546 }
2547
2548 /*
2549  * Scheduled by try_charge() to be executed from the userland return path
2550  * and reclaims memory over the high limit.
2551  */
2552 void mem_cgroup_handle_over_high(void)
2553 {
2554         unsigned long penalty_jiffies;
2555         unsigned long pflags;
2556         unsigned long nr_reclaimed;
2557         unsigned int nr_pages = current->memcg_nr_pages_over_high;
2558         int nr_retries = MAX_RECLAIM_RETRIES;
2559         struct mem_cgroup *memcg;
2560         bool in_retry = false;
2561
2562         if (likely(!nr_pages))
2563                 return;
2564
2565         memcg = get_mem_cgroup_from_mm(current->mm);
2566         current->memcg_nr_pages_over_high = 0;
2567
2568 retry_reclaim:
2569         /*
2570          * The allocating task should reclaim at least the batch size, but for
2571          * subsequent retries we only want to do what's necessary to prevent oom
2572          * or breaching resource isolation.
2573          *
2574          * This is distinct from memory.max or page allocator behaviour because
2575          * memory.high is currently batched, whereas memory.max and the page
2576          * allocator run every time an allocation is made.
2577          */
2578         nr_reclaimed = reclaim_high(memcg,
2579                                     in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2580                                     GFP_KERNEL);
2581
2582         /*
2583          * memory.high is breached and reclaim is unable to keep up. Throttle
2584          * allocators proactively to slow down excessive growth.
2585          */
2586         penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2587                                                mem_find_max_overage(memcg));
2588
2589         penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2590                                                 swap_find_max_overage(memcg));
2591
2592         /*
2593          * Clamp the max delay per usermode return so as to still keep the
2594          * application moving forwards and also permit diagnostics, albeit
2595          * extremely slowly.
2596          */
2597         penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2598
2599         /*
2600          * Don't sleep if the amount of jiffies this memcg owes us is so low
2601          * that it's not even worth doing, in an attempt to be nice to those who
2602          * go only a small amount over their memory.high value and maybe haven't
2603          * been aggressively reclaimed enough yet.
2604          */
2605         if (penalty_jiffies <= HZ / 100)
2606                 goto out;
2607
2608         /*
2609          * If reclaim is making forward progress but we're still over
2610          * memory.high, we want to encourage that rather than doing allocator
2611          * throttling.
2612          */
2613         if (nr_reclaimed || nr_retries--) {
2614                 in_retry = true;
2615                 goto retry_reclaim;
2616         }
2617
2618         /*
2619          * If we exit early, we're guaranteed to die (since
2620          * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2621          * need to account for any ill-begotten jiffies to pay them off later.
2622          */
2623         psi_memstall_enter(&pflags);
2624         schedule_timeout_killable(penalty_jiffies);
2625         psi_memstall_leave(&pflags);
2626
2627 out:
2628         css_put(&memcg->css);
2629 }
2630
2631 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2632                       unsigned int nr_pages)
2633 {
2634         unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2635         int nr_retries = MAX_RECLAIM_RETRIES;
2636         struct mem_cgroup *mem_over_limit;
2637         struct page_counter *counter;
2638         enum oom_status oom_status;
2639         unsigned long nr_reclaimed;
2640         bool may_swap = true;
2641         bool drained = false;
2642         unsigned long pflags;
2643
2644         if (mem_cgroup_is_root(memcg))
2645                 return 0;
2646 retry:
2647         if (consume_stock(memcg, nr_pages))
2648                 return 0;
2649
2650         if (!do_memsw_account() ||
2651             page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2652                 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2653                         goto done_restock;
2654                 if (do_memsw_account())
2655                         page_counter_uncharge(&memcg->memsw, batch);
2656                 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2657         } else {
2658                 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2659                 may_swap = false;
2660         }
2661
2662         if (batch > nr_pages) {
2663                 batch = nr_pages;
2664                 goto retry;
2665         }
2666
2667         /*
2668          * Memcg doesn't have a dedicated reserve for atomic
2669          * allocations. But like the global atomic pool, we need to
2670          * put the burden of reclaim on regular allocation requests
2671          * and let these go through as privileged allocations.
2672          */
2673         if (gfp_mask & __GFP_ATOMIC)
2674                 goto force;
2675
2676         /*
2677          * Unlike in global OOM situations, memcg is not in a physical
2678          * memory shortage.  Allow dying and OOM-killed tasks to
2679          * bypass the last charges so that they can exit quickly and
2680          * free their memory.
2681          */
2682         if (unlikely(should_force_charge()))
2683                 goto force;
2684
2685         /*
2686          * Prevent unbounded recursion when reclaim operations need to
2687          * allocate memory. This might exceed the limits temporarily,
2688          * but we prefer facilitating memory reclaim and getting back
2689          * under the limit over triggering OOM kills in these cases.
2690          */
2691         if (unlikely(current->flags & PF_MEMALLOC))
2692                 goto force;
2693
2694         if (unlikely(task_in_memcg_oom(current)))
2695                 goto nomem;
2696
2697         if (!gfpflags_allow_blocking(gfp_mask))
2698                 goto nomem;
2699
2700         memcg_memory_event(mem_over_limit, MEMCG_MAX);
2701
2702         psi_memstall_enter(&pflags);
2703         nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2704                                                     gfp_mask, may_swap);
2705         psi_memstall_leave(&pflags);
2706
2707         if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2708                 goto retry;
2709
2710         if (!drained) {
2711                 drain_all_stock(mem_over_limit);
2712                 drained = true;
2713                 goto retry;
2714         }
2715
2716         if (gfp_mask & __GFP_NORETRY)
2717                 goto nomem;
2718         /*
2719          * Even though the limit is exceeded at this point, reclaim
2720          * may have been able to free some pages.  Retry the charge
2721          * before killing the task.
2722          *
2723          * Only for regular pages, though: huge pages are rather
2724          * unlikely to succeed so close to the limit, and we fall back
2725          * to regular pages anyway in case of failure.
2726          */
2727         if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2728                 goto retry;
2729         /*
2730          * During a task move, charges can be counted twice. So it's
2731          * better to wait until the move finishes if one is in progress.
2732          */
2733         if (mem_cgroup_wait_acct_move(mem_over_limit))
2734                 goto retry;
2735
2736         if (nr_retries--)
2737                 goto retry;
2738
2739         if (gfp_mask & __GFP_RETRY_MAYFAIL)
2740                 goto nomem;
2741
2742         if (gfp_mask & __GFP_NOFAIL)
2743                 goto force;
2744
2745         if (fatal_signal_pending(current))
2746                 goto force;
2747
2748         /*
2749          * Keep retrying as long as the memcg OOM killer is able to make
2750          * forward progress, or bypass the charge if the OOM killer
2751          * couldn't make any progress.
2752          */
2753         oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2754                        get_order(nr_pages * PAGE_SIZE));
2755         switch (oom_status) {
2756         case OOM_SUCCESS:
2757                 nr_retries = MAX_RECLAIM_RETRIES;
2758                 goto retry;
2759         case OOM_FAILED:
2760                 goto force;
2761         default:
2762                 goto nomem;
2763         }
2764 nomem:
2765         if (!(gfp_mask & __GFP_NOFAIL))
2766                 return -ENOMEM;
2767 force:
2768         /*
2769          * The allocation either can't fail or will lead to more memory
2770          * being freed very soon.  Allow memory usage to go over the limit
2771          * temporarily by force charging it.
2772          */
2773         page_counter_charge(&memcg->memory, nr_pages);
2774         if (do_memsw_account())
2775                 page_counter_charge(&memcg->memsw, nr_pages);
2776
2777         return 0;
2778
2779 done_restock:
2780         if (batch > nr_pages)
2781                 refill_stock(memcg, batch - nr_pages);
2782
2783         /*
2784          * If the hierarchy is above the normal consumption range, schedule
2785          * reclaim on returning to userland.  We can perform reclaim here
2786          * if __GFP_RECLAIM but let's always punt for simplicity and so that
2787          * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2788          * not recorded as it most likely matches current's and won't
2789          * change in the meantime.  As high limit is checked again before
2790          * reclaim, the cost of mismatch is negligible.
2791          */
2792         do {
2793                 bool mem_high, swap_high;
2794
2795                 mem_high = page_counter_read(&memcg->memory) >
2796                         READ_ONCE(memcg->memory.high);
2797                 swap_high = page_counter_read(&memcg->swap) >
2798                         READ_ONCE(memcg->swap.high);
2799
2800                 /* Don't bother a random interrupted task */
2801                 if (in_interrupt()) {
2802                         if (mem_high) {
2803                                 schedule_work(&memcg->high_work);
2804                                 break;
2805                         }
2806                         continue;
2807                 }
2808
2809                 if (mem_high || swap_high) {
2810                         /*
2811                          * The allocating tasks in this cgroup will need to do
2812                          * reclaim or be throttled to prevent further growth
2813                          * of the memory or swap footprints.
2814                          *
2815                          * Target some best-effort fairness between the tasks,
2816                          * and distribute reclaim work and delay penalties
2817                          * based on how much each task is actually allocating.
2818                          */
2819                         current->memcg_nr_pages_over_high += batch;
2820                         set_notify_resume(current);
2821                         break;
2822                 }
2823         } while ((memcg = parent_mem_cgroup(memcg)));
2824
2825         return 0;
2826 }
2827
2828 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2829 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2830 {
2831         if (mem_cgroup_is_root(memcg))
2832                 return;
2833
2834         page_counter_uncharge(&memcg->memory, nr_pages);
2835         if (do_memsw_account())
2836                 page_counter_uncharge(&memcg->memsw, nr_pages);
2837 }
2838 #endif
2839
2840 static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2841 {
2842         VM_BUG_ON_PAGE(page->mem_cgroup, page);
2843         /*
2844          * Any of the following ensures page->mem_cgroup stability:
2845          *
2846          * - the page lock
2847          * - LRU isolation
2848          * - lock_page_memcg()
2849          * - exclusive reference
2850          */
2851         page->mem_cgroup = memcg;
2852 }
2853
2854 #ifdef CONFIG_MEMCG_KMEM
2855 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2856                                  gfp_t gfp)
2857 {
2858         unsigned int objects = objs_per_slab_page(s, page);
2859         void *vec;
2860
2861         vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2862                            page_to_nid(page));
2863         if (!vec)
2864                 return -ENOMEM;
2865
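        /*
         * Another thread may have installed the vector concurrently.
         * The low bit tags the pointer so that page_has_obj_cgroups()
         * can tell an obj_cgroup vector apart from a plain
         * page->mem_cgroup pointer sharing the same word.
         */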
2866         if (cmpxchg(&page->obj_cgroups, NULL,
2867                     (struct obj_cgroup **) ((unsigned long)vec | 0x1UL)))
2868                 kfree(vec);
2869         else
2870                 kmemleak_not_leak(vec);
2871
2872         return 0;
2873 }
2874
2875 /*
2876  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2877  *
2878  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2879  * cgroup_mutex, etc.
2880  */
2881 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2882 {
2883         struct page *page;
2884
2885         if (mem_cgroup_disabled())
2886                 return NULL;
2887
2888         page = virt_to_head_page(p);
2889
2890         /*
2891          * Slab objects are accounted individually, not per-page.
2892          * Memcg membership data for each individual object is saved in
2893          * the page->obj_cgroups.
2894          */
2895         if (page_has_obj_cgroups(page)) {
2896                 struct obj_cgroup *objcg;
2897                 unsigned int off;
2898
2899                 off = obj_to_index(page->slab_cache, page, p);
2900                 objcg = page_obj_cgroups(page)[off];
2901                 if (objcg)
2902                         return obj_cgroup_memcg(objcg);
2903
2904                 return NULL;
2905         }
2906
2907         /* All other pages use page->mem_cgroup */
2908         return page->mem_cgroup;
2909 }
2910
2911 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2912 {
2913         struct obj_cgroup *objcg = NULL;
2914         struct mem_cgroup *memcg;
2915
2916         if (unlikely(!current->mm && !current->active_memcg))
2917                 return NULL;
2918
2919         rcu_read_lock();
2920         if (unlikely(current->active_memcg))
2921                 memcg = rcu_dereference(current->active_memcg);
2922         else
2923                 memcg = mem_cgroup_from_task(current);
2924
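        /*
         * Walk up the hierarchy to find the nearest non-root ancestor
         * with a live obj_cgroup we can take a reference on.
         */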
2925         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2926                 objcg = rcu_dereference(memcg->objcg);
2927                 if (objcg && obj_cgroup_tryget(objcg))
2928                         break;
2929         }
2930         rcu_read_unlock();
2931
2932         return objcg;
2933 }
2934
2935 static int memcg_alloc_cache_id(void)
2936 {
2937         int id, size;
2938         int err;
2939
2940         id = ida_simple_get(&memcg_cache_ida,
2941                             0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2942         if (id < 0)
2943                 return id;
2944
2945         if (id < memcg_nr_cache_ids)
2946                 return id;
2947
2948         /*
2949          * There's no space for the new id in memcg_caches arrays,
2950          * so we have to grow them.
2951          */
2952         down_write(&memcg_cache_ids_sem);
2953
2954         size = 2 * (id + 1);
2955         if (size < MEMCG_CACHES_MIN_SIZE)
2956                 size = MEMCG_CACHES_MIN_SIZE;
2957         else if (size > MEMCG_CACHES_MAX_SIZE)
2958                 size = MEMCG_CACHES_MAX_SIZE;
2959
2960         err = memcg_update_all_list_lrus(size);
2961         if (!err)
2962                 memcg_nr_cache_ids = size;
2963
2964         up_write(&memcg_cache_ids_sem);
2965
2966         if (err) {
2967                 ida_simple_remove(&memcg_cache_ida, id);
2968                 return err;
2969         }
2970         return id;
2971 }
2972
2973 static void memcg_free_cache_id(int id)
2974 {
2975         ida_simple_remove(&memcg_cache_ida, id);
2976 }
2977
2978 /**
2979  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
2980  * @memcg: memory cgroup to charge
2981  * @gfp: reclaim mode
2982  * @nr_pages: number of pages to charge
2983  *
2984  * Returns 0 on success, an error code on failure.
2985  */
2986 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
2987                         unsigned int nr_pages)
2988 {
2989         struct page_counter *counter;
2990         int ret;
2991
2992         ret = try_charge(memcg, gfp, nr_pages);
2993         if (ret)
2994                 return ret;
2995
2996         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2997             !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2998
2999                 /*
3000                  * Enforce __GFP_NOFAIL allocation because callers are not
3001                  * prepared to see failures and likely do not have any failure
3002                  * handling code.
3003                  */
3004                 if (gfp & __GFP_NOFAIL) {
3005                         page_counter_charge(&memcg->kmem, nr_pages);
3006                         return 0;
3007                 }
3008                 cancel_charge(memcg, nr_pages);
3009                 return -ENOMEM;
3010         }
3011         return 0;
3012 }
3013
3014 /**
3015  * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
3016  * @memcg: memcg to uncharge
3017  * @nr_pages: number of pages to uncharge
3018  */
3019 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
3020 {
3021         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
3022                 page_counter_uncharge(&memcg->kmem, nr_pages);
3023
3024         page_counter_uncharge(&memcg->memory, nr_pages);
3025         if (do_memsw_account())
3026                 page_counter_uncharge(&memcg->memsw, nr_pages);
3027 }
3028
3029 /**
3030  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3031  * @page: page to charge
3032  * @gfp: reclaim mode
3033  * @order: allocation order
3034  *
3035  * Returns 0 on success, an error code on failure.
3036  */
3037 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3038 {
3039         struct mem_cgroup *memcg;
3040         int ret = 0;
3041
3042         if (memcg_kmem_bypass())
3043                 return 0;
3044
3045         memcg = get_mem_cgroup_from_current();
3046         if (!mem_cgroup_is_root(memcg)) {
3047                 ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
3048                 if (!ret) {
3049                         page->mem_cgroup = memcg;
3050                         __SetPageKmemcg(page);
3051                         return 0;
3052                 }
3053         }
3054         css_put(&memcg->css);
3055         return ret;
3056 }
3057
3058 /**
3059  * __memcg_kmem_uncharge_page: uncharge a kmem page
3060  * @page: page to uncharge
3061  * @order: allocation order
3062  */
3063 void __memcg_kmem_uncharge_page(struct page *page, int order)
3064 {
3065         struct mem_cgroup *memcg = page->mem_cgroup;
3066         unsigned int nr_pages = 1 << order;
3067
3068         if (!memcg)
3069                 return;
3070
3071         VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3072         __memcg_kmem_uncharge(memcg, nr_pages);
3073         page->mem_cgroup = NULL;
3074         css_put(&memcg->css);
3075
3076         /* slab pages do not have PageKmemcg flag set */
3077         if (PageKmemcg(page))
3078                 __ClearPageKmemcg(page);
3079 }
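
/*
 * Illustrative pairing (sketch only, not the actual call sites, which
 * live in the page allocator): a kernel allocation with __GFP_ACCOUNT
 * is charged at allocation time and uncharged at free time:
 *
 *	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
 *		__memcg_kmem_charge_page(page, gfp, order);
 *	...
 *	__memcg_kmem_uncharge_page(page, order);
 */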
3080
3081 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3082 {
3083         struct memcg_stock_pcp *stock;
3084         unsigned long flags;
3085         bool ret = false;
3086
3087         local_irq_save(flags);
3088
3089         stock = this_cpu_ptr(&memcg_stock);
3090         if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3091                 stock->nr_bytes -= nr_bytes;
3092                 ret = true;
3093         }
3094
3095         local_irq_restore(flags);
3096
3097         return ret;
3098 }
3099
3100 static void drain_obj_stock(struct memcg_stock_pcp *stock)
3101 {
3102         struct obj_cgroup *old = stock->cached_objcg;
3103
3104         if (!old)
3105                 return;
3106
3107         if (stock->nr_bytes) {
3108                 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3109                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3110
3111                 if (nr_pages) {
3112                         rcu_read_lock();
3113                         __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
3114                         rcu_read_unlock();
3115                 }
3116
3117                 /*
3118                  * The leftover is flushed to the centralized per-objcg value,
3119                  * objcg->nr_charged_bytes. On the next attempt to refill the
3120                  * obj stock it will be moved to a per-cpu stock (probably on
3121                  * another CPU), see refill_obj_stock().
3122                  *
3123                  * How often it's flushed is a trade-off between the memory
3124                  * limit enforcement accuracy and potential CPU contention,
3125                  * so it might be changed in the future.
3126                  */
3127                 atomic_add(nr_bytes, &old->nr_charged_bytes);
3128                 stock->nr_bytes = 0;
3129         }
3130
3131         obj_cgroup_put(old);
3132         stock->cached_objcg = NULL;
3133 }
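
/*
 * Worked example (illustrative): with stock->nr_bytes == PAGE_SIZE + 300,
 * drain_obj_stock() uncharges one full page from the memcg counters and
 * parks the 300-byte remainder in objcg->nr_charged_bytes, where a later
 * refill_obj_stock() can pick it up again.
 */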
3134
3135 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3136                                      struct mem_cgroup *root_memcg)
3137 {
3138         struct mem_cgroup *memcg;
3139
3140         if (stock->cached_objcg) {
3141                 memcg = obj_cgroup_memcg(stock->cached_objcg);
3142                 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3143                         return true;
3144         }
3145
3146         return false;
3147 }
3148
3149 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3150 {
3151         struct memcg_stock_pcp *stock;
3152         unsigned long flags;
3153
3154         local_irq_save(flags);
3155
3156         stock = this_cpu_ptr(&memcg_stock);
3157         if (stock->cached_objcg != objcg) { /* reset if necessary */
3158                 drain_obj_stock(stock);
3159                 obj_cgroup_get(objcg);
3160                 stock->cached_objcg = objcg;
3161                 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
3162         }
3163         stock->nr_bytes += nr_bytes;
3164
3165         if (stock->nr_bytes > PAGE_SIZE)
3166                 drain_obj_stock(stock);
3167
3168         local_irq_restore(flags);
3169 }
3170
3171 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3172 {
3173         struct mem_cgroup *memcg;
3174         unsigned int nr_pages, nr_bytes;
3175         int ret;
3176
3177         if (consume_obj_stock(objcg, size))
3178                 return 0;
3179
3180         /*
3181          * In theory, objcg->nr_charged_bytes can have enough
3182          * pre-charged bytes to satisfy the allocation. However,
3183          * flushing objcg->nr_charged_bytes requires two atomic
3184          * operations, and objcg->nr_charged_bytes can't be big,
3185          * so it's better to ignore it and try to grab some new pages.
3186          * objcg->nr_charged_bytes will be flushed in
3187          * refill_obj_stock(), called from this function or
3188          * independently later.
3189          */
3190         rcu_read_lock();
3191         memcg = obj_cgroup_memcg(objcg);
3192         css_get(&memcg->css);
3193         rcu_read_unlock();
3194
3195         nr_pages = size >> PAGE_SHIFT;
3196         nr_bytes = size & (PAGE_SIZE - 1);
3197
3198         if (nr_bytes)
3199                 nr_pages += 1;
3200
3201         ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
3202         if (!ret && nr_bytes)
3203                 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
3204
3205         css_put(&memcg->css);
3206         return ret;
3207 }
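
/*
 * Worked example (illustrative): obj_cgroup_charge(objcg, GFP_KERNEL, 700)
 * with an empty stock charges one full page via __memcg_kmem_charge() and
 * refills the per-cpu stock with PAGE_SIZE - 700 bytes, so the next
 * sub-page request can be served by consume_obj_stock() without touching
 * the page counters at all.
 */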
3208
3209 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3210 {
3211         refill_obj_stock(objcg, size);
3212 }
3213
3214 #endif /* CONFIG_MEMCG_KMEM */
3215
3216 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3217
3218 /*
3219  * Tail pages are not marked as "used", so set that up here. We're holding
3220  * pgdat->lru_lock, and migration entries are set up in all page mappings.
3221  */
3222 void mem_cgroup_split_huge_fixup(struct page *head)
3223 {
3224         struct mem_cgroup *memcg = head->mem_cgroup;
3225         int i;
3226
3227         if (mem_cgroup_disabled())
3228                 return;
3229
3230         for (i = 1; i < HPAGE_PMD_NR; i++) {
3231                 css_get(&memcg->css);
3232                 head[i].mem_cgroup = memcg;
3233         }
3234 }
3235 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3236
3237 #ifdef CONFIG_MEMCG_SWAP
3238 /**
3239  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3240  * @entry: swap entry to be moved
3241  * @from:  mem_cgroup which the entry is moved from
3242  * @to:  mem_cgroup which the entry is moved to
3243  *
3244  * It succeeds only when the swap_cgroup's record for this entry is the same
3245  * as the mem_cgroup's id of @from.
3246  *
3247  * Returns 0 on success, -EINVAL on failure.
3248  *
3249  * The caller must have charged to @to, IOW, called page_counter_charge() about
3250  * The caller must have charged to @to, IOW, called page_counter_charge() for
3251  * both res and memsw, and called css_get().
3252 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3253                                 struct mem_cgroup *from, struct mem_cgroup *to)
3254 {
3255         unsigned short old_id, new_id;
3256
3257         old_id = mem_cgroup_id(from);
3258         new_id = mem_cgroup_id(to);
3259
3260         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3261                 mod_memcg_state(from, MEMCG_SWAP, -1);
3262                 mod_memcg_state(to, MEMCG_SWAP, 1);
3263                 return 0;
3264         }
3265         return -EINVAL;
3266 }
3267 #else
3268 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3269                                 struct mem_cgroup *from, struct mem_cgroup *to)
3270 {
3271         return -EINVAL;
3272 }
3273 #endif
3274
3275 static DEFINE_MUTEX(memcg_max_mutex);
3276
3277 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3278                                  unsigned long max, bool memsw)
3279 {
3280         bool enlarge = false;
3281         bool drained = false;
3282         int ret;
3283         bool limits_invariant;
3284         struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3285
3286         do {
3287                 if (signal_pending(current)) {
3288                         ret = -EINTR;
3289                         break;
3290                 }
3291
3292                 mutex_lock(&memcg_max_mutex);
3293                 /*
3294                  * Make sure that the new limit (memsw or memory limit) doesn't
3295                  * break our basic invariant: memory.max <= memsw.max.
3296                  */
3297                 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3298                                            max <= memcg->memsw.max;
3299                 if (!limits_invariant) {
3300                         mutex_unlock(&memcg_max_mutex);
3301                         ret = -EINVAL;
3302                         break;
3303                 }
3304                 if (max > counter->max)
3305                         enlarge = true;
3306                 ret = page_counter_set_max(counter, max);
3307                 mutex_unlock(&memcg_max_mutex);
3308
3309                 if (!ret)
3310                         break;
3311
3312                 if (!drained) {
3313                         drain_all_stock(memcg);
3314                         drained = true;
3315                         continue;
3316                 }
3317
3318                 if (!try_to_free_mem_cgroup_pages(memcg, 1,
3319                                         GFP_KERNEL, !memsw)) {
3320                         ret = -EBUSY;
3321                         break;
3322                 }
3323         } while (true);
3324
3325         if (!ret && enlarge)
3326                 memcg_oom_recover(memcg);
3327
3328         return ret;
3329 }
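
/*
 * Example (illustrative numbers): with memory.max == 1G and memsw.max == 2G,
 * raising memory.max above 2G or lowering memsw.max below 1G would violate
 * memory.max <= memsw.max, so mem_cgroup_resize_max() fails with -EINVAL
 * before touching the counter.
 */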
3330
3331 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3332                                             gfp_t gfp_mask,
3333                                             unsigned long *total_scanned)
3334 {
3335         unsigned long nr_reclaimed = 0;
3336         struct mem_cgroup_per_node *mz, *next_mz = NULL;
3337         unsigned long reclaimed;
3338         int loop = 0;
3339         struct mem_cgroup_tree_per_node *mctz;
3340         unsigned long excess;
3341         unsigned long nr_scanned;
3342
3343         if (order > 0)
3344                 return 0;
3345
3346         mctz = soft_limit_tree_node(pgdat->node_id);
3347
3348         /*
3349          * Do not even bother to check the largest node if the root
3350          * is empty. Do the check locklessly to prevent lock bouncing. Races
3351          * are acceptable as the soft limit is best effort anyway.
3352          */
3353         if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3354                 return 0;
3355
3356         /*
3357          * This loop can run for a while, especially if mem_cgroups
3358          * continuously keep exceeding their soft limit and putting the
3359          * system under pressure.
3360          */
3361         do {
3362                 if (next_mz)
3363                         mz = next_mz;
3364                 else
3365                         mz = mem_cgroup_largest_soft_limit_node(mctz);
3366                 if (!mz)
3367                         break;
3368
3369                 nr_scanned = 0;
3370                 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3371                                                     gfp_mask, &nr_scanned);
3372                 nr_reclaimed += reclaimed;
3373                 *total_scanned += nr_scanned;
3374                 spin_lock_irq(&mctz->lock);
3375                 __mem_cgroup_remove_exceeded(mz, mctz);
3376
3377                 /*
3378                  * If we failed to reclaim anything from this memory cgroup
3379                  * it is time to move on to the next cgroup
3380                  */
3381                 next_mz = NULL;
3382                 if (!reclaimed)
3383                         next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3384
3385                 excess = soft_limit_excess(mz->memcg);
3386                 /*
3387                  * One school of thought says that we should not add
3388                  * back the node to the tree if reclaim returns 0.
3389                  * But our reclaim could return 0 simply because, due
3390                  * to the reclaim priority, we are exposing a smaller
3391                  * subset of memory to reclaim from. Consider this a
3392                  * longer-term TODO.
3393                  */
3394                 /* If excess == 0, no tree ops */
3395                 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3396                 spin_unlock_irq(&mctz->lock);
3397                 css_put(&mz->memcg->css);
3398                 loop++;
3399                 /*
3400                  * Could not reclaim anything and there are no more
3401                  * mem cgroups to try or we seem to be looping without
3402                  * reclaiming anything.
3403                  */
3404                 if (!nr_reclaimed &&
3405                         (next_mz == NULL ||
3406                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3407                         break;
3408         } while (!nr_reclaimed);
3409         if (next_mz)
3410                 css_put(&next_mz->memcg->css);
3411         return nr_reclaimed;
3412 }
3413
3414 /*
3415  * Test whether @memcg has children, dead or alive.  Note that this
3416  * function doesn't care whether @memcg has use_hierarchy enabled and
3417  * returns %true if there are child csses according to the cgroup
3418  * hierarchy.  Testing use_hierarchy is the caller's responsibility.
3419  */
3420 static inline bool memcg_has_children(struct mem_cgroup *memcg)
3421 {
3422         bool ret;
3423
3424         rcu_read_lock();
3425         ret = css_next_child(NULL, &memcg->css);
3426         rcu_read_unlock();
3427         return ret;
3428 }
3429
3430 /*
3431  * Reclaims as many pages from the given memcg as possible.
3432  *
3433  * Caller is responsible for holding css reference for memcg.
3434  */
3435 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3436 {
3437         int nr_retries = MAX_RECLAIM_RETRIES;
3438
3439         /* we call try-to-free pages to make this cgroup empty */
3440         lru_add_drain_all();
3441
3442         drain_all_stock(memcg);
3443
3444         /* try to free all pages in this cgroup */
3445         while (nr_retries && page_counter_read(&memcg->memory)) {
3446                 int progress;
3447
3448                 if (signal_pending(current))
3449                         return -EINTR;
3450
3451                 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3452                                                         GFP_KERNEL, true);
3453                 if (!progress) {
3454                         nr_retries--;
3455                         /* maybe some writeback is necessary */
3456                         congestion_wait(BLK_RW_ASYNC, HZ/10);
3457                 }
3458
3459         }
3460
3461         return 0;
3462 }
3463
3464 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3465                                             char *buf, size_t nbytes,
3466                                             loff_t off)
3467 {
3468         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3469
3470         if (mem_cgroup_is_root(memcg))
3471                 return -EINVAL;
3472         return mem_cgroup_force_empty(memcg) ?: nbytes;
3473 }
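
/*
 * Illustrative cgroup v1 usage (the path depends on where the hierarchy
 * is mounted):
 *
 *	echo > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * ends up in mem_cgroup_force_empty_write() above.
 */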
3474
3475 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3476                                      struct cftype *cft)
3477 {
3478         return mem_cgroup_from_css(css)->use_hierarchy;
3479 }
3480
3481 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3482                                       struct cftype *cft, u64 val)
3483 {
3484         int retval = 0;
3485         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3486         struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3487
3488         if (memcg->use_hierarchy == val)
3489                 return 0;
3490
3491         /*
3492          * If parent's use_hierarchy is set, we can't make any modifications
3493          * in the child subtrees. If it is unset, then the change can
3494          * occur, provided the current cgroup has no children.
3495          *
3496          * For the root cgroup, parent_memcg is NULL, so we allow the
3497          * value to be set if there are no children.
3498          */
3499         if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3500                                 (val == 1 || val == 0)) {
3501                 if (!memcg_has_children(memcg))
3502                         memcg->use_hierarchy = val;
3503                 else
3504                         retval = -EBUSY;
3505         } else
3506                 retval = -EINVAL;
3507
3508         return retval;
3509 }
3510
3511 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3512 {
3513         unsigned long val;
3514
3515         if (mem_cgroup_is_root(memcg)) {
3516                 val = memcg_page_state(memcg, NR_FILE_PAGES) +
3517                         memcg_page_state(memcg, NR_ANON_MAPPED);
3518                 if (swap)
3519                         val += memcg_page_state(memcg, MEMCG_SWAP);
3520         } else {
3521                 if (!swap)
3522                         val = page_counter_read(&memcg->memory);
3523                 else
3524                         val = page_counter_read(&memcg->memsw);
3525         }
3526         return val;
3527 }
3528
3529 enum {
3530         RES_USAGE,
3531         RES_LIMIT,
3532         RES_MAX_USAGE,
3533         RES_FAILCNT,
3534         RES_SOFT_LIMIT,
3535 };
3536
3537 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3538                                struct cftype *cft)
3539 {
3540         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3541         struct page_counter *counter;
3542
3543         switch (MEMFILE_TYPE(cft->private)) {
3544         case _MEM:
3545                 counter = &memcg->memory;
3546                 break;
3547         case _MEMSWAP:
3548                 counter = &memcg->memsw;
3549                 break;
3550         case _KMEM:
3551                 counter = &memcg->kmem;
3552                 break;
3553         case _TCP:
3554                 counter = &memcg->tcpmem;
3555                 break;
3556         default:
3557                 BUG();
3558         }
3559
3560         switch (MEMFILE_ATTR(cft->private)) {
3561         case RES_USAGE:
3562                 if (counter == &memcg->memory)
3563                         return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3564                 if (counter == &memcg->memsw)
3565                         return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3566                 return (u64)page_counter_read(counter) * PAGE_SIZE;
3567         case RES_LIMIT:
3568                 return (u64)counter->max * PAGE_SIZE;
3569         case RES_MAX_USAGE:
3570                 return (u64)counter->watermark * PAGE_SIZE;
3571         case RES_FAILCNT:
3572                 return counter->failcnt;
3573         case RES_SOFT_LIMIT:
3574                 return (u64)memcg->soft_limit * PAGE_SIZE;
3575         default:
3576                 BUG();
3577         }
3578 }
3579
3580 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
3581 {
3582         unsigned long stat[MEMCG_NR_STAT] = {0};
3583         struct mem_cgroup *mi;
3584         int node, cpu, i;
3585
3586         for_each_online_cpu(cpu)
3587                 for (i = 0; i < MEMCG_NR_STAT; i++)
3588                         stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3589
3590         for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3591                 for (i = 0; i < MEMCG_NR_STAT; i++)
3592                         atomic_long_add(stat[i], &mi->vmstats[i]);
3593
3594         for_each_node(node) {
3595                 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3596                 struct mem_cgroup_per_node *pi;
3597
3598                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3599                         stat[i] = 0;
3600
3601                 for_each_online_cpu(cpu)
3602                         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3603                                 stat[i] += per_cpu(
3604                                         pn->lruvec_stat_cpu->count[i], cpu);
3605
3606                 for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3607                         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3608                                 atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3609         }
3610 }
3611
3612 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3613 {
3614         unsigned long events[NR_VM_EVENT_ITEMS];
3615         struct mem_cgroup *mi;
3616         int cpu, i;
3617
3618         for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3619                 events[i] = 0;
3620
3621         for_each_online_cpu(cpu)
3622                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3623                         events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3624                                              cpu);
3625
3626         for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3627                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3628                         atomic_long_add(events[i], &mi->vmevents[i]);
3629 }
3630
3631 #ifdef CONFIG_MEMCG_KMEM
3632 static int memcg_online_kmem(struct mem_cgroup *memcg)
3633 {
3634         struct obj_cgroup *objcg;
3635         int memcg_id;
3636
3637         if (cgroup_memory_nokmem)
3638                 return 0;
3639
3640         BUG_ON(memcg->kmemcg_id >= 0);
3641         BUG_ON(memcg->kmem_state);
3642
3643         memcg_id = memcg_alloc_cache_id();
3644         if (memcg_id < 0)
3645                 return memcg_id;
3646
3647         objcg = obj_cgroup_alloc();
3648         if (!objcg) {
3649                 memcg_free_cache_id(memcg_id);
3650                 return -ENOMEM;
3651         }
3652         objcg->memcg = memcg;
3653         rcu_assign_pointer(memcg->objcg, objcg);
3654
3655         static_branch_enable(&memcg_kmem_enabled_key);
3656
3657         /*
3658          * A memory cgroup is considered kmem-online as soon as it gets a
3659          * kmemcg_id. Setting the id after enabling static branching will
3660          * guarantee no one starts accounting before all call sites are
3661          * patched.
3662          */
3663         memcg->kmemcg_id = memcg_id;
3664         memcg->kmem_state = KMEM_ONLINE;
3665
3666         return 0;
3667 }
3668
3669 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3670 {
3671         struct cgroup_subsys_state *css;
3672         struct mem_cgroup *parent, *child;
3673         int kmemcg_id;
3674
3675         if (memcg->kmem_state != KMEM_ONLINE)
3676                 return;
3677
3678         memcg->kmem_state = KMEM_ALLOCATED;
3679
3680         parent = parent_mem_cgroup(memcg);
3681         if (!parent)
3682                 parent = root_mem_cgroup;
3683
3684         memcg_reparent_objcgs(memcg, parent);
3685
3686         kmemcg_id = memcg->kmemcg_id;
3687         BUG_ON(kmemcg_id < 0);
3688
3689         /*
3690          * Change kmemcg_id of this cgroup and all its descendants to the
3691          * parent's id, and then move all entries from this cgroup's list_lrus
3692          * to ones of the parent. After we have finished, all list_lrus
3693          * corresponding to this cgroup are guaranteed to remain empty. The
3694          * ordering is imposed by list_lru_node->lock taken by
3695          * memcg_drain_all_list_lrus().
3696          */
3697         rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3698         css_for_each_descendant_pre(css, &memcg->css) {
3699                 child = mem_cgroup_from_css(css);
3700                 BUG_ON(child->kmemcg_id != kmemcg_id);
3701                 child->kmemcg_id = parent->kmemcg_id;
3702                 if (!memcg->use_hierarchy)
3703                         break;
3704         }
3705         rcu_read_unlock();
3706
3707         memcg_drain_all_list_lrus(kmemcg_id, parent);
3708
3709         memcg_free_cache_id(kmemcg_id);
3710 }
3711
3712 static void memcg_free_kmem(struct mem_cgroup *memcg)
3713 {
3714         /* css_alloc() failed, offlining didn't happen */
3715         if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3716                 memcg_offline_kmem(memcg);
3717 }
3718 #else
3719 static int memcg_online_kmem(struct mem_cgroup *memcg)
3720 {
3721         return 0;
3722 }
3723 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3724 {
3725 }
3726 static void memcg_free_kmem(struct mem_cgroup *memcg)
3727 {
3728 }
3729 #endif /* CONFIG_MEMCG_KMEM */
3730
3731 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3732                                  unsigned long max)
3733 {
3734         int ret;
3735
3736         mutex_lock(&memcg_max_mutex);
3737         ret = page_counter_set_max(&memcg->kmem, max);
3738         mutex_unlock(&memcg_max_mutex);
3739         return ret;
3740 }
3741
3742 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3743 {
3744         int ret;
3745
3746         mutex_lock(&memcg_max_mutex);
3747
3748         ret = page_counter_set_max(&memcg->tcpmem, max);
3749         if (ret)
3750                 goto out;
3751
3752         if (!memcg->tcpmem_active) {
3753                 /*
3754                  * The active flag needs to be written after the static_key
3755                  * update. This is what guarantees that the socket activation
3756                  * function is the last one to run. See mem_cgroup_sk_alloc()
3757                  * for details, and note that we don't mark any socket as
3758                  * belonging to this memcg until that flag is up.
3759                  *
3760                  * We need to do this, because static_keys will span multiple
3761                  * sites, but we can't control their order. If we mark a socket
3762                  * as accounted, but the accounting functions are not patched in
3763                  * yet, we'll lose accounting.
3764                  *
3765                  * We never race with the readers in mem_cgroup_sk_alloc(),
3766                  * because when this value changes, the code to process it is not
3767                  * patched in yet.
3768                  */
3769                 static_branch_inc(&memcg_sockets_enabled_key);
3770                 memcg->tcpmem_active = true;
3771         }
3772 out:
3773         mutex_unlock(&memcg_max_mutex);
3774         return ret;
3775 }
3776
3777 /*
3778  * The write handler for the limit files:
3779  * RES_LIMIT and RES_SOFT_LIMIT.
3780  */
3781 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3782                                 char *buf, size_t nbytes, loff_t off)
3783 {
3784         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3785         unsigned long nr_pages;
3786         int ret;
3787
3788         buf = strstrip(buf);
3789         ret = page_counter_memparse(buf, "-1", &nr_pages);
3790         if (ret)
3791                 return ret;
3792
3793         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3794         case RES_LIMIT:
3795                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3796                         ret = -EINVAL;
3797                         break;
3798                 }
3799                 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3800                 case _MEM:
3801                         ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3802                         break;
3803                 case _MEMSWAP:
3804                         ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3805                         break;
3806                 case _KMEM:
3807                         pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3808                                      "Please report your usecase to linux-mm@kvack.org if you "
3809                                      "depend on this functionality.\n");
3810                         ret = memcg_update_kmem_max(memcg, nr_pages);
3811                         break;
3812                 case _TCP:
3813                         ret = memcg_update_tcp_max(memcg, nr_pages);
3814                         break;
3815                 }
3816                 break;
3817         case RES_SOFT_LIMIT:
3818                 memcg->soft_limit = nr_pages;
3819                 ret = 0;
3820                 break;
3821         }
3822         return ret ?: nbytes;
3823 }
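
/*
 * Illustrative cgroup v1 usage: 'echo 512M > memory.limit_in_bytes' arrives
 * here with nr_pages == 512M / PAGE_SIZE, while writing "-1" is parsed by
 * page_counter_memparse() as PAGE_COUNTER_MAX, i.e. "unlimited".
 */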
3824
3825 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3826                                 size_t nbytes, loff_t off)
3827 {
3828         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3829         struct page_counter *counter;
3830
3831         switch (MEMFILE_TYPE(of_cft(of)->private)) {
3832         case _MEM:
3833                 counter = &memcg->memory;
3834                 break;
3835         case _MEMSWAP:
3836                 counter = &memcg->memsw;
3837                 break;
3838         case _KMEM:
3839                 counter = &memcg->kmem;
3840                 break;
3841         case _TCP:
3842                 counter = &memcg->tcpmem;
3843                 break;
3844         default:
3845                 BUG();
3846         }
3847
3848         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3849         case RES_MAX_USAGE:
3850                 page_counter_reset_watermark(counter);
3851                 break;
3852         case RES_FAILCNT:
3853                 counter->failcnt = 0;
3854                 break;
3855         default:
3856                 BUG();
3857         }
3858
3859         return nbytes;
3860 }
3861
3862 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3863                                         struct cftype *cft)
3864 {
3865         return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3866 }
3867
3868 #ifdef CONFIG_MMU
3869 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3870                                         struct cftype *cft, u64 val)
3871 {
3872         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3873
3874         if (val & ~MOVE_MASK)
3875                 return -EINVAL;
3876
3877         /*
3878          * No kind of locking is needed in here, because ->can_attach() will
3879          * check this value once in the beginning of the process, and then carry
3880          * on with stale data. This means that changes to this value will only
3881          * affect task migrations starting after the change.
3882          */
3883         memcg->move_charge_at_immigrate = val;
3884         return 0;
3885 }
3886 #else
3887 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3888                                         struct cftype *cft, u64 val)
3889 {
3890         return -ENOSYS;
3891 }
3892 #endif
3893
3894 #ifdef CONFIG_NUMA
3895
3896 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3897 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3898 #define LRU_ALL      ((1 << NR_LRU_LISTS) - 1)
3899
3900 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3901                                 int nid, unsigned int lru_mask, bool tree)
3902 {
3903         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3904         unsigned long nr = 0;
3905         enum lru_list lru;
3906
3907         VM_BUG_ON((unsigned)nid >= nr_node_ids);
3908
3909         for_each_lru(lru) {
3910                 if (!(BIT(lru) & lru_mask))
3911                         continue;
3912                 if (tree)
3913                         nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3914                 else
3915                         nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3916         }
3917         return nr;
3918 }
3919
3920 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3921                                              unsigned int lru_mask,
3922                                              bool tree)
3923 {
3924         unsigned long nr = 0;
3925         enum lru_list lru;
3926
3927         for_each_lru(lru) {
3928                 if (!(BIT(lru) & lru_mask))
3929                         continue;
3930                 if (tree)
3931                         nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3932                 else
3933                         nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3934         }
3935         return nr;
3936 }
3937
3938 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3939 {
3940         struct numa_stat {
3941                 const char *name;
3942                 unsigned int lru_mask;
3943         };
3944
3945         static const struct numa_stat stats[] = {
3946                 { "total", LRU_ALL },
3947                 { "file", LRU_ALL_FILE },
3948                 { "anon", LRU_ALL_ANON },
3949                 { "unevictable", BIT(LRU_UNEVICTABLE) },
3950         };
3951         const struct numa_stat *stat;
3952         int nid;
3953         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3954
3955         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3956                 seq_printf(m, "%s=%lu", stat->name,
3957                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3958                                                    false));
3959                 for_each_node_state(nid, N_MEMORY)
3960                         seq_printf(m, " N%d=%lu", nid,
3961                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
3962                                                         stat->lru_mask, false));
3963                 seq_putc(m, '\n');
3964         }
3965
3966         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3967
3968                 seq_printf(m, "hierarchical_%s=%lu", stat->name,
3969                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3970                                                    true));
3971                 for_each_node_state(nid, N_MEMORY)
3972                         seq_printf(m, " N%d=%lu", nid,
3973                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
3974                                                         stat->lru_mask, true));
3975                 seq_putc(m, '\n');
3976         }
3977
3978         return 0;
3979 }
3980 #endif /* CONFIG_NUMA */
3981
3982 static const unsigned int memcg1_stats[] = {
3983         NR_FILE_PAGES,
3984         NR_ANON_MAPPED,
3985 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3986         NR_ANON_THPS,
3987 #endif
3988         NR_SHMEM,
3989         NR_FILE_MAPPED,
3990         NR_FILE_DIRTY,
3991         NR_WRITEBACK,
3992         MEMCG_SWAP,
3993 };
3994
3995 static const char *const memcg1_stat_names[] = {
3996         "cache",
3997         "rss",
3998 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3999         "rss_huge",
4000 #endif
4001         "shmem",
4002         "mapped_file",
4003         "dirty",
4004         "writeback",
4005         "swap",
4006 };
4007
4008 /* Universal VM events cgroup1 shows, original sort order */
4009 static const unsigned int memcg1_events[] = {
4010         PGPGIN,
4011         PGPGOUT,
4012         PGFAULT,
4013         PGMAJFAULT,
4014 };
4015
4016 static int memcg_stat_show(struct seq_file *m, void *v)
4017 {
4018         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4019         unsigned long memory, memsw;
4020         struct mem_cgroup *mi;
4021         unsigned int i;
4022
4023         BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4024
4025         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4026                 unsigned long nr;
4027
4028                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4029                         continue;
4030                 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4031 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4032                 if (memcg1_stats[i] == NR_ANON_THPS)
4033                         nr *= HPAGE_PMD_NR;
4034 #endif
4035                 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
4036         }
4037
4038         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4039                 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4040                            memcg_events_local(memcg, memcg1_events[i]));
4041
4042         for (i = 0; i < NR_LRU_LISTS; i++)
4043                 seq_printf(m, "%s %lu\n", lru_list_name(i),
4044                            memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4045                            PAGE_SIZE);
4046
4047         /* Hierarchical information */
4048         memory = memsw = PAGE_COUNTER_MAX;
4049         for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4050                 memory = min(memory, READ_ONCE(mi->memory.max));
4051                 memsw = min(memsw, READ_ONCE(mi->memsw.max));
4052         }
4053         seq_printf(m, "hierarchical_memory_limit %llu\n",
4054                    (u64)memory * PAGE_SIZE);
4055         if (do_memsw_account())
4056                 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4057                            (u64)memsw * PAGE_SIZE);
4058
4059         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4060                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4061                         continue;
4062                 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4063                            (u64)memcg_page_state(memcg, memcg1_stats[i]) *
4064                            PAGE_SIZE);
4065         }
4066
4067         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4068                 seq_printf(m, "total_%s %llu\n",
4069                            vm_event_name(memcg1_events[i]),
4070                            (u64)memcg_events(memcg, memcg1_events[i]));
4071
4072         for (i = 0; i < NR_LRU_LISTS; i++)
4073                 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4074                            (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4075                            PAGE_SIZE);
4076
4077 #ifdef CONFIG_DEBUG_VM
4078         {
4079                 pg_data_t *pgdat;
4080                 struct mem_cgroup_per_node *mz;
4081                 unsigned long anon_cost = 0;
4082                 unsigned long file_cost = 0;
4083
4084                 for_each_online_pgdat(pgdat) {
4085                         mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
4086
4087                         anon_cost += mz->lruvec.anon_cost;
4088                         file_cost += mz->lruvec.file_cost;
4089                 }
4090                 seq_printf(m, "anon_cost %lu\n", anon_cost);
4091                 seq_printf(m, "file_cost %lu\n", file_cost);
4092         }
4093 #endif
4094
4095         return 0;
4096 }
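
/*
 * Illustrative memory.stat excerpt (made-up values, cgroup v1): the local
 * counters come first, the hierarchical ones carry the "total_" prefix:
 *
 *	cache 1048576
 *	rss 8192
 *	...
 *	hierarchical_memory_limit 1073741824
 *	total_cache 2097152
 */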
4097
4098 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4099                                       struct cftype *cft)
4100 {
4101         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4102
4103         return mem_cgroup_swappiness(memcg);
4104 }
4105
4106 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4107                                        struct cftype *cft, u64 val)
4108 {
4109         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4110
4111         if (val > 100)
4112                 return -EINVAL;
4113
4114         if (css->parent)
4115                 memcg->swappiness = val;
4116         else
4117                 vm_swappiness = val;
4118
4119         return 0;
4120 }
4121
4122 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4123 {
4124         struct mem_cgroup_threshold_ary *t;
4125         unsigned long usage;
4126         int i;
4127
4128         rcu_read_lock();
4129         if (!swap)
4130                 t = rcu_dereference(memcg->thresholds.primary);
4131         else
4132                 t = rcu_dereference(memcg->memsw_thresholds.primary);
4133
4134         if (!t)
4135                 goto unlock;
4136
4137         usage = mem_cgroup_usage(memcg, swap);
4138
4139         /*
4140          * current_threshold points to the threshold just below or equal
4141          * to the usage. If that no longer holds, a threshold was crossed
4142          * after the last call of __mem_cgroup_threshold().
4143          */
4144         i = t->current_threshold;
4145
4146         /*
4147          * Iterate backward over the array of thresholds starting from
4148          * current_threshold and check if a threshold is crossed.
4149          * If none of the thresholds below the usage is crossed, we read
4150          * only one element of the array here.
4151          */
4152         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4153                 eventfd_signal(t->entries[i].eventfd, 1);
4154
4155         /* i = current_threshold + 1 */
4156         i++;
4157
4158         /*
4159          * Iterate forward over the array of thresholds starting from
4160          * current_threshold+1 and check if a threshold is crossed.
4161          * If none of the thresholds above the usage is crossed, we read
4162          * only one element of the array here.
4163          */
4164         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4165                 eventfd_signal(t->entries[i].eventfd, 1);
4166
4167         /* Update current_threshold */
4168         t->current_threshold = i - 1;
4169 unlock:
4170         rcu_read_unlock();
4171 }
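
/*
 * Worked example (illustrative): with thresholds {4M, 8M, 16M} and
 * current_threshold == 1 (8M), a drop of the usage to 5M makes the
 * backward scan signal the 8M eventfd and leaves current_threshold == 0;
 * a later rise to 20M makes the forward scan signal both 8M and 16M and
 * sets current_threshold == 2.
 */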
4172
4173 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4174 {
4175         while (memcg) {
4176                 __mem_cgroup_threshold(memcg, false);
4177                 if (do_memsw_account())
4178                         __mem_cgroup_threshold(memcg, true);
4179
4180                 memcg = parent_mem_cgroup(memcg);
4181         }
4182 }
4183
4184 static int compare_thresholds(const void *a, const void *b)
4185 {
4186         const struct mem_cgroup_threshold *_a = a;
4187         const struct mem_cgroup_threshold *_b = b;
4188
4189         if (_a->threshold > _b->threshold)
4190                 return 1;
4191
4192         if (_a->threshold < _b->threshold)
4193                 return -1;
4194
4195         return 0;
4196 }
4197
4198 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4199 {
4200         struct mem_cgroup_eventfd_list *ev;
4201
4202         spin_lock(&memcg_oom_lock);
4203
4204         list_for_each_entry(ev, &memcg->oom_notify, list)
4205                 eventfd_signal(ev->eventfd, 1);
4206
4207         spin_unlock(&memcg_oom_lock);
4208         return 0;
4209 }
4210
4211 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4212 {
4213         struct mem_cgroup *iter;
4214
4215         for_each_mem_cgroup_tree(iter, memcg)
4216                 mem_cgroup_oom_notify_cb(iter);
4217 }
4218
4219 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4220         struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4221 {
4222         struct mem_cgroup_thresholds *thresholds;
4223         struct mem_cgroup_threshold_ary *new;
4224         unsigned long threshold;
4225         unsigned long usage;
4226         int i, size, ret;
4227
4228         ret = page_counter_memparse(args, "-1", &threshold);
4229         if (ret)
4230                 return ret;
4231
4232         mutex_lock(&memcg->thresholds_lock);
4233
4234         if (type == _MEM) {
4235                 thresholds = &memcg->thresholds;
4236                 usage = mem_cgroup_usage(memcg, false);
4237         } else if (type == _MEMSWAP) {
4238                 thresholds = &memcg->memsw_thresholds;
4239                 usage = mem_cgroup_usage(memcg, true);
4240         } else
4241                 BUG();
4242
4243         /* Check if a threshold crossed before adding a new one */
4244         if (thresholds->primary)
4245                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4246
4247         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4248
4249         /* Allocate memory for new array of thresholds */
4250         new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4251         if (!new) {
4252                 ret = -ENOMEM;
4253                 goto unlock;
4254         }
4255         new->size = size;
4256
4257         /* Copy thresholds (if any) to new array */
4258         if (thresholds->primary) {
4259                 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4260                                 sizeof(struct mem_cgroup_threshold));
4261         }
4262
4263         /* Add new threshold */
4264         new->entries[size - 1].eventfd = eventfd;
4265         new->entries[size - 1].threshold = threshold;
4266
4267         /* Sort thresholds. Registering of new threshold isn't time-critical */
4268         sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4269                         compare_thresholds, NULL);
4270
4271         /* Find current threshold */
4272         new->current_threshold = -1;
4273         for (i = 0; i < size; i++) {
4274                 if (new->entries[i].threshold <= usage) {
4275                         /*
4276                          * new->current_threshold will not be used until
4277                          * rcu_assign_pointer(), so it's safe to increment
4278                          * it here.
4279                          */
4280                         ++new->current_threshold;
4281                 } else
4282                         break;
4283         }
4284
4285         /* Free old spare buffer and save old primary buffer as spare */
4286         kfree(thresholds->spare);
4287         thresholds->spare = thresholds->primary;
4288
4289         rcu_assign_pointer(thresholds->primary, new);
4290
4291         /* To be sure that nobody uses thresholds */
4292         synchronize_rcu();
4293
4294 unlock:
4295         mutex_unlock(&memcg->thresholds_lock);
4296
4297         return ret;
4298 }
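
/*
 * Note on the primary/spare scheme (summary of the code above): readers
 * only dereference thresholds->primary under RCU, so registration builds
 * a complete new array, publishes it with rcu_assign_pointer() and keeps
 * the previous array as ->spare, which unregistration can then reuse
 * without allocating.
 */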
4299
4300 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4301         struct eventfd_ctx *eventfd, const char *args)
4302 {
4303         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4304 }
4305
4306 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4307         struct eventfd_ctx *eventfd, const char *args)
4308 {
4309         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4310 }
4311
4312 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4313         struct eventfd_ctx *eventfd, enum res_type type)
4314 {
4315         struct mem_cgroup_thresholds *thresholds;
4316         struct mem_cgroup_threshold_ary *new;
4317         unsigned long usage;
4318         int i, j, size, entries;
4319
4320         mutex_lock(&memcg->thresholds_lock);
4321
4322         if (type == _MEM) {
4323                 thresholds = &memcg->thresholds;
4324                 usage = mem_cgroup_usage(memcg, false);
4325         } else if (type == _MEMSWAP) {
4326                 thresholds = &memcg->memsw_thresholds;
4327                 usage = mem_cgroup_usage(memcg, true);
4328         } else
4329                 BUG();
4330
4331         if (!thresholds->primary)
4332                 goto unlock;
4333
4334         /* Check if a threshold crossed before removing */
4335         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4336
4337         /* Calculate the new number of thresholds */
4338         size = entries = 0;
4339         for (i = 0; i < thresholds->primary->size; i++) {
4340                 if (thresholds->primary->entries[i].eventfd != eventfd)
4341                         size++;
4342                 else
4343                         entries++;
4344         }
4345
4346         new = thresholds->spare;
4347
4348         /* If no items related to eventfd have been cleared, nothing to do */
4349         if (!entries)
4350                 goto unlock;
4351
4352         /* Set thresholds array to NULL if we don't have thresholds */
4353         if (!size) {
4354                 kfree(new);
4355                 new = NULL;
4356                 goto swap_buffers;
4357         }
4358
4359         new->size = size;
4360
4361         /* Copy thresholds and find current threshold */
4362         new->current_threshold = -1;
4363         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4364                 if (thresholds->primary->entries[i].eventfd == eventfd)
4365                         continue;
4366
4367                 new->entries[j] = thresholds->primary->entries[i];
4368                 if (new->entries[j].threshold <= usage) {
4369                         /*
4370                          * new->current_threshold will not be used
4371                          * until rcu_assign_pointer(), so it's safe to increment
4372                          * it here.
4373                          */
4374                         ++new->current_threshold;
4375                 }
4376                 j++;
4377         }
4378
4379 swap_buffers:
4380         /* Swap primary and spare array */
4381         thresholds->spare = thresholds->primary;
4382
4383         rcu_assign_pointer(thresholds->primary, new);
4384
4385         /* To be sure that nobody uses thresholds */
4386         synchronize_rcu();
4387
4388         /* If all events are unregistered, free the spare array */
4389         if (!new) {
4390                 kfree(thresholds->spare);
4391                 thresholds->spare = NULL;
4392         }
4393 unlock:
4394         mutex_unlock(&memcg->thresholds_lock);
4395 }
4396
4397 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4398         struct eventfd_ctx *eventfd)
4399 {
4400         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4401 }
4402
4403 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4404         struct eventfd_ctx *eventfd)
4405 {
4406         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4407 }
4408
4409 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4410         struct eventfd_ctx *eventfd, const char *args)
4411 {
4412         struct mem_cgroup_eventfd_list *event;
4413
4414         event = kmalloc(sizeof(*event), GFP_KERNEL);
4415         if (!event)
4416                 return -ENOMEM;
4417
4418         spin_lock(&memcg_oom_lock);
4419
4420         event->eventfd = eventfd;
4421         list_add(&event->list, &memcg->oom_notify);
4422
4423         /* already in OOM? */
4424         if (memcg->under_oom)
4425                 eventfd_signal(eventfd, 1);
4426         spin_unlock(&memcg_oom_lock);
4427
4428         return 0;
4429 }
4430
4431 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4432         struct eventfd_ctx *eventfd)
4433 {
4434         struct mem_cgroup_eventfd_list *ev, *tmp;
4435
4436         spin_lock(&memcg_oom_lock);
4437
4438         list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4439                 if (ev->eventfd == eventfd) {
4440                         list_del(&ev->list);
4441                         kfree(ev);
4442                 }
4443         }
4444
4445         spin_unlock(&memcg_oom_lock);
4446 }
4447
4448 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4449 {
4450         struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4451
4452         seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4453         seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4454         seq_printf(sf, "oom_kill %lu\n",
4455                    atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4456         return 0;
4457 }
4458
4459 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4460         struct cftype *cft, u64 val)
4461 {
4462         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4463
4464         /* cannot set to root cgroup and only 0 and 1 are allowed */
4465         if (!css->parent || !((val == 0) || (val == 1)))
4466                 return -EINVAL;
4467
4468         memcg->oom_kill_disable = val;
4469         if (!val)
4470                 memcg_oom_recover(memcg);
4471
4472         return 0;
4473 }
4474
4475 #ifdef CONFIG_CGROUP_WRITEBACK
4476
4477 #include <trace/events/writeback.h>
4478
4479 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4480 {
4481         return wb_domain_init(&memcg->cgwb_domain, gfp);
4482 }
4483
4484 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4485 {
4486         wb_domain_exit(&memcg->cgwb_domain);
4487 }
4488
4489 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4490 {
4491         wb_domain_size_changed(&memcg->cgwb_domain);
4492 }
4493
4494 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4495 {
4496         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4497
4498         if (!memcg->css.parent)
4499                 return NULL;
4500
4501         return &memcg->cgwb_domain;
4502 }
4503
4504 /*
4505  * idx can be of type enum memcg_stat_item or node_stat_item.
4506  * Keep in sync with memcg_exact_page().
4507  */
4508 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4509 {
4510         long x = atomic_long_read(&memcg->vmstats[idx]);
4511         int cpu;
4512
4513         for_each_online_cpu(cpu)
4514                 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4515         if (x < 0)
4516                 x = 0;
4517         return x;
4518 }
4519
4520 /**
4521  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4522  * @wb: bdi_writeback in question
4523  * @pfilepages: out parameter for number of file pages
4524  * @pheadroom: out parameter for number of allocatable pages according to memcg
4525  * @pdirty: out parameter for number of dirty pages
4526  * @pwriteback: out parameter for number of pages under writeback
4527  *
4528  * Determine the numbers of file, headroom, dirty, and writeback pages in
4529  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4530  * is a bit more involved.
4531  *
4532  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4533  * headroom is calculated as the lowest headroom of itself and the
4534  * ancestors.  Note that this doesn't consider the actual amount of
4535  * available memory in the system.  The caller should further cap
4536  * *@pheadroom accordingly.
4537  */
4538 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4539                          unsigned long *pheadroom, unsigned long *pdirty,
4540                          unsigned long *pwriteback)
4541 {
4542         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4543         struct mem_cgroup *parent;
4544
4545         *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4546
4547         *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4548         *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4549                         memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4550         *pheadroom = PAGE_COUNTER_MAX;
4551
4552         while ((parent = parent_mem_cgroup(memcg))) {
4553                 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4554                                             READ_ONCE(memcg->memory.high));
4555                 unsigned long used = page_counter_read(&memcg->memory);
4556
4557                 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4558                 memcg = parent;
4559         }
4560 }
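/*
 * Worked example (illustrative numbers) for a wb in cgroup A/B:
 *
 *   A: min(max, high) = 100M, used = 80M  ->  headroom 20M
 *   B: min(max, high) =  50M, used = 20M  ->  headroom 30M
 *
 * The loop above walks from B towards the root and reports
 * *pheadroom = min(20M, 30M) = 20M: B may only dirty what its most
 * constrained ancestor can still absorb.
 */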
4561
4562 /*
4563  * Foreign dirty flushing
4564  *
4565  * There's an inherent mismatch between memcg and writeback.  The former
4566  * tracks ownership per-page while the latter per-inode.  This was a
4567  * deliberate design decision because honoring per-page ownership in the
4568  * writeback path is complicated, may lead to higher CPU and IO overheads,
4569  * and is deemed unnecessary given that write-sharing an inode across
4570  * different cgroups isn't a common use-case.
4571  *
4572  * Combined with inode majority-writer ownership switching, this works well
4573  * enough in most cases but there are some pathological cases.  For
4574  * example, let's say there are two cgroups A and B which keep writing to
4575  * different but confined parts of the same inode.  B owns the inode and
4576  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4577  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4578  * triggering background writeback.  A will be slowed down without a way to
4579  * make writeback of the dirty pages happen.
4580  *
4581  * Conditions like the above can lead to a cgroup getting repeatedly and
4582  * severely throttled after making some progress after each
4583  * dirty_expire_interval while the underlying IO device is almost
4584  * completely idle.
4585  *
4586  * Solving this problem completely requires matching the ownership tracking
4587  * granularities between memcg and writeback in either direction.  However,
4588  * the more egregious behaviors can be avoided by simply remembering the
4589  * most recent foreign dirtying events and initiating remote flushes on
4590  * them when local writeback isn't enough to keep the memory clean enough.
4591  *
4592  * The following two functions implement such a mechanism.  When a foreign
4593  * page - a page whose memcg and writeback ownerships don't match - is
4594  * dirtied, mem_cgroup_track_foreign_dirty() records the inode-owning
4595  * bdi_writeback on the page-owning memcg.  When balance_dirty_pages()
4596  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4597  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4598  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4599  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4600  * limited to MEMCG_CGWB_FRN_CNT.
4601  *
4602  * The mechanism only remembers IDs and doesn't hold any object references.
4603  * As being wrong occasionally doesn't matter, updates and accesses to the
4604  * records are lockless and racy.
4605  */
4606 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4607                                              struct bdi_writeback *wb)
4608 {
4609         struct mem_cgroup *memcg = page->mem_cgroup;
4610         struct memcg_cgwb_frn *frn;
4611         u64 now = get_jiffies_64();
4612         u64 oldest_at = now;
4613         int oldest = -1;
4614         int i;
4615
4616         trace_track_foreign_dirty(page, wb);
4617
4618         /*
4619          * Pick the slot to use.  If there is already a slot for @wb, keep
4620          * using it.  If not, replace the oldest one which isn't being
4621          * written out.
4622          */
4623         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4624                 frn = &memcg->cgwb_frn[i];
4625                 if (frn->bdi_id == wb->bdi->id &&
4626                     frn->memcg_id == wb->memcg_css->id)
4627                         break;
4628                 if (time_before64(frn->at, oldest_at) &&
4629                     atomic_read(&frn->done.cnt) == 1) {
4630                         oldest = i;
4631                         oldest_at = frn->at;
4632                 }
4633         }
4634
4635         if (i < MEMCG_CGWB_FRN_CNT) {
4636                 /*
4637                  * Re-using an existing one.  Update timestamp lazily to
4638                  * avoid making the cacheline hot.  We want them to be
4639                  * reasonably up-to-date and significantly shorter than
4640                  * dirty_expire_interval as that's what expires the record.
4641                  * Use the shorter of 1s and dirty_expire_interval / 8.
4642                  */
4643                 unsigned long update_intv =
4644                         min_t(unsigned long, HZ,
4645                               msecs_to_jiffies(dirty_expire_interval * 10) / 8);
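		/*
		 * With the default dirty_expire_interval of 3000 centisecs,
		 * this is min(HZ, msecs_to_jiffies(30000) / 8), i.e. the 1s
		 * bound wins over the ~3.75s one.
		 */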
4646
4647                 if (time_before64(frn->at, now - update_intv))
4648                         frn->at = now;
4649         } else if (oldest >= 0) {
4650                 /* replace the oldest slot that isn't in flight */
4651                 frn = &memcg->cgwb_frn[oldest];
4652                 frn->bdi_id = wb->bdi->id;
4653                 frn->memcg_id = wb->memcg_css->id;
4654                 frn->at = now;
4655         }
4656 }
4657
4658 /* issue foreign writeback flushes for recorded foreign dirtying events */
4659 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4660 {
4661         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4662         unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4663         u64 now = jiffies_64;
4664         int i;
4665
4666         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4667                 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4668
4669                 /*
4670                  * If the record is older than dirty_expire_interval,
4671                  * writeback on it has already started.  No need to kick it
4672                  * off again.  Also, don't start a new one if there's
4673                  * already one in flight.
4674                  */
4675                 if (time_after64(frn->at, now - intv) &&
4676                     atomic_read(&frn->done.cnt) == 1) {
4677                         frn->at = 0;
4678                         trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4679                         cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4680                                                WB_REASON_FOREIGN_FLUSH,
4681                                                &frn->done);
4682                 }
4683         }
4684 }
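/*
 * How the two halves fit together (sketch; the callers live in
 * mm/page-writeback.c): account_page_dirtied() notices that the page's
 * memcg differs from the wb's and ends up in
 * mem_cgroup_track_foreign_dirty_slowpath() above, while
 * balance_dirty_pages() calls mem_cgroup_flush_foreign() so that the
 * recorded foreign wbs are kicked before the dirtier is throttled.
 */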
4685
4686 #else   /* CONFIG_CGROUP_WRITEBACK */
4687
4688 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4689 {
4690         return 0;
4691 }
4692
4693 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4694 {
4695 }
4696
4697 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4698 {
4699 }
4700
4701 #endif  /* CONFIG_CGROUP_WRITEBACK */
4702
4703 /*
4704  * DO NOT USE IN NEW FILES.
4705  *
4706  * "cgroup.event_control" implementation.
4707  *
4708  * This is way over-engineered.  It tries to support fully configurable
4709  * events for each user.  Such level of flexibility is completely
4710  * unnecessary especially in the light of the planned unified hierarchy.
4711  *
4712  * Please deprecate this and replace with something simpler if at all
4713  * possible.
4714  */
4715
4716 /*
4717  * Unregister event and free resources.
4718  *
4719  * Gets called from workqueue.
4720  */
4721 static void memcg_event_remove(struct work_struct *work)
4722 {
4723         struct mem_cgroup_event *event =
4724                 container_of(work, struct mem_cgroup_event, remove);
4725         struct mem_cgroup *memcg = event->memcg;
4726
4727         remove_wait_queue(event->wqh, &event->wait);
4728
4729         event->unregister_event(memcg, event->eventfd);
4730
4731         /* Notify userspace the event is going away. */
4732         eventfd_signal(event->eventfd, 1);
4733
4734         eventfd_ctx_put(event->eventfd);
4735         kfree(event);
4736         css_put(&memcg->css);
4737 }
4738
4739 /*
4740  * Gets called on EPOLLHUP on eventfd when user closes it.
4741  *
4742  * Called with wqh->lock held and interrupts disabled.
4743  */
4744 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4745                             int sync, void *key)
4746 {
4747         struct mem_cgroup_event *event =
4748                 container_of(wait, struct mem_cgroup_event, wait);
4749         struct mem_cgroup *memcg = event->memcg;
4750         __poll_t flags = key_to_poll(key);
4751
4752         if (flags & EPOLLHUP) {
4753                 /*
4754                  * If the event has been detached at cgroup removal, we
4755                  * can simply return knowing the other side will cleanup
4756                  * for us.
4757                  *
4758                  * We can't race against event freeing since the other
4759                  * side will require wqh->lock via remove_wait_queue(),
4760                  * which we hold.
4761                  */
4762                 spin_lock(&memcg->event_list_lock);
4763                 if (!list_empty(&event->list)) {
4764                         list_del_init(&event->list);
4765                         /*
4766                          * We are in atomic context, but memcg_event_remove()
4767                          * may sleep, so we have to call it from a workqueue.
4768                          */
4769                         schedule_work(&event->remove);
4770                 }
4771                 spin_unlock(&memcg->event_list_lock);
4772         }
4773
4774         return 0;
4775 }
4776
4777 static void memcg_event_ptable_queue_proc(struct file *file,
4778                 wait_queue_head_t *wqh, poll_table *pt)
4779 {
4780         struct mem_cgroup_event *event =
4781                 container_of(pt, struct mem_cgroup_event, pt);
4782
4783         event->wqh = wqh;
4784         add_wait_queue(wqh, &event->wait);
4785 }
4786
4787 /*
4788  * DO NOT USE IN NEW FILES.
4789  *
4790  * Parse input and register new cgroup event handler.
4791  *
4792  * Input must be in format '<event_fd> <control_fd> <args>'.
4793  * Interpretation of args is defined by control file implementation.
4794  */
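/*
 * Illustrative userspace sequence (error handling omitted) for arming
 * an OOM notification through this file:
 *
 *   int efd = eventfd(0, 0);
 *   int cfd = open("<cgroup>/memory.oom_control", O_RDONLY);
 *   int ctl = open("<cgroup>/cgroup.event_control", O_WRONLY);
 *   dprintf(ctl, "%d %d", efd, cfd);
 *   read(efd, &cnt, sizeof(cnt));      (blocks until the memcg OOMs)
 *
 * For "memory.usage_in_bytes" the trailing <args> would be a usage
 * threshold in bytes, parsed by the register_event callback chosen
 * below.
 */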
4795 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4796                                          char *buf, size_t nbytes, loff_t off)
4797 {
4798         struct cgroup_subsys_state *css = of_css(of);
4799         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4800         struct mem_cgroup_event *event;
4801         struct cgroup_subsys_state *cfile_css;
4802         unsigned int efd, cfd;
4803         struct fd efile;
4804         struct fd cfile;
4805         const char *name;
4806         char *endp;
4807         int ret;
4808
4809         buf = strstrip(buf);
4810
4811         efd = simple_strtoul(buf, &endp, 10);
4812         if (*endp != ' ')
4813                 return -EINVAL;
4814         buf = endp + 1;
4815
4816         cfd = simple_strtoul(buf, &endp, 10);
4817         if ((*endp != ' ') && (*endp != '\0'))
4818                 return -EINVAL;
4819         buf = endp + 1;
4820
4821         event = kzalloc(sizeof(*event), GFP_KERNEL);
4822         if (!event)
4823                 return -ENOMEM;
4824
4825         event->memcg = memcg;
4826         INIT_LIST_HEAD(&event->list);
4827         init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4828         init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4829         INIT_WORK(&event->remove, memcg_event_remove);
4830
4831         efile = fdget(efd);
4832         if (!efile.file) {
4833                 ret = -EBADF;
4834                 goto out_kfree;
4835         }
4836
4837         event->eventfd = eventfd_ctx_fileget(efile.file);
4838         if (IS_ERR(event->eventfd)) {
4839                 ret = PTR_ERR(event->eventfd);
4840                 goto out_put_efile;
4841         }
4842
4843         cfile = fdget(cfd);
4844         if (!cfile.file) {
4845                 ret = -EBADF;
4846                 goto out_put_eventfd;
4847         }
4848
4849         /* the process needs read permission on the control file */
4850         /* AV: shouldn't we check that it's been opened for read instead? */
4851         ret = inode_permission(file_inode(cfile.file), MAY_READ);
4852         if (ret < 0)
4853                 goto out_put_cfile;
4854
4855         /*
4856          * Determine the event callbacks and set them in @event.  This used
4857          * to be done via struct cftype but cgroup core no longer knows
4858          * about these events.  The following is crude but the whole thing
4859          * is for compatibility anyway.
4860          *
4861          * DO NOT ADD NEW FILES.
4862          */
4863         name = cfile.file->f_path.dentry->d_name.name;
4864
4865         if (!strcmp(name, "memory.usage_in_bytes")) {
4866                 event->register_event = mem_cgroup_usage_register_event;
4867                 event->unregister_event = mem_cgroup_usage_unregister_event;
4868         } else if (!strcmp(name, "memory.oom_control")) {
4869                 event->register_event = mem_cgroup_oom_register_event;
4870                 event->unregister_event = mem_cgroup_oom_unregister_event;
4871         } else if (!strcmp(name, "memory.pressure_level")) {
4872                 event->register_event = vmpressure_register_event;
4873                 event->unregister_event = vmpressure_unregister_event;
4874         } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4875                 event->register_event = memsw_cgroup_usage_register_event;
4876                 event->unregister_event = memsw_cgroup_usage_unregister_event;
4877         } else {
4878                 ret = -EINVAL;
4879                 goto out_put_cfile;
4880         }
4881
4882         /*
4883          * Verify that @cfile belongs to @css.  Also, remaining events are
4884          * automatically removed on cgroup destruction but the removal is
4885          * asynchronous, so take an extra ref on @css.
4886          */
4887         cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4888                                                &memory_cgrp_subsys);
4889         ret = -EINVAL;
4890         if (IS_ERR(cfile_css))
4891                 goto out_put_cfile;
4892         if (cfile_css != css) {
4893                 css_put(cfile_css);
4894                 goto out_put_cfile;
4895         }
4896
4897         ret = event->register_event(memcg, event->eventfd, buf);
4898         if (ret)
4899                 goto out_put_css;
4900
4901         vfs_poll(efile.file, &event->pt);
4902
4903         spin_lock(&memcg->event_list_lock);
4904         list_add(&event->list, &memcg->event_list);
4905         spin_unlock(&memcg->event_list_lock);
4906
4907         fdput(cfile);
4908         fdput(efile);
4909
4910         return nbytes;
4911
4912 out_put_css:
4913         css_put(css);
4914 out_put_cfile:
4915         fdput(cfile);
4916 out_put_eventfd:
4917         eventfd_ctx_put(event->eventfd);
4918 out_put_efile:
4919         fdput(efile);
4920 out_kfree:
4921         kfree(event);
4922
4923         return ret;
4924 }
4925
4926 static struct cftype mem_cgroup_legacy_files[] = {
4927         {
4928                 .name = "usage_in_bytes",
4929                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4930                 .read_u64 = mem_cgroup_read_u64,
4931         },
4932         {
4933                 .name = "max_usage_in_bytes",
4934                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4935                 .write = mem_cgroup_reset,
4936                 .read_u64 = mem_cgroup_read_u64,
4937         },
4938         {
4939                 .name = "limit_in_bytes",
4940                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4941                 .write = mem_cgroup_write,
4942                 .read_u64 = mem_cgroup_read_u64,
4943         },
4944         {
4945                 .name = "soft_limit_in_bytes",
4946                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4947                 .write = mem_cgroup_write,
4948                 .read_u64 = mem_cgroup_read_u64,
4949         },
4950         {
4951                 .name = "failcnt",
4952                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4953                 .write = mem_cgroup_reset,
4954                 .read_u64 = mem_cgroup_read_u64,
4955         },
4956         {
4957                 .name = "stat",
4958                 .seq_show = memcg_stat_show,
4959         },
4960         {
4961                 .name = "force_empty",
4962                 .write = mem_cgroup_force_empty_write,
4963         },
4964         {
4965                 .name = "use_hierarchy",
4966                 .write_u64 = mem_cgroup_hierarchy_write,
4967                 .read_u64 = mem_cgroup_hierarchy_read,
4968         },
4969         {
4970                 .name = "cgroup.event_control",         /* XXX: for compat */
4971                 .write = memcg_write_event_control,
4972                 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4973         },
4974         {
4975                 .name = "swappiness",
4976                 .read_u64 = mem_cgroup_swappiness_read,
4977                 .write_u64 = mem_cgroup_swappiness_write,
4978         },
4979         {
4980                 .name = "move_charge_at_immigrate",
4981                 .read_u64 = mem_cgroup_move_charge_read,
4982                 .write_u64 = mem_cgroup_move_charge_write,
4983         },
4984         {
4985                 .name = "oom_control",
4986                 .seq_show = mem_cgroup_oom_control_read,
4987                 .write_u64 = mem_cgroup_oom_control_write,
4988                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4989         },
4990         {
4991                 .name = "pressure_level",
4992         },
4993 #ifdef CONFIG_NUMA
4994         {
4995                 .name = "numa_stat",
4996                 .seq_show = memcg_numa_stat_show,
4997         },
4998 #endif
4999         {
5000                 .name = "kmem.limit_in_bytes",
5001                 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5002                 .write = mem_cgroup_write,
5003                 .read_u64 = mem_cgroup_read_u64,
5004         },
5005         {
5006                 .name = "kmem.usage_in_bytes",
5007                 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5008                 .read_u64 = mem_cgroup_read_u64,
5009         },
5010         {
5011                 .name = "kmem.failcnt",
5012                 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5013                 .write = mem_cgroup_reset,
5014                 .read_u64 = mem_cgroup_read_u64,
5015         },
5016         {
5017                 .name = "kmem.max_usage_in_bytes",
5018                 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5019                 .write = mem_cgroup_reset,
5020                 .read_u64 = mem_cgroup_read_u64,
5021         },
5022 #if defined(CONFIG_MEMCG_KMEM) && \
5023         (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5024         {
5025                 .name = "kmem.slabinfo",
5026                 .seq_show = memcg_slab_show,
5027         },
5028 #endif
5029         {
5030                 .name = "kmem.tcp.limit_in_bytes",
5031                 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5032                 .write = mem_cgroup_write,
5033                 .read_u64 = mem_cgroup_read_u64,
5034         },
5035         {
5036                 .name = "kmem.tcp.usage_in_bytes",
5037                 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5038                 .read_u64 = mem_cgroup_read_u64,
5039         },
5040         {
5041                 .name = "kmem.tcp.failcnt",
5042                 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5043                 .write = mem_cgroup_reset,
5044                 .read_u64 = mem_cgroup_read_u64,
5045         },
5046         {
5047                 .name = "kmem.tcp.max_usage_in_bytes",
5048                 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5049                 .write = mem_cgroup_reset,
5050                 .read_u64 = mem_cgroup_read_u64,
5051         },
5052         { },    /* terminate */
5053 };
5054
5055 /*
5056  * Private memory cgroup IDR
5057  *
5058  * Swap-out records and page cache shadow entries need to store memcg
5059  * references in constrained space, so we maintain an ID space that is
5060  * limited to 16 bits (MEM_CGROUP_ID_MAX), capping the total number of
5061  * memory-controlled cgroups at 64k.
5062  *
5063  * However, there usually are many references to the offline CSS after
5064  * the cgroup has been destroyed, such as page cache or reclaimable
5065  * slab objects, that don't need to hang on to the ID. We want to keep
5066  * those dead CSS from occupying IDs, or we might quickly exhaust the
5067  * relatively small ID space and prevent the creation of new cgroups
5068  * even when there are much fewer than 64k cgroups - possibly none.
5069  *
5070  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5071  * be freed and recycled when it's no longer needed, which is usually
5072  * when the CSS is offlined.
5073  *
5074  * The only exception is records of swapped-out tmpfs/shmem
5075  * pages that need to be attributed to live ancestors on swapin. But
5076  * those references are manageable from userspace.
5077  */
5078
5079 static DEFINE_IDR(mem_cgroup_idr);
5080
5081 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5082 {
5083         if (memcg->id.id > 0) {
5084                 idr_remove(&mem_cgroup_idr, memcg->id.id);
5085                 memcg->id.id = 0;
5086         }
5087 }
5088
5089 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5090                                                   unsigned int n)
5091 {
5092         refcount_add(n, &memcg->id.ref);
5093 }
5094
5095 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5096 {
5097         if (refcount_sub_and_test(n, &memcg->id.ref)) {
5098                 mem_cgroup_id_remove(memcg);
5099
5100                 /* Memcg ID pins CSS */
5101                 css_put(&memcg->css);
5102         }
5103 }
5104
5105 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5106 {
5107         mem_cgroup_id_put_many(memcg, 1);
5108 }
5109
5110 /**
5111  * mem_cgroup_from_id - look up a memcg from a memcg id
5112  * @id: the memcg id to look up
5113  *
5114  * Caller must hold rcu_read_lock().
5115  */
5116 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5117 {
5118         WARN_ON_ONCE(!rcu_read_lock_held());
5119         return idr_find(&mem_cgroup_idr, id);
5120 }
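/*
 * Sketch of the usual caller pattern (details vary per call site):
 *
 *   rcu_read_lock();
 *   memcg = mem_cgroup_from_id(id);
 *   if (memcg && !css_tryget_online(&memcg->css))
 *           memcg = NULL;
 *   rcu_read_unlock();
 *
 * The ID only makes the memcg findable; taking a css reference is what
 * keeps it alive once the RCU read section ends.
 */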
5121
5122 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5123 {
5124         struct mem_cgroup_per_node *pn;
5125         int tmp = node;
5126         /*
5127          * This routine is called for each possible node.
5128          * But it's a BUG to call kmalloc() against an offline node.
5129          *
5130          * TODO: this routine can waste much memory for nodes which will
5131          *       never be onlined. It's better to use memory hotplug callback
5132          *       function.
5133          */
5134         if (!node_state(node, N_NORMAL_MEMORY))
5135                 tmp = -1;
5136         pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5137         if (!pn)
5138                 return 1;
5139
5140         /* We charge the parent cgroup, never the current task */
5141         WARN_ON_ONCE(!current->active_memcg);
5142
5143         pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
5144                                                  GFP_KERNEL_ACCOUNT);
5145         if (!pn->lruvec_stat_local) {
5146                 kfree(pn);
5147                 return 1;
5148         }
5149
5150         pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
5151                                                GFP_KERNEL_ACCOUNT);
5152         if (!pn->lruvec_stat_cpu) {
5153                 free_percpu(pn->lruvec_stat_local);
5154                 kfree(pn);
5155                 return 1;
5156         }
5157
5158         lruvec_init(&pn->lruvec);
5159         pn->usage_in_excess = 0;
5160         pn->on_tree = false;
5161         pn->memcg = memcg;
5162
5163         memcg->nodeinfo[node] = pn;
5164         return 0;
5165 }
5166
5167 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5168 {
5169         struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5170
5171         if (!pn)
5172                 return;
5173
5174         free_percpu(pn->lruvec_stat_cpu);
5175         free_percpu(pn->lruvec_stat_local);
5176         kfree(pn);
5177 }
5178
5179 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5180 {
5181         int node;
5182
5183         for_each_node(node)
5184                 free_mem_cgroup_per_node_info(memcg, node);
5185         free_percpu(memcg->vmstats_percpu);
5186         free_percpu(memcg->vmstats_local);
5187         kfree(memcg);
5188 }
5189
5190 static void mem_cgroup_free(struct mem_cgroup *memcg)
5191 {
5192         memcg_wb_domain_exit(memcg);
5193         /*
5194          * Flush percpu vmstats and vmevents to guarantee the value correctness
5195          * on parent's and all ancestor levels.
5196          */
5197         memcg_flush_percpu_vmstats(memcg);
5198         memcg_flush_percpu_vmevents(memcg);
5199         __mem_cgroup_free(memcg);
5200 }
5201
5202 static struct mem_cgroup *mem_cgroup_alloc(void)
5203 {
5204         struct mem_cgroup *memcg;
5205         unsigned int size;
5206         int node;
5207         int __maybe_unused i;
5208         long error = -ENOMEM;
5209
5210         size = sizeof(struct mem_cgroup);
5211         size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5212
5213         memcg = kzalloc(size, GFP_KERNEL);
5214         if (!memcg)
5215                 return ERR_PTR(error);
5216
5217         memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5218                                  1, MEM_CGROUP_ID_MAX,
5219                                  GFP_KERNEL);
5220         if (memcg->id.id < 0) {
5221                 error = memcg->id.id;
5222                 goto fail;
5223         }
5224
5225         /* We charge the parent cgroup, never the current task */
5226         WARN_ON_ONCE(!current->active_memcg);
5227
5228         memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5229                                                 GFP_KERNEL_ACCOUNT);
5230         if (!memcg->vmstats_local)
5231                 goto fail;
5232
5233         memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5234                                                  GFP_KERNEL_ACCOUNT);
5235         if (!memcg->vmstats_percpu)
5236                 goto fail;
5237
5238         for_each_node(node)
5239                 if (alloc_mem_cgroup_per_node_info(memcg, node))
5240                         goto fail;
5241
5242         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5243                 goto fail;
5244
5245         INIT_WORK(&memcg->high_work, high_work_func);
5246         INIT_LIST_HEAD(&memcg->oom_notify);
5247         mutex_init(&memcg->thresholds_lock);
5248         spin_lock_init(&memcg->move_lock);
5249         vmpressure_init(&memcg->vmpressure);
5250         INIT_LIST_HEAD(&memcg->event_list);
5251         spin_lock_init(&memcg->event_list_lock);
5252         memcg->socket_pressure = jiffies;
5253 #ifdef CONFIG_MEMCG_KMEM
5254         memcg->kmemcg_id = -1;
5255         INIT_LIST_HEAD(&memcg->objcg_list);
5256 #endif
5257 #ifdef CONFIG_CGROUP_WRITEBACK
5258         INIT_LIST_HEAD(&memcg->cgwb_list);
5259         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5260                 memcg->cgwb_frn[i].done =
5261                         __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5262 #endif
5263 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5264         spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5265         INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5266         memcg->deferred_split_queue.split_queue_len = 0;
5267 #endif
5268         idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5269         return memcg;
5270 fail:
5271         mem_cgroup_id_remove(memcg);
5272         __mem_cgroup_free(memcg);
5273         return ERR_PTR(error);
5274 }
5275
5276 static struct cgroup_subsys_state * __ref
5277 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5278 {
5279         struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5280         struct mem_cgroup *memcg;
5281         long error = -ENOMEM;
5282
5283         memalloc_use_memcg(parent);
5284         memcg = mem_cgroup_alloc();
5285         memalloc_unuse_memcg();
5286         if (IS_ERR(memcg))
5287                 return ERR_CAST(memcg);
5288
5289         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5290         memcg->soft_limit = PAGE_COUNTER_MAX;
5291         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5292         if (parent) {
5293                 memcg->swappiness = mem_cgroup_swappiness(parent);
5294                 memcg->oom_kill_disable = parent->oom_kill_disable;
5295         }
5296         if (parent && parent->use_hierarchy) {
5297                 memcg->use_hierarchy = true;
5298                 page_counter_init(&memcg->memory, &parent->memory);
5299                 page_counter_init(&memcg->swap, &parent->swap);
5300                 page_counter_init(&memcg->memsw, &parent->memsw);
5301                 page_counter_init(&memcg->kmem, &parent->kmem);
5302                 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5303         } else {
5304                 page_counter_init(&memcg->memory, NULL);
5305                 page_counter_init(&memcg->swap, NULL);
5306                 page_counter_init(&memcg->memsw, NULL);
5307                 page_counter_init(&memcg->kmem, NULL);
5308                 page_counter_init(&memcg->tcpmem, NULL);
5309                 /*
5310                  * Deeper hierarchy with use_hierarchy == false doesn't make
5311                  * much sense, so let the cgroup subsystem know about this
5312                  * unfortunate state in our controller.
5313                  */
5314                 if (parent != root_mem_cgroup)
5315                         memory_cgrp_subsys.broken_hierarchy = true;
5316         }
5317
5318         /* The following stuff does not apply to the root */
5319         if (!parent) {
5320                 root_mem_cgroup = memcg;
5321                 return &memcg->css;
5322         }
5323
5324         error = memcg_online_kmem(memcg);
5325         if (error)
5326                 goto fail;
5327
5328         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5329                 static_branch_inc(&memcg_sockets_enabled_key);
5330
5331         return &memcg->css;
5332 fail:
5333         mem_cgroup_id_remove(memcg);
5334         mem_cgroup_free(memcg);
5335         return ERR_PTR(error);
5336 }
5337
5338 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5339 {
5340         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5341
5342         /*
5343          * A memcg must be visible for memcg_expand_shrinker_maps()
5344          * by the time the maps are allocated. So, we allocate maps
5345          * here, when for_each_mem_cgroup() can't skip it.
5346          */
5347         if (memcg_alloc_shrinker_maps(memcg)) {
5348                 mem_cgroup_id_remove(memcg);
5349                 return -ENOMEM;
5350         }
5351
5352         /* Online state pins memcg ID, memcg ID pins CSS */
5353         refcount_set(&memcg->id.ref, 1);
5354         css_get(css);
5355         return 0;
5356 }
5357
5358 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5359 {
5360         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5361         struct mem_cgroup_event *event, *tmp;
5362
5363         /*
5364          * Unregister events and notify userspace.
5365          * Notify userspace about cgroup removal only after rmdir of the cgroup
5366          * directory, to avoid races between userspace and kernelspace.
5367          */
5368         spin_lock(&memcg->event_list_lock);
5369         list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5370                 list_del_init(&event->list);
5371                 schedule_work(&event->remove);
5372         }
5373         spin_unlock(&memcg->event_list_lock);
5374
5375         page_counter_set_min(&memcg->memory, 0);
5376         page_counter_set_low(&memcg->memory, 0);
5377
5378         memcg_offline_kmem(memcg);
5379         wb_memcg_offline(memcg);
5380
5381         drain_all_stock(memcg);
5382
5383         mem_cgroup_id_put(memcg);
5384 }
5385
5386 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5387 {
5388         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5389
5390         invalidate_reclaim_iterators(memcg);
5391 }
5392
5393 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5394 {
5395         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5396         int __maybe_unused i;
5397
5398 #ifdef CONFIG_CGROUP_WRITEBACK
5399         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5400                 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5401 #endif
5402         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5403                 static_branch_dec(&memcg_sockets_enabled_key);
5404
5405         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5406                 static_branch_dec(&memcg_sockets_enabled_key);
5407
5408         vmpressure_cleanup(&memcg->vmpressure);
5409         cancel_work_sync(&memcg->high_work);
5410         mem_cgroup_remove_from_trees(memcg);
5411         memcg_free_shrinker_maps(memcg);
5412         memcg_free_kmem(memcg);
5413         mem_cgroup_free(memcg);
5414 }
5415
5416 /**
5417  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5418  * @css: the target css
5419  *
5420  * Reset the states of the mem_cgroup associated with @css.  This is
5421  * invoked when the userland requests disabling on the default hierarchy
5422  * but the memcg is pinned through dependency.  The memcg should stop
5423  * applying policies and should revert to the vanilla state as it may be
5424  * made visible again.
5425  *
5426  * The current implementation only resets the essential configurations.
5427  * This needs to be expanded to cover all the visible parts.
5428  */
5429 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5430 {
5431         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5432
5433         page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5434         page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5435         page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
5436         page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5437         page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5438         page_counter_set_min(&memcg->memory, 0);
5439         page_counter_set_low(&memcg->memory, 0);
5440         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5441         memcg->soft_limit = PAGE_COUNTER_MAX;
5442         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5443         memcg_wb_domain_size_changed(memcg);
5444 }
5445
5446 #ifdef CONFIG_MMU
5447 /* Handlers for move charge at task migration. */
5448 static int mem_cgroup_do_precharge(unsigned long count)
5449 {
5450         int ret;
5451
5452         /* Try a single bulk charge without reclaim first, kswapd may wake */
5453         ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5454         if (!ret) {
5455                 mc.precharge += count;
5456                 return ret;
5457         }
5458
5459         /* Try charges one by one with reclaim, but do not retry */
5460         while (count--) {
5461                 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5462                 if (ret)
5463                         return ret;
5464                 mc.precharge++;
5465                 cond_resched();
5466         }
5467         return 0;
5468 }
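/*
 * Note on the gfp masks above: clearing __GFP_DIRECT_RECLAIM makes the
 * bulk attempt purely opportunistic (it may still wake kswapd), while
 * the one-by-one fallback allows direct reclaim but adds __GFP_NORETRY
 * so a hopeless charge fails fast instead of looping towards OOM.
 */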
5469
5470 union mc_target {
5471         struct page     *page;
5472         swp_entry_t     ent;
5473 };
5474
5475 enum mc_target_type {
5476         MC_TARGET_NONE = 0,
5477         MC_TARGET_PAGE,
5478         MC_TARGET_SWAP,
5479         MC_TARGET_DEVICE,
5480 };
5481
5482 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5483                                                 unsigned long addr, pte_t ptent)
5484 {
5485         struct page *page = vm_normal_page(vma, addr, ptent);
5486
5487         if (!page || !page_mapped(page))
5488                 return NULL;
5489         if (PageAnon(page)) {
5490                 if (!(mc.flags & MOVE_ANON))
5491                         return NULL;
5492         } else {
5493                 if (!(mc.flags & MOVE_FILE))
5494                         return NULL;
5495         }
5496         if (!get_page_unless_zero(page))
5497                 return NULL;
5498
5499         return page;
5500 }
5501
5502 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5503 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5504                         pte_t ptent, swp_entry_t *entry)
5505 {
5506         struct page *page = NULL;
5507         swp_entry_t ent = pte_to_swp_entry(ptent);
5508
5509         if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
5510                 return NULL;
5511
5512         /*
5513          * Handle MEMORY_DEVICE_PRIVATE, which are ZONE_DEVICE pages belonging
5514          * to a device; because they are not accessible by the CPU, they are
5515          * stored as special swap entries in the CPU page table.
5516          */
5517         if (is_device_private_entry(ent)) {
5518                 page = device_private_entry_to_page(ent);
5519                 /*
5520                  * MEMORY_DEVICE_PRIVATE pages are ZONE_DEVICE pages, which have
5521                  * a refcount of 1 when free (unlike normal pages).
5522                  */
5523                 if (!page_ref_add_unless(page, 1, 1))
5524                         return NULL;
5525                 return page;
5526         }
5527
5528         /*
5529          * Because lookup_swap_cache() updates some statistics counters,
5530          * we call find_get_page() with swapper_space directly.
5531          */
5532         page = find_get_page(swap_address_space(ent), swp_offset(ent));
5533         entry->val = ent.val;
5534
5535         return page;
5536 }
5537 #else
5538 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5539                         pte_t ptent, swp_entry_t *entry)
5540 {
5541         return NULL;
5542 }
5543 #endif
5544
5545 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5546                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
5547 {
5548         struct page *page = NULL;
5549         struct address_space *mapping;
5550         pgoff_t pgoff;
5551
5552         if (!vma->vm_file) /* anonymous vma */
5553                 return NULL;
5554         if (!(mc.flags & MOVE_FILE))
5555                 return NULL;
5556
5557         mapping = vma->vm_file->f_mapping;
5558         pgoff = linear_page_index(vma, addr);
5559
5560         /* page is moved even if it's not RSS of this task (page-faulted). */
5561 #ifdef CONFIG_SWAP
5562         /* shmem/tmpfs may report page out on swap: account for that too. */
5563         if (shmem_mapping(mapping)) {
5564                 page = find_get_entry(mapping, pgoff);
5565                 if (xa_is_value(page)) {
5566                         swp_entry_t swp = radix_to_swp_entry(page);
5567                         *entry = swp;
5568                         page = find_get_page(swap_address_space(swp),
5569                                              swp_offset(swp));
5570                 }
5571         } else
5572                 page = find_get_page(mapping, pgoff);
5573 #else
5574         page = find_get_page(mapping, pgoff);
5575 #endif
5576         return page;
5577 }
5578
5579 /**
5580  * mem_cgroup_move_account - move account of the page
5581  * @page: the page
5582  * @compound: charge the page as compound or small page
5583  * @from: mem_cgroup which the page is moved from.
5584  * @to: mem_cgroup which the page is moved to. @from != @to.
5585  *
5586  * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
5587  *
5588  * This function doesn't do "charge" to the new cgroup and doesn't do
5589  * "uncharge" from the old cgroup.
5590  */
5591 static int mem_cgroup_move_account(struct page *page,
5592                                    bool compound,
5593                                    struct mem_cgroup *from,
5594                                    struct mem_cgroup *to)
5595 {
5596         struct lruvec *from_vec, *to_vec;
5597         struct pglist_data *pgdat;
5598         unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5599         int ret;
5600
5601         VM_BUG_ON(from == to);
5602         VM_BUG_ON_PAGE(PageLRU(page), page);
5603         VM_BUG_ON(compound && !PageTransHuge(page));
5604
5605         /*
5606          * Prevent mem_cgroup_migrate() from looking at
5607          * page->mem_cgroup of its source page while we change it.
5608          */
5609         ret = -EBUSY;
5610         if (!trylock_page(page))
5611                 goto out;
5612
5613         ret = -EINVAL;
5614         if (page->mem_cgroup != from)
5615                 goto out_unlock;
5616
5617         pgdat = page_pgdat(page);
5618         from_vec = mem_cgroup_lruvec(from, pgdat);
5619         to_vec = mem_cgroup_lruvec(to, pgdat);
5620
5621         lock_page_memcg(page);
5622
5623         if (PageAnon(page)) {
5624                 if (page_mapped(page)) {
5625                         __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5626                         __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5627                         if (PageTransHuge(page)) {
5628                                 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5629                                                    -nr_pages);
5630                                 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5631                                                    nr_pages);
5632                         }
5633
5634                 }
5635         } else {
5636                 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5637                 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5638
5639                 if (PageSwapBacked(page)) {
5640                         __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5641                         __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5642                 }
5643
5644                 if (page_mapped(page)) {
5645                         __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5646                         __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5647                 }
5648
5649                 if (PageDirty(page)) {
5650                         struct address_space *mapping = page_mapping(page);
5651
5652                         if (mapping_cap_account_dirty(mapping)) {
5653                                 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5654                                                    -nr_pages);
5655                                 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5656                                                    nr_pages);
5657                         }
5658                 }
5659         }
5660
5661         if (PageWriteback(page)) {
5662                 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5663                 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5664         }
5665
5666         /*
5667          * All state has been migrated, let's switch to the new memcg.
5668          *
5669          * It is safe to change page->mem_cgroup here because the page
5670          * is referenced, charged, isolated, and locked: we can't race
5671          * with (un)charging, migration, LRU putback, or anything else
5672          * that would rely on a stable page->mem_cgroup.
5673          *
5674          * Note that lock_page_memcg is a memcg lock, not a page lock,
5675          * to save space. As soon as we switch page->mem_cgroup to a
5676          * new memcg that isn't locked, the above state can change
5677          * concurrently again. Make sure we're truly done with it.
5678          */
5679         smp_mb();
5680
5681         css_get(&to->css);
5682         css_put(&from->css);
5683
5684         page->mem_cgroup = to;
5685
5686         __unlock_page_memcg(from);
5687
5688         ret = 0;
5689
5690         local_irq_disable();
5691         mem_cgroup_charge_statistics(to, page, nr_pages);
5692         memcg_check_events(to, page);
5693         mem_cgroup_charge_statistics(from, page, -nr_pages);
5694         memcg_check_events(from, page);
5695         local_irq_enable();
5696 out_unlock:
5697         unlock_page(page);
5698 out:
5699         return ret;
5700 }
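/*
 * Usage sketch, mirroring the THP branch of
 * mem_cgroup_move_charge_pte_range() below:
 *
 *   if (!isolate_lru_page(page)) {
 *           if (!mem_cgroup_move_account(page, true, mc.from, mc.to)) {
 *                   mc.precharge -= HPAGE_PMD_NR;
 *                   mc.moved_charge += HPAGE_PMD_NR;
 *           }
 *           putback_lru_page(page);
 *   }
 *   put_page(page);
 */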
5701
5702 /**
5703  * get_mctgt_type - get target type of moving charge
5704  * @vma: the vma to which the pte to be checked belongs
5705  * @addr: the address corresponding to the pte to be checked
5706  * @ptent: the pte to be checked
5707  * @target: the pointer where the target page or swap entry is stored (can be NULL)
5708  *
5709  * Returns
5710  *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5711  *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5712  *     move charge. If @target is not NULL, the page is stored in target->page
5713  *     with an extra refcount taken (callers should handle it).
5714  *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5715  *     target for charge migration. If @target is not NULL, the entry is stored
5716  *     in target->ent.
5717  *   3 (MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5718  *     (so a ZONE_DEVICE page and thus not on the lru).
5719  *     For now such a page is charged like a regular page would be, as for all
5720  *     intents and purposes it is just special memory taking the place of a
5721  *     regular page.
5722  *
5723  *     See Documentation/vm/hmm.txt and include/linux/hmm.h
5724  *
5725  * Called with pte lock held.
5726  */
5727
5728 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5729                 unsigned long addr, pte_t ptent, union mc_target *target)
5730 {
5731         struct page *page = NULL;
5732         enum mc_target_type ret = MC_TARGET_NONE;
5733         swp_entry_t ent = { .val = 0 };
5734
5735         if (pte_present(ptent))
5736                 page = mc_handle_present_pte(vma, addr, ptent);
5737         else if (is_swap_pte(ptent))
5738                 page = mc_handle_swap_pte(vma, ptent, &ent);
5739         else if (pte_none(ptent))
5740                 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5741
5742         if (!page && !ent.val)
5743                 return ret;
5744         if (page) {
5745                 /*
5746                  * Do only a loose check w/o serialization.
5747                  * mem_cgroup_move_account() checks whether the page is valid
5748                  * under LRU exclusion.
5749                  */
5750                 if (page->mem_cgroup == mc.from) {
5751                         ret = MC_TARGET_PAGE;
5752                         if (is_device_private_page(page))
5753                                 ret = MC_TARGET_DEVICE;
5754                         if (target)
5755                                 target->page = page;
5756                 }
5757                 if (!ret || !target)
5758                         put_page(page);
5759         }
5760         /*
5761          * There is a swap entry and a page doesn't exist or isn't charged.
5762          * But we cannot move a tail-page in a THP.
5763          * But we cannot move a tail page of a THP.
5764         if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5765             mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5766                 ret = MC_TARGET_SWAP;
5767                 if (target)
5768                         target->ent = ent;
5769         }
5770         return ret;
5771 }
5772
5773 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5774 /*
5775  * We don't consider PMD-mapped swapping or file-mapped pages because THP does
5776  * not support them for now.
5777  * Caller should make sure that pmd_trans_huge(pmd) is true.
5778  */
5779 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5780                 unsigned long addr, pmd_t pmd, union mc_target *target)
5781 {
5782         struct page *page = NULL;
5783         enum mc_target_type ret = MC_TARGET_NONE;
5784
5785         if (unlikely(is_swap_pmd(pmd))) {
5786                 VM_BUG_ON(thp_migration_supported() &&
5787                                   !is_pmd_migration_entry(pmd));
5788                 return ret;
5789         }
5790         page = pmd_page(pmd);
5791         VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5792         if (!(mc.flags & MOVE_ANON))
5793                 return ret;
5794         if (page->mem_cgroup == mc.from) {
5795                 ret = MC_TARGET_PAGE;
5796                 if (target) {
5797                         get_page(page);
5798                         target->page = page;
5799                 }
5800         }
5801         return ret;
5802 }
5803 #else
5804 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5805                 unsigned long addr, pmd_t pmd, union mc_target *target)
5806 {
5807         return MC_TARGET_NONE;
5808 }
5809 #endif
5810
5811 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5812                                         unsigned long addr, unsigned long end,
5813                                         struct mm_walk *walk)
5814 {
5815         struct vm_area_struct *vma = walk->vma;
5816         pte_t *pte;
5817         spinlock_t *ptl;
5818
5819         ptl = pmd_trans_huge_lock(pmd, vma);
5820         if (ptl) {
5821                 /*
5822                  * Note there cannot be MC_TARGET_DEVICE for now, as we do not
5823                  * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
5824                  * this might change.
5825                  */
5826                 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5827                         mc.precharge += HPAGE_PMD_NR;
5828                 spin_unlock(ptl);
5829                 return 0;
5830         }
5831
5832         if (pmd_trans_unstable(pmd))
5833                 return 0;
5834         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5835         for (; addr != end; pte++, addr += PAGE_SIZE)
5836                 if (get_mctgt_type(vma, addr, *pte, NULL))
5837                         mc.precharge++; /* increment precharge temporarily */
5838         pte_unmap_unlock(pte - 1, ptl);
5839         cond_resched();
5840
5841         return 0;
5842 }
5843
5844 static const struct mm_walk_ops precharge_walk_ops = {
5845         .pmd_entry      = mem_cgroup_count_precharge_pte_range,
5846 };
5847
5848 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5849 {
5850         unsigned long precharge;
5851
5852         mmap_read_lock(mm);
5853         walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5854         mmap_read_unlock(mm);
5855
5856         precharge = mc.precharge;
5857         mc.precharge = 0;
5858
5859         return precharge;
5860 }
5861
5862 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5863 {
5864         unsigned long precharge = mem_cgroup_count_precharge(mm);
5865
5866         VM_BUG_ON(mc.moving_task);
5867         mc.moving_task = current;
5868         return mem_cgroup_do_precharge(precharge);
5869 }
5870
5871 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5872 static void __mem_cgroup_clear_mc(void)
5873 {
5874         struct mem_cgroup *from = mc.from;
5875         struct mem_cgroup *to = mc.to;
5876
5877         /* we must uncharge all the leftover precharges from mc.to */
5878         if (mc.precharge) {
5879                 cancel_charge(mc.to, mc.precharge);
5880                 mc.precharge = 0;
5881         }
5882         /*
5883          * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5884          * we must uncharge here.
5885          */
5886         if (mc.moved_charge) {
5887                 cancel_charge(mc.from, mc.moved_charge);
5888                 mc.moved_charge = 0;
5889         }
5890         /* we must fixup refcnts and charges */
5891         if (mc.moved_swap) {
5892                 /* uncharge swap account from the old cgroup */
5893                 if (!mem_cgroup_is_root(mc.from))
5894                         page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5895
5896                 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5897
5898                 /*
5899                  * we charged both to->memory and to->memsw, so we
5900                  * should uncharge to->memory.
5901                  */
5902                 if (!mem_cgroup_is_root(mc.to))
5903                         page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5904
5905                 mc.moved_swap = 0;
5906         }
5907         memcg_oom_recover(from);
5908         memcg_oom_recover(to);
5909         wake_up_all(&mc.waitq);
5910 }
5911
5912 static void mem_cgroup_clear_mc(void)
5913 {
5914         struct mm_struct *mm = mc.mm;
5915
5916         /*
5917          * we must clear moving_task before waking up waiters at the end of
5918          * task migration.
5919          */
5920         mc.moving_task = NULL;
5921         __mem_cgroup_clear_mc();
5922         spin_lock(&mc.lock);
5923         mc.from = NULL;
5924         mc.to = NULL;
5925         mc.mm = NULL;
5926         spin_unlock(&mc.lock);
5927
5928         mmput(mm);
5929 }
5930
5931 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5932 {
5933         struct cgroup_subsys_state *css;
5934         struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5935         struct mem_cgroup *from;
5936         struct task_struct *leader, *p;
5937         struct mm_struct *mm;
5938         unsigned long move_flags;
5939         int ret = 0;
5940
5941         /* charge immigration isn't supported on the default hierarchy */
5942         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5943                 return 0;
5944
5945         /*
5946          * Multi-process migrations only happen on the default hierarchy
5947          * where charge immigration is not used.  Perform charge
5948          * immigration if @tset contains a leader and whine if there are
5949          * multiple.
5950          */
5951         p = NULL;
5952         cgroup_taskset_for_each_leader(leader, css, tset) {
5953                 WARN_ON_ONCE(p);
5954                 p = leader;
5955                 memcg = mem_cgroup_from_css(css);
5956         }
5957         if (!p)
5958                 return 0;
5959
5960         /*
5961          * We are now committed to this value whatever it is. Changes in this
5962          * tunable will only affect upcoming migrations, not the current one.
5963          * So we need to save it, and keep it going.
5964          */
5965         move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5966         if (!move_flags)
5967                 return 0;
5968
5969         from = mem_cgroup_from_task(p);
5970
5971         VM_BUG_ON(from == memcg);
5972
5973         mm = get_task_mm(p);
5974         if (!mm)
5975                 return 0;
5976         /* We move charges only when we move an owner of the mm */
5977         if (mm->owner == p) {
5978                 VM_BUG_ON(mc.from);
5979                 VM_BUG_ON(mc.to);
5980                 VM_BUG_ON(mc.precharge);
5981                 VM_BUG_ON(mc.moved_charge);
5982                 VM_BUG_ON(mc.moved_swap);
5983
5984                 spin_lock(&mc.lock);
5985                 mc.mm = mm;
5986                 mc.from = from;
5987                 mc.to = memcg;
5988                 mc.flags = move_flags;
5989                 spin_unlock(&mc.lock);
5990                 /* We set mc.moving_task later */
5991
5992                 ret = mem_cgroup_precharge_mc(mm);
5993                 if (ret)
5994                         mem_cgroup_clear_mc();
5995         } else {
5996                 mmput(mm);
5997         }
5998         return ret;
5999 }
6000
6001 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6002 {
6003         if (mc.to)
6004                 mem_cgroup_clear_mc();
6005 }
6006
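/*
 * Page-walk callback for moving charges over one pmd range: a huge pmd
 * is moved wholesale under the pmd lock, while regular ptes are moved
 * one page (or swap entry) at a time, consuming mc.precharge as we go.
 */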
6007 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6008                                 unsigned long addr, unsigned long end,
6009                                 struct mm_walk *walk)
6010 {
6011         int ret = 0;
6012         struct vm_area_struct *vma = walk->vma;
6013         pte_t *pte;
6014         spinlock_t *ptl;
6015         enum mc_target_type target_type;
6016         union mc_target target;
6017         struct page *page;
6018
6019         ptl = pmd_trans_huge_lock(pmd, vma);
6020         if (ptl) {
6021                 if (mc.precharge < HPAGE_PMD_NR) {
6022                         spin_unlock(ptl);
6023                         return 0;
6024                 }
6025                 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6026                 if (target_type == MC_TARGET_PAGE) {
6027                         page = target.page;
6028                         if (!isolate_lru_page(page)) {
6029                                 if (!mem_cgroup_move_account(page, true,
6030                                                              mc.from, mc.to)) {
6031                                         mc.precharge -= HPAGE_PMD_NR;
6032                                         mc.moved_charge += HPAGE_PMD_NR;
6033                                 }
6034                                 putback_lru_page(page);
6035                         }
6036                         put_page(page);
6037                 } else if (target_type == MC_TARGET_DEVICE) {
6038                         page = target.page;
6039                         if (!mem_cgroup_move_account(page, true,
6040                                                      mc.from, mc.to)) {
6041                                 mc.precharge -= HPAGE_PMD_NR;
6042                                 mc.moved_charge += HPAGE_PMD_NR;
6043                         }
6044                         put_page(page);
6045                 }
6046                 spin_unlock(ptl);
6047                 return 0;
6048         }
6049
6050         if (pmd_trans_unstable(pmd))
6051                 return 0;
6052 retry:
6053         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6054         for (; addr != end; addr += PAGE_SIZE) {
6055                 pte_t ptent = *(pte++);
6056                 bool device = false;
6057                 swp_entry_t ent;
6058
6059                 if (!mc.precharge)
6060                         break;
6061
6062                 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6063                 case MC_TARGET_DEVICE:
6064                         device = true;
6065                         fallthrough;
6066                 case MC_TARGET_PAGE:
6067                         page = target.page;
6068                         /*
6069                          * We can have a part of the split pmd here. Moving it
6070                          * can be done, but it would be too convoluted, so simply
6071                          * ignore such a partial THP and keep it in the original
6072                          * memcg. There should be somebody mapping the head.
6073                          */
6074                         if (PageTransCompound(page))
6075                                 goto put;
6076                         if (!device && isolate_lru_page(page))
6077                                 goto put;
6078                         if (!mem_cgroup_move_account(page, false,
6079                                                 mc.from, mc.to)) {
6080                                 mc.precharge--;
6081                                 /* we uncharge from mc.from later. */
6082                                 mc.moved_charge++;
6083                         }
6084                         if (!device)
6085                                 putback_lru_page(page);
6086 put:                    /* get_mctgt_type() took a page reference */
6087                         put_page(page);
6088                         break;
6089                 case MC_TARGET_SWAP:
6090                         ent = target.ent;
6091                         if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6092                                 mc.precharge--;
6093                                 mem_cgroup_id_get_many(mc.to, 1);
6094                                 /* we fixup other refcnts and charges later. */
6095                                 mc.moved_swap++;
6096                         }
6097                         break;
6098                 default:
6099                         break;
6100                 }
6101         }
6102         pte_unmap_unlock(pte - 1, ptl);
6103         cond_resched();
6104
6105         if (addr != end) {
6106                 /*
6107                  * We have consumed all precharges we got in can_attach().
6108                  * We try to charge one page at a time, but don't do any
6109                  * additional charges to mc.to once a charge has failed in the
6110                  * attach() phase.
6111                  */
6112                 ret = mem_cgroup_do_precharge(1);
6113                 if (!ret)
6114                         goto retry;
6115         }
6116
6117         return ret;
6118 }
6119
6120 static const struct mm_walk_ops charge_walk_ops = {
6121         .pmd_entry      = mem_cgroup_move_charge_pte_range,
6122 };
6123
6124 static void mem_cgroup_move_charge(void)
6125 {
6126         lru_add_drain_all();
6127         /*
6128          * Signal lock_page_memcg() to take the memcg's move_lock
6129          * while we're moving its pages to another memcg. Then wait
6130          * for already started RCU-only updates to finish.
6131          */
6132         atomic_inc(&mc.from->moving_account);
6133         synchronize_rcu();
6134 retry:
6135         if (unlikely(!mmap_read_trylock(mc.mm))) {
6136                 /*
6137                  * Someone who is holding the mmap_lock might be waiting in the
6138                  * waitq. So we cancel all extra charges, wake up all waiters,
6139                  * and retry. Because we cancel precharges, we might not be able
6140                  * to move enough charges, but moving charge is a best-effort
6141                  * feature anyway, so it wouldn't be a big problem.
6142                  */
6143                 __mem_cgroup_clear_mc();
6144                 cond_resched();
6145                 goto retry;
6146         }
6147         /*
6148          * Once all precharges have been consumed and an additional
6149          * charge fails, the page walk simply aborts.
6150          */
6151         walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6152                         NULL);
6153
6154         mmap_read_unlock(mc.mm);
6155         atomic_dec(&mc.from->moving_account);
6156 }
6157
6158 static void mem_cgroup_move_task(void)
6159 {
6160         if (mc.to) {
6161                 mem_cgroup_move_charge();
6162                 mem_cgroup_clear_mc();
6163         }
6164 }
6165 #else   /* !CONFIG_MMU */
6166 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6167 {
6168         return 0;
6169 }
6170 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6171 {
6172 }
6173 static void mem_cgroup_move_task(void)
6174 {
6175 }
6176 #endif
6177
6178 /*
6179  * Cgroup retains root cgroups across [un]mount cycles making it necessary
6180  * to verify whether we're attached to the default hierarchy on each mount
6181  * attempt.
6182  */
6183 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
6184 {
6185         /*
6186          * use_hierarchy is forced on the default hierarchy.  cgroup core
6187          * guarantees that @root doesn't have any children, so turning it
6188          * on for the root memcg is enough.
6189          */
6190         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6191                 root_mem_cgroup->use_hierarchy = true;
6192         else
6193                 root_mem_cgroup->use_hierarchy = false;
6194 }
6195
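/*
 * Print a page_counter tunable in the cgroup v2 format: the literal
 * "max" for PAGE_COUNTER_MAX, otherwise the value scaled from pages
 * to bytes.
 */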
6196 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6197 {
6198         if (value == PAGE_COUNTER_MAX)
6199                 seq_puts(m, "max\n");
6200         else
6201                 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6202
6203         return 0;
6204 }
6205
6206 static u64 memory_current_read(struct cgroup_subsys_state *css,
6207                                struct cftype *cft)
6208 {
6209         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6210
6211         return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6212 }
6213
6214 static int memory_min_show(struct seq_file *m, void *v)
6215 {
6216         return seq_puts_memcg_tunable(m,
6217                 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6218 }
6219
6220 static ssize_t memory_min_write(struct kernfs_open_file *of,
6221                                 char *buf, size_t nbytes, loff_t off)
6222 {
6223         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6224         unsigned long min;
6225         int err;
6226
6227         buf = strstrip(buf);
6228         err = page_counter_memparse(buf, "max", &min);
6229         if (err)
6230                 return err;
6231
6232         page_counter_set_min(&memcg->memory, min);
6233
6234         return nbytes;
6235 }
6236
6237 static int memory_low_show(struct seq_file *m, void *v)
6238 {
6239         return seq_puts_memcg_tunable(m,
6240                 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6241 }
6242
6243 static ssize_t memory_low_write(struct kernfs_open_file *of,
6244                                 char *buf, size_t nbytes, loff_t off)
6245 {
6246         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6247         unsigned long low;
6248         int err;
6249
6250         buf = strstrip(buf);
6251         err = page_counter_memparse(buf, "max", &low);
6252         if (err)
6253                 return err;
6254
6255         page_counter_set_low(&memcg->memory, low);
6256
6257         return nbytes;
6258 }
6259
6260 static int memory_high_show(struct seq_file *m, void *v)
6261 {
6262         return seq_puts_memcg_tunable(m,
6263                 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6264 }
6265
6266 static ssize_t memory_high_write(struct kernfs_open_file *of,
6267                                  char *buf, size_t nbytes, loff_t off)
6268 {
6269         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6270         unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6271         bool drained = false;
6272         unsigned long high;
6273         int err;
6274
6275         buf = strstrip(buf);
6276         err = page_counter_memparse(buf, "max", &high);
6277         if (err)
6278                 return err;
6279
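        /*
         * Reclaim usage down to the new high mark before publishing it:
         * drain the per-cpu stocks once, then loop direct reclaim until
         * usage fits, a signal arrives, or MAX_RECLAIM_RETRIES passes
         * reclaim nothing.
         */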
6280         for (;;) {
6281                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6282                 unsigned long reclaimed;
6283
6284                 if (nr_pages <= high)
6285                         break;
6286
6287                 if (signal_pending(current))
6288                         break;
6289
6290                 if (!drained) {
6291                         drain_all_stock(memcg);
6292                         drained = true;
6293                         continue;
6294                 }
6295
6296                 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6297                                                          GFP_KERNEL, true);
6298
6299                 if (!reclaimed && !nr_retries--)
6300                         break;
6301         }
6302
6303         page_counter_set_high(&memcg->memory, high);
6304
6305         memcg_wb_domain_size_changed(memcg);
6306
6307         return nbytes;
6308 }
6309
6310 static int memory_max_show(struct seq_file *m, void *v)
6311 {
6312         return seq_puts_memcg_tunable(m,
6313                 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6314 }
6315
6316 static ssize_t memory_max_write(struct kernfs_open_file *of,
6317                                 char *buf, size_t nbytes, loff_t off)
6318 {
6319         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6320         unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6321         bool drained = false;
6322         unsigned long max;
6323         int err;
6324
6325         buf = strstrip(buf);
6326         err = page_counter_memparse(buf, "max", &max);
6327         if (err)
6328                 return err;
6329
6330         xchg(&memcg->memory.max, max);
6331
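        /*
         * The new limit is visible immediately via the xchg() above; now
         * push usage below it: drain the per-cpu stocks once, retry direct
         * reclaim up to MAX_RECLAIM_RETRIES times, and finally fall back
         * to the OOM killer until usage fits or nothing is left to kill.
         */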
6332         for (;;) {
6333                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6334
6335                 if (nr_pages <= max)
6336                         break;
6337
6338                 if (signal_pending(current))
6339                         break;
6340
6341                 if (!drained) {
6342                         drain_all_stock(memcg);
6343                         drained = true;
6344                         continue;
6345                 }
6346
6347                 if (nr_reclaims) {
6348                         if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6349                                                           GFP_KERNEL, true))
6350                                 nr_reclaims--;
6351                         continue;
6352                 }
6353
6354                 memcg_memory_event(memcg, MEMCG_OOM);
6355                 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6356                         break;
6357         }
6358
6359         memcg_wb_domain_size_changed(memcg);
6360         return nbytes;
6361 }
6362
6363 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6364 {
6365         seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6366         seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6367         seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6368         seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6369         seq_printf(m, "oom_kill %lu\n",
6370                    atomic_long_read(&events[MEMCG_OOM_KILL]));
6371 }
6372
6373 static int memory_events_show(struct seq_file *m, void *v)
6374 {
6375         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6376
6377         __memory_events_show(m, memcg->memory_events);
6378         return 0;
6379 }
6380
6381 static int memory_events_local_show(struct seq_file *m, void *v)
6382 {
6383         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6384
6385         __memory_events_show(m, memcg->memory_events_local);
6386         return 0;
6387 }
6388
6389 static int memory_stat_show(struct seq_file *m, void *v)
6390 {
6391         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6392         char *buf;
6393
6394         buf = memory_stat_format(memcg);
6395         if (!buf)
6396                 return -ENOMEM;
6397         seq_puts(m, buf);
6398         kfree(buf);
6399         return 0;
6400 }
6401
6402 static int memory_oom_group_show(struct seq_file *m, void *v)
6403 {
6404         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6405
6406         seq_printf(m, "%d\n", memcg->oom_group);
6407
6408         return 0;
6409 }
6410
6411 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6412                                       char *buf, size_t nbytes, loff_t off)
6413 {
6414         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6415         int ret, oom_group;
6416
6417         buf = strstrip(buf);
6418         if (!buf)
6419                 return -EINVAL;
6420
6421         ret = kstrtoint(buf, 0, &oom_group);
6422         if (ret)
6423                 return ret;
6424
6425         if (oom_group != 0 && oom_group != 1)
6426                 return -EINVAL;
6427
6428         memcg->oom_group = oom_group;
6429
6430         return nbytes;
6431 }
6432
6433 static struct cftype memory_files[] = {
6434         {
6435                 .name = "current",
6436                 .flags = CFTYPE_NOT_ON_ROOT,
6437                 .read_u64 = memory_current_read,
6438         },
6439         {
6440                 .name = "min",
6441                 .flags = CFTYPE_NOT_ON_ROOT,
6442                 .seq_show = memory_min_show,
6443                 .write = memory_min_write,
6444         },
6445         {
6446                 .name = "low",
6447                 .flags = CFTYPE_NOT_ON_ROOT,
6448                 .seq_show = memory_low_show,
6449                 .write = memory_low_write,
6450         },
6451         {
6452                 .name = "high",
6453                 .flags = CFTYPE_NOT_ON_ROOT,
6454                 .seq_show = memory_high_show,
6455                 .write = memory_high_write,
6456         },
6457         {
6458                 .name = "max",
6459                 .flags = CFTYPE_NOT_ON_ROOT,
6460                 .seq_show = memory_max_show,
6461                 .write = memory_max_write,
6462         },
6463         {
6464                 .name = "events",
6465                 .flags = CFTYPE_NOT_ON_ROOT,
6466                 .file_offset = offsetof(struct mem_cgroup, events_file),
6467                 .seq_show = memory_events_show,
6468         },
6469         {
6470                 .name = "events.local",
6471                 .flags = CFTYPE_NOT_ON_ROOT,
6472                 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6473                 .seq_show = memory_events_local_show,
6474         },
6475         {
6476                 .name = "stat",
6477                 .seq_show = memory_stat_show,
6478         },
6479         {
6480                 .name = "oom.group",
6481                 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6482                 .seq_show = memory_oom_group_show,
6483                 .write = memory_oom_group_write,
6484         },
6485         { }     /* terminate */
6486 };
6487
6488 struct cgroup_subsys memory_cgrp_subsys = {
6489         .css_alloc = mem_cgroup_css_alloc,
6490         .css_online = mem_cgroup_css_online,
6491         .css_offline = mem_cgroup_css_offline,
6492         .css_released = mem_cgroup_css_released,
6493         .css_free = mem_cgroup_css_free,
6494         .css_reset = mem_cgroup_css_reset,
6495         .can_attach = mem_cgroup_can_attach,
6496         .cancel_attach = mem_cgroup_cancel_attach,
6497         .post_attach = mem_cgroup_move_task,
6498         .bind = mem_cgroup_bind,
6499         .dfl_cftypes = memory_files,
6500         .legacy_cftypes = mem_cgroup_legacy_files,
6501         .early_init = 0,
6502 };
6503
6504 /*
6505  * This function calculates an individual cgroup's effective
6506  * protection which is derived from its own memory.min/low, its
6507  * parent's and siblings' settings, as well as the actual memory
6508  * distribution in the tree.
6509  *
6510  * The following rules apply to the effective protection values:
6511  *
6512  * 1. At the first level of reclaim, effective protection is equal to
6513  *    the declared protection in memory.min and memory.low.
6514  *
6515  * 2. To enable safe delegation of the protection configuration, at
6516  *    subsequent levels the effective protection is capped to the
6517  *    parent's effective protection.
6518  *
6519  * 3. To make complex and dynamic subtrees easier to configure, the
6520  *    user is allowed to overcommit the declared protection at a given
6521  *    level. If that is the case, the parent's effective protection is
6522  *    distributed to the children in proportion to how much protection
6523  *    they have declared and how much of it they are utilizing.
6524  *
6525  *    This makes distribution proportional, but also work-conserving:
6526  *    if one cgroup claims much more protection than the memory it uses,
6527  *    the unused remainder is available to its siblings.
6528  *
6529  * 4. Conversely, when the declared protection is undercommitted at a
6530  *    given level, the distribution of the larger parental protection
6531  *    budget is NOT proportional. A cgroup's protection from a sibling
6532  *    is capped to its own memory.min/low setting.
6533  *
6534  * 5. However, to allow protecting recursive subtrees from each other
6535  *    without having to declare each individual cgroup's fixed share
6536  *    of the ancestor's claim to protection, any unutilized -
6537  *    "floating" - protection from up the tree is distributed in
6538  *    proportion to each cgroup's *usage*. This makes the protection
6539  *    neutral wrt sibling cgroups and lets them compete freely over
6540  *    the shared parental protection budget, but it protects the
6541  *    subtree as a whole from neighboring subtrees.
6542  *
6543  * Note that 4. and 5. are not in conflict: 4. is about protecting
6544  * against immediate siblings whereas 5. is about protecting against
6545  * neighboring subtrees.
6546  */
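/*
 * A worked example of rule 3 above (hypothetical numbers): a parent
 * with an effective protection of 8G has two children, A with
 * memory.low = 6G and usage = 6G, and B with memory.low = 6G and
 * usage = 4G.  siblings_protected = min(6G, 6G) + min(4G, 6G) = 10G,
 * which overcommits the parent's 8G, so each child is scaled down:
 *
 *	A: 6G * 8G / 10G = 4.8G effective low
 *	B: 4G * 8G / 10G = 3.2G effective low
 *
 * The scaled shares sum to exactly the parent's budget and track how
 * much memory each sibling actually uses.
 */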
6547 static unsigned long effective_protection(unsigned long usage,
6548                                           unsigned long parent_usage,
6549                                           unsigned long setting,
6550                                           unsigned long parent_effective,
6551                                           unsigned long siblings_protected)
6552 {
6553         unsigned long protected;
6554         unsigned long ep;
6555
6556         protected = min(usage, setting);
6557         /*
6558          * If all cgroups at this level combined claim and use more
6559          * protection than what the parent affords them, distribute
6560          * shares in proportion to utilization.
6561          *
6562          * We are using actual utilization rather than the statically
6563          * claimed protection in order to be work-conserving: claimed
6564          * but unused protection is available to siblings that would
6565          * otherwise get a smaller chunk than what they claimed.
6566          */
6567         if (siblings_protected > parent_effective)
6568                 return protected * parent_effective / siblings_protected;
6569
6570         /*
6571          * Ok, utilized protection of all children is within what the
6572          * parent affords them, so we know whatever this child claims
6573          * and utilizes is effectively protected.
6574          *
6575          * If there is unprotected usage beyond this value, reclaim
6576          * will apply pressure in proportion to that amount.
6577          *
6578          * If there is unutilized protection, the cgroup will be fully
6579          * shielded from reclaim, but we do return a smaller value for
6580          * protection than what the group could enjoy in theory. This
6581          * is okay. With the overcommit distribution above, effective
6582          * protection is always dependent on how memory is actually
6583          * consumed among the siblings anyway.
6584          */
6585         ep = protected;
6586
6587         /*
6588          * If the children aren't claiming (all of) the protection
6589          * afforded to them by the parent, distribute the remainder in
6590          * proportion to the (unprotected) memory of each cgroup. That
6591          * way, cgroups that aren't explicitly prioritized wrt each
6592          * other compete freely over the allowance, but they are
6593          * collectively protected from neighboring trees.
6594          *
6595          * We're using unprotected memory for the weight so that if
6596          * some cgroups DO claim explicit protection, we don't protect
6597          * the same bytes twice.
6598          *
6599          * Check both usage and parent_usage against the respective
6600          * protected values. One should imply the other, but they
6601          * aren't read atomically - make sure the division is sane.
6602          */
6603         if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6604                 return ep;
6605         if (parent_effective > siblings_protected &&
6606             parent_usage > siblings_protected &&
6607             usage > protected) {
6608                 unsigned long unclaimed;
6609
6610                 unclaimed = parent_effective - siblings_protected;
6611                 unclaimed *= usage - protected;
6612                 unclaimed /= parent_usage - siblings_protected;
6613
6614                 ep += unclaimed;
6615         }
6616
6617         return ep;
6618 }
6619
6620 /**
6621  * mem_cgroup_calculate_protection - calculate a memcg's effective protection
6622  * @root: the top ancestor of the sub-tree being checked
6623  * @memcg: the memory cgroup to check
6624  *
6625  * WARNING: This function is not stateless! It can only be used as part
6626  *          of a top-down tree iteration, not for isolated queries.
6627  */
6628 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6629                                      struct mem_cgroup *memcg)
6630 {
6631         unsigned long usage, parent_usage;
6632         struct mem_cgroup *parent;
6633
6634         if (mem_cgroup_disabled())
6635                 return;
6636
6637         if (!root)
6638                 root = root_mem_cgroup;
6639
6640         /*
6641          * Effective values of the reclaim targets are ignored so they
6642          * can be stale. Have a look at mem_cgroup_protection for more
6643          * details.
6644          * TODO: calculation should be more robust so that we do not need
6645          * that special casing.
6646          */
6647         if (memcg == root)
6648                 return;
6649
6650         usage = page_counter_read(&memcg->memory);
6651         if (!usage)
6652                 return;
6653
6654         parent = parent_mem_cgroup(memcg);
6655         /* No parent means non-hierarchical mode on a v1 memcg */
6656         if (!parent)
6657                 return;
6658
6659         if (parent == root) {
6660                 memcg->memory.emin = READ_ONCE(memcg->memory.min);
6661                 memcg->memory.elow = READ_ONCE(memcg->memory.low);
6662                 return;
6663         }
6664
6665         parent_usage = page_counter_read(&parent->memory);
6666
6667         WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6668                         READ_ONCE(memcg->memory.min),
6669                         READ_ONCE(parent->memory.emin),
6670                         atomic_long_read(&parent->memory.children_min_usage)));
6671
6672         WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6673                         READ_ONCE(memcg->memory.low),
6674                         READ_ONCE(parent->memory.elow),
6675                         atomic_long_read(&parent->memory.children_low_usage)));
6676 }
6677
6678 /**
6679  * mem_cgroup_charge - charge a newly allocated page to a cgroup
6680  * @page: page to charge
6681  * @mm: mm context of the victim
6682  * @gfp_mask: reclaim mode
6683  *
6684  * Try to charge @page to the memcg that @mm belongs to, reclaiming
6685  * pages according to @gfp_mask if necessary.
6686  *
6687  * Returns 0 on success. Otherwise, an error code is returned.
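 *
 * An illustrative call site (hypothetical, modeled on the anonymous
 * page fault path; the label name is an assumption):
 *
 *	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
 *		goto oom_free_page;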
6688  */
6689 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
6690 {
6691         unsigned int nr_pages = hpage_nr_pages(page);
6692         struct mem_cgroup *memcg = NULL;
6693         int ret = 0;
6694
6695         if (mem_cgroup_disabled())
6696                 goto out;
6697
6698         if (PageSwapCache(page)) {
6699                 swp_entry_t ent = { .val = page_private(page), };
6700                 unsigned short id;
6701
6702                 /*
6703                  * Every swap fault against a single page tries to charge the
6704                  * page, bail as early as possible.  shmem_unuse() encounters
6705                  * already charged pages, too.  page->mem_cgroup is protected
6706                  * by the page lock, which serializes swap cache removal, which
6707                  * in turn serializes uncharging.
6708                  */
6709                 VM_BUG_ON_PAGE(!PageLocked(page), page);
6710                 if (compound_head(page)->mem_cgroup)
6711                         goto out;
6712
6713                 id = lookup_swap_cgroup_id(ent);
6714                 rcu_read_lock();
6715                 memcg = mem_cgroup_from_id(id);
6716                 if (memcg && !css_tryget_online(&memcg->css))
6717                         memcg = NULL;
6718                 rcu_read_unlock();
6719         }
6720
6721         if (!memcg)
6722                 memcg = get_mem_cgroup_from_mm(mm);
6723
6724         ret = try_charge(memcg, gfp_mask, nr_pages);
6725         if (ret)
6726                 goto out_put;
6727
6728         css_get(&memcg->css);
6729         commit_charge(page, memcg);
6730
6731         local_irq_disable();
6732         mem_cgroup_charge_statistics(memcg, page, nr_pages);
6733         memcg_check_events(memcg, page);
6734         local_irq_enable();
6735
6736         if (PageSwapCache(page)) {
6737                 swp_entry_t entry = { .val = page_private(page) };
6738                 /*
6739                  * The swap entry might not get freed for a long time,
6740                  * let's not wait for it.  The page already received a
6741                  * memory+swap charge, drop the swap entry duplicate.
6742                  */
6743                 mem_cgroup_uncharge_swap(entry, nr_pages);
6744         }
6745
6746 out_put:
6747         css_put(&memcg->css);
6748 out:
6749         return ret;
6750 }
6751
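/*
 * Uncharging is batched: uncharge_page() gathers consecutive pages that
 * belong to the same memcg into an on-stack uncharge_gather, and
 * uncharge_batch() then flushes the accumulated counts with a single
 * round of page_counter updates and event accounting.
 */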
6752 struct uncharge_gather {
6753         struct mem_cgroup *memcg;
6754         unsigned long nr_pages;
6755         unsigned long pgpgout;
6756         unsigned long nr_kmem;
6757         struct page *dummy_page;
6758 };
6759
6760 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6761 {
6762         memset(ug, 0, sizeof(*ug));
6763 }
6764
6765 static void uncharge_batch(const struct uncharge_gather *ug)
6766 {
6767         unsigned long flags;
6768
6769         if (!mem_cgroup_is_root(ug->memcg)) {
6770                 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
6771                 if (do_memsw_account())
6772                         page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
6773                 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6774                         page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6775                 memcg_oom_recover(ug->memcg);
6776         }
6777
6778         local_irq_save(flags);
6779         __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6780         __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
6781         memcg_check_events(ug->memcg, ug->dummy_page);
6782         local_irq_restore(flags);
6783 }
6784
6785 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6786 {
6787         unsigned long nr_pages;
6788
6789         VM_BUG_ON_PAGE(PageLRU(page), page);
6790
6791         if (!page->mem_cgroup)
6792                 return;
6793
6794         /*
6795          * Nobody should be changing or seriously looking at
6796          * page->mem_cgroup at this point; we have fully
6797          * exclusive access to the page.
6798          */
6799
6800         if (ug->memcg != page->mem_cgroup) {
6801                 if (ug->memcg) {
6802                         uncharge_batch(ug);
6803                         uncharge_gather_clear(ug);
6804                 }
6805                 ug->memcg = page->mem_cgroup;
6806         }
6807
6808         nr_pages = compound_nr(page);
6809         ug->nr_pages += nr_pages;
6810
6811         if (!PageKmemcg(page)) {
6812                 ug->pgpgout++;
6813         } else {
6814                 ug->nr_kmem += nr_pages;
6815                 __ClearPageKmemcg(page);
6816         }
6817
6818         ug->dummy_page = page;
6819         page->mem_cgroup = NULL;
6820         css_put(&ug->memcg->css);
6821 }
6822
6823 static void uncharge_list(struct list_head *page_list)
6824 {
6825         struct uncharge_gather ug;
6826         struct list_head *next;
6827
6828         uncharge_gather_clear(&ug);
6829
6830         /*
6831          * Note that the list can be a single page->lru; hence the
6832          * do-while loop instead of a simple list_for_each_entry().
6833          */
6834         next = page_list->next;
6835         do {
6836                 struct page *page;
6837
6838                 page = list_entry(next, struct page, lru);
6839                 next = page->lru.next;
6840
6841                 uncharge_page(page, &ug);
6842         } while (next != page_list);
6843
6844         if (ug.memcg)
6845                 uncharge_batch(&ug);
6846 }
6847
6848 /**
6849  * mem_cgroup_uncharge - uncharge a page
6850  * @page: page to uncharge
6851  *
6852  * Uncharge a page previously charged with mem_cgroup_charge().
6853  */
6854 void mem_cgroup_uncharge(struct page *page)
6855 {
6856         struct uncharge_gather ug;
6857
6858         if (mem_cgroup_disabled())
6859                 return;
6860
6861         /* Don't touch page->lru of any random page, pre-check: */
6862         if (!page->mem_cgroup)
6863                 return;
6864
6865         uncharge_gather_clear(&ug);
6866         uncharge_page(page, &ug);
6867         uncharge_batch(&ug);
6868 }
6869
6870 /**
6871  * mem_cgroup_uncharge_list - uncharge a list of pages
6872  * @page_list: list of pages to uncharge
6873  *
6874  * Uncharge a list of pages previously charged with
6875  * mem_cgroup_charge().
6876  */
6877 void mem_cgroup_uncharge_list(struct list_head *page_list)
6878 {
6879         if (mem_cgroup_disabled())
6880                 return;
6881
6882         if (!list_empty(page_list))
6883                 uncharge_list(page_list);
6884 }
6885
6886 /**
6887  * mem_cgroup_migrate - charge a page's replacement
6888  * @oldpage: currently circulating page
6889  * @newpage: replacement page
6890  *
6891  * Charge @newpage as a replacement page for @oldpage. @oldpage will
6892  * be uncharged upon free.
6893  *
6894  * Both pages must be locked, @newpage->mapping must be set up.
6895  */
6896 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6897 {
6898         struct mem_cgroup *memcg;
6899         unsigned int nr_pages;
6900         unsigned long flags;
6901
6902         VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6903         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6904         VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6905         VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6906                        newpage);
6907
6908         if (mem_cgroup_disabled())
6909                 return;
6910
6911         /* Page cache replacement: new page already charged? */
6912         if (newpage->mem_cgroup)
6913                 return;
6914
6915         /* Swapcache readahead pages can get replaced before being charged */
6916         memcg = oldpage->mem_cgroup;
6917         if (!memcg)
6918                 return;
6919
6920         /* Force-charge the new page. The old one will be freed soon */
6921         nr_pages = hpage_nr_pages(newpage);
6922
6923         page_counter_charge(&memcg->memory, nr_pages);
6924         if (do_memsw_account())
6925                 page_counter_charge(&memcg->memsw, nr_pages);
6926
6927         css_get(&memcg->css);
6928         commit_charge(newpage, memcg);
6929
6930         local_irq_save(flags);
6931         mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
6932         memcg_check_events(memcg, newpage);
6933         local_irq_restore(flags);
6934 }
6935
6936 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6937 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6938
6939 void mem_cgroup_sk_alloc(struct sock *sk)
6940 {
6941         struct mem_cgroup *memcg;
6942
6943         if (!mem_cgroup_sockets_enabled)
6944                 return;
6945
6946         /* Do not associate the sock with an unrelated interrupted task's memcg. */
6947         if (in_interrupt())
6948                 return;
6949
6950         rcu_read_lock();
6951         memcg = mem_cgroup_from_task(current);
6952         if (memcg == root_mem_cgroup)
6953                 goto out;
6954         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6955                 goto out;
6956         if (css_tryget(&memcg->css))
6957                 sk->sk_memcg = memcg;
6958 out:
6959         rcu_read_unlock();
6960 }
6961
6962 void mem_cgroup_sk_free(struct sock *sk)
6963 {
6964         if (sk->sk_memcg)
6965                 css_put(&sk->sk_memcg->css);
6966 }
6967
6968 /**
6969  * mem_cgroup_charge_skmem - charge socket memory
6970  * @memcg: memcg to charge
6971  * @nr_pages: number of pages to charge
6972  *
6973  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6974  * @memcg's configured limit, %false if the charge had to be forced.
6975  */
6976 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6977 {
6978         gfp_t gfp_mask = GFP_KERNEL;
6979
6980         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6981                 struct page_counter *fail;
6982
6983                 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6984                         memcg->tcpmem_pressure = 0;
6985                         return true;
6986                 }
6987                 page_counter_charge(&memcg->tcpmem, nr_pages);
6988                 memcg->tcpmem_pressure = 1;
6989                 return false;
6990         }
6991
6992         /* Don't block in the packet receive path */
6993         if (in_softirq())
6994                 gfp_mask = GFP_NOWAIT;
6995
6996         mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6997
6998         if (try_charge(memcg, gfp_mask, nr_pages) == 0)
6999                 return true;
7000
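        /*
         * Charging failed against the limit; force the charge through
         * with __GFP_NOFAIL to keep the accounting correct, and return
         * false so the caller sees the memcg as under pressure.
         */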
7001         try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
7002         return false;
7003 }
7004
7005 /**
7006  * mem_cgroup_uncharge_skmem - uncharge socket memory
7007  * @memcg: memcg to uncharge
7008  * @nr_pages: number of pages to uncharge
7009  */
7010 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7011 {
7012         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7013                 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7014                 return;
7015         }
7016
7017         mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7018
7019         refill_stock(memcg, nr_pages);
7020 }
7021
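/*
 * Parse the "cgroup.memory=" boot option, a comma-separated list of
 * flags, e.g. "cgroup.memory=nosocket,nokmem" to disable socket and
 * kernel memory accounting before any charging can happen.
 */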
7022 static int __init cgroup_memory(char *s)
7023 {
7024         char *token;
7025
7026         while ((token = strsep(&s, ",")) != NULL) {
7027                 if (!*token)
7028                         continue;
7029                 if (!strcmp(token, "nosocket"))
7030                         cgroup_memory_nosocket = true;
7031                 if (!strcmp(token, "nokmem"))
7032                         cgroup_memory_nokmem = true;
7033         }
7034         return 0;
7035 }
7036 __setup("cgroup.memory=", cgroup_memory);
7037
7038 /*
7039  * subsys_initcall() for memory controller.
7040  *
7041  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7042  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7043  * basically everything that doesn't depend on a specific mem_cgroup structure
7044  * should be initialized from here.
7045  */
7046 static int __init mem_cgroup_init(void)
7047 {
7048         int cpu, node;
7049
7050         cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7051                                   memcg_hotplug_cpu_dead);
7052
7053         for_each_possible_cpu(cpu)
7054                 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7055                           drain_local_stock);
7056
7057         for_each_node(node) {
7058                 struct mem_cgroup_tree_per_node *rtpn;
7059
7060                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7061                                     node_online(node) ? node : NUMA_NO_NODE);
7062
7063                 rtpn->rb_root = RB_ROOT;
7064                 rtpn->rb_rightmost = NULL;
7065                 spin_lock_init(&rtpn->lock);
7066                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7067         }
7068
7069         return 0;
7070 }
7071 subsys_initcall(mem_cgroup_init);
7072
7073 #ifdef CONFIG_MEMCG_SWAP
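/*
 * Take an ID reference on the closest online ancestor of @memcg
 * (possibly @memcg itself): swap records must point at a memcg whose
 * ID is still live.
 */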
7074 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7075 {
7076         while (!refcount_inc_not_zero(&memcg->id.ref)) {
7077                 /*
7078                  * The root cgroup cannot be destroyed, so its refcount must
7079                  * always be >= 1.
7080                  */
7081                 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7082                         VM_BUG_ON(1);
7083                         break;
7084                 }
7085                 memcg = parent_mem_cgroup(memcg);
7086                 if (!memcg)
7087                         memcg = root_mem_cgroup;
7088         }
7089         return memcg;
7090 }
7091
7092 /**
7093  * mem_cgroup_swapout - transfer a memsw charge to swap
7094  * @page: page whose memsw charge to transfer
7095  * @entry: swap entry to move the charge to
7096  *
7097  * Transfer the memsw charge of @page to @entry.
7098  */
7099 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7100 {
7101         struct mem_cgroup *memcg, *swap_memcg;
7102         unsigned int nr_entries;
7103         unsigned short oldid;
7104
7105         VM_BUG_ON_PAGE(PageLRU(page), page);
7106         VM_BUG_ON_PAGE(page_count(page), page);
7107
7108         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7109                 return;
7110
7111         memcg = page->mem_cgroup;
7112
7113         /* Readahead page, never charged */
7114         if (!memcg)
7115                 return;
7116
7117         /*
7118          * In case the memcg owning these pages has been offlined and doesn't
7119          * have an ID allocated to it anymore, charge the closest online
7120          * ancestor for the swap instead and transfer the memory+swap charge.
7121          */
7122         swap_memcg = mem_cgroup_id_get_online(memcg);
7123         nr_entries = hpage_nr_pages(page);
7124         /* Get references for the tail pages, too */
7125         if (nr_entries > 1)
7126                 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7127         oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7128                                    nr_entries);
7129         VM_BUG_ON_PAGE(oldid, page);
7130         mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7131
7132         page->mem_cgroup = NULL;
7133
7134         if (!mem_cgroup_is_root(memcg))
7135                 page_counter_uncharge(&memcg->memory, nr_entries);
7136
7137         if (!cgroup_memory_noswap && memcg != swap_memcg) {
7138                 if (!mem_cgroup_is_root(swap_memcg))
7139                         page_counter_charge(&swap_memcg->memsw, nr_entries);
7140                 page_counter_uncharge(&memcg->memsw, nr_entries);
7141         }
7142
7143         /*
7144          * Interrupts should be disabled here because the caller holds the
7145          * i_pages lock which is taken with interrupts-off. It is
7146          * important here to have the interrupts disabled because it is the
7147          * only synchronisation we have for updating the per-CPU variables.
7148          */
7149         VM_BUG_ON(!irqs_disabled());
7150         mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7151         memcg_check_events(memcg, page);
7152
7153         css_put(&memcg->css);
7154 }
7155
7156 /**
7157  * mem_cgroup_try_charge_swap - try charging swap space for a page
7158  * @page: page being added to swap
7159  * @entry: swap entry to charge
7160  *
7161  * Try to charge @page's memcg for the swap space at @entry.
7162  *
7163  * Returns 0 on success, -ENOMEM on failure.
7164  */
7165 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7166 {
7167         unsigned int nr_pages = hpage_nr_pages(page);
7168         struct page_counter *counter;
7169         struct mem_cgroup *memcg;
7170         unsigned short oldid;
7171
7172         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7173                 return 0;
7174
7175         memcg = page->mem_cgroup;
7176
7177         /* Readahead page, never charged */
7178         if (!memcg)
7179                 return 0;
7180
7181         if (!entry.val) {
7182                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7183                 return 0;
7184         }
7185
7186         memcg = mem_cgroup_id_get_online(memcg);
7187
7188         if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7189             !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7190                 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7191                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7192                 mem_cgroup_id_put(memcg);
7193                 return -ENOMEM;
7194         }
7195
7196         /* Get references for the tail pages, too */
7197         if (nr_pages > 1)
7198                 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7199         oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7200         VM_BUG_ON_PAGE(oldid, page);
7201         mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7202
7203         return 0;
7204 }
7205
7206 /**
7207  * mem_cgroup_uncharge_swap - uncharge swap space
7208  * @entry: swap entry to uncharge
7209  * @nr_pages: the amount of swap space to uncharge
7210  */
7211 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7212 {
7213         struct mem_cgroup *memcg;
7214         unsigned short id;
7215
7216         id = swap_cgroup_record(entry, 0, nr_pages);
7217         rcu_read_lock();
7218         memcg = mem_cgroup_from_id(id);
7219         if (memcg) {
7220                 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7221                         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7222                                 page_counter_uncharge(&memcg->swap, nr_pages);
7223                         else
7224                                 page_counter_uncharge(&memcg->memsw, nr_pages);
7225                 }
7226                 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7227                 mem_cgroup_id_put_many(memcg, nr_pages);
7228         }
7229         rcu_read_unlock();
7230 }
7231
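/*
 * Report how much swap is still usable from @memcg's point of view:
 * the global free swap count, clamped by the smallest remaining
 * (swap.max - usage) headroom on the path from @memcg to the root.
 */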
7232 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7233 {
7234         long nr_swap_pages = get_nr_swap_pages();
7235
7236         if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7237                 return nr_swap_pages;
7238         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7239                 nr_swap_pages = min_t(long, nr_swap_pages,
7240                                       READ_ONCE(memcg->swap.max) -
7241                                       page_counter_read(&memcg->swap));
7242         return nr_swap_pages;
7243 }
7244
7245 bool mem_cgroup_swap_full(struct page *page)
7246 {
7247         struct mem_cgroup *memcg;
7248
7249         VM_BUG_ON_PAGE(!PageLocked(page), page);
7250
7251         if (vm_swap_full())
7252                 return true;
7253         if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7254                 return false;
7255
7256         memcg = page->mem_cgroup;
7257         if (!memcg)
7258                 return false;
7259
7260         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7261                 unsigned long usage = page_counter_read(&memcg->swap);
7262
7263                 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7264                     usage * 2 >= READ_ONCE(memcg->swap.max))
7265                         return true;
7266         }
7267
7268         return false;
7269 }
7270
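/*
 * Parse the "swapaccount=" boot option: "0" sets cgroup_memory_noswap
 * to disable swap accounting, "1" clears it.
 */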
7271 static int __init setup_swap_account(char *s)
7272 {
7273         if (!strcmp(s, "1"))
7274                 cgroup_memory_noswap = 0;
7275         else if (!strcmp(s, "0"))
7276                 cgroup_memory_noswap = 1;
7277         return 1;
7278 }
7279 __setup("swapaccount=", setup_swap_account);
7280
7281 static u64 swap_current_read(struct cgroup_subsys_state *css,
7282                              struct cftype *cft)
7283 {
7284         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7285
7286         return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7287 }
7288
7289 static int swap_high_show(struct seq_file *m, void *v)
7290 {
7291         return seq_puts_memcg_tunable(m,
7292                 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7293 }
7294
7295 static ssize_t swap_high_write(struct kernfs_open_file *of,
7296                                char *buf, size_t nbytes, loff_t off)
7297 {
7298         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7299         unsigned long high;
7300         int err;
7301
7302         buf = strstrip(buf);
7303         err = page_counter_memparse(buf, "max", &high);
7304         if (err)
7305                 return err;
7306
7307         page_counter_set_high(&memcg->swap, high);
7308
7309         return nbytes;
7310 }
7311
7312 static int swap_max_show(struct seq_file *m, void *v)
7313 {
7314         return seq_puts_memcg_tunable(m,
7315                 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7316 }
7317
7318 static ssize_t swap_max_write(struct kernfs_open_file *of,
7319                               char *buf, size_t nbytes, loff_t off)
7320 {
7321         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7322         unsigned long max;
7323         int err;
7324
7325         buf = strstrip(buf);
7326         err = page_counter_memparse(buf, "max", &max);
7327         if (err)
7328                 return err;
7329
7330         xchg(&memcg->swap.max, max);
7331
7332         return nbytes;
7333 }
7334
7335 static int swap_events_show(struct seq_file *m, void *v)
7336 {
7337         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7338
7339         seq_printf(m, "high %lu\n",
7340                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7341         seq_printf(m, "max %lu\n",
7342                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7343         seq_printf(m, "fail %lu\n",
7344                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7345
7346         return 0;
7347 }
7348
7349 static struct cftype swap_files[] = {
7350         {
7351                 .name = "swap.current",
7352                 .flags = CFTYPE_NOT_ON_ROOT,
7353                 .read_u64 = swap_current_read,
7354         },
7355         {
7356                 .name = "swap.high",
7357                 .flags = CFTYPE_NOT_ON_ROOT,
7358                 .seq_show = swap_high_show,
7359                 .write = swap_high_write,
7360         },
7361         {
7362                 .name = "swap.max",
7363                 .flags = CFTYPE_NOT_ON_ROOT,
7364                 .seq_show = swap_max_show,
7365                 .write = swap_max_write,
7366         },
7367         {
7368                 .name = "swap.events",
7369                 .flags = CFTYPE_NOT_ON_ROOT,
7370                 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7371                 .seq_show = swap_events_show,
7372         },
7373         { }     /* terminate */
7374 };
7375
7376 static struct cftype memsw_files[] = {
7377         {
7378                 .name = "memsw.usage_in_bytes",
7379                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7380                 .read_u64 = mem_cgroup_read_u64,
7381         },
7382         {
7383                 .name = "memsw.max_usage_in_bytes",
7384                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7385                 .write = mem_cgroup_reset,
7386                 .read_u64 = mem_cgroup_read_u64,
7387         },
7388         {
7389                 .name = "memsw.limit_in_bytes",
7390                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7391                 .write = mem_cgroup_write,
7392                 .read_u64 = mem_cgroup_read_u64,
7393         },
7394         {
7395                 .name = "memsw.failcnt",
7396                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7397                 .write = mem_cgroup_reset,
7398                 .read_u64 = mem_cgroup_read_u64,
7399         },
7400         { },    /* terminate */
7401 };
7402
7403 /*
7404  * mem_cgroup_swap_init() must be a core_initcall() rather than a
7405  * subsys_initcall(): otherwise cgroup_memory_noswap could still be
7406  * false when memcg has been disabled via the "cgroup_disable=memory"
7407  * boot parameter, which may result in a premature OOPS inside
7408  * mem_cgroup_get_nr_swap_pages() in corner cases.
7409  */
7410 static int __init mem_cgroup_swap_init(void)
7411 {
7412         /* No memory control -> no swap control */
7413         if (mem_cgroup_disabled())
7414                 cgroup_memory_noswap = true;
7415
7416         if (cgroup_memory_noswap)
7417                 return 0;
7418
7419         WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7420         WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7421
7422         return 0;
7423 }
7424 core_initcall(mem_cgroup_swap_init);
7425
7426 #endif /* CONFIG_MEMCG_SWAP */