1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  */
23
24 #include <linux/res_counter.h>
25 #include <linux/memcontrol.h>
26 #include <linux/cgroup.h>
27 #include <linux/mm.h>
28 #include <linux/hugetlb.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp.h>
31 #include <linux/page-flags.h>
32 #include <linux/backing-dev.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/rcupdate.h>
35 #include <linux/limits.h>
36 #include <linux/export.h>
37 #include <linux/mutex.h>
38 #include <linux/rbtree.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/swapops.h>
42 #include <linux/spinlock.h>
43 #include <linux/eventfd.h>
44 #include <linux/sort.h>
45 #include <linux/fs.h>
46 #include <linux/seq_file.h>
47 #include <linux/vmalloc.h>
48 #include <linux/mm_inline.h>
49 #include <linux/page_cgroup.h>
50 #include <linux/cpu.h>
51 #include <linux/oom.h>
52 #include "internal.h"
53 #include <net/sock.h>
54 #include <net/tcp_memcontrol.h>
55
56 #include <asm/uaccess.h>
57
58 #include <trace/events/vmscan.h>
59
60 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
61 #define MEM_CGROUP_RECLAIM_RETRIES      5
62 struct mem_cgroup *root_mem_cgroup __read_mostly;
63
64 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
65 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
66 int do_swap_account __read_mostly;
67
68 /* for remembering the boot option */
69 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
70 static int really_do_swap_account __initdata = 1;
71 #else
72 static int really_do_swap_account __initdata = 0;
73 #endif
74
75 #else
76 #define do_swap_account         (0)
77 #endif
78
79
80 /*
81  * Statistics for memory cgroup.
82  */
83 enum mem_cgroup_stat_index {
84         /*
85          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
86          */
87         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
88         MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
89         MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
90         MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
91         MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
92         MEM_CGROUP_ON_MOVE,     /* someone is moving account between groups */
93         MEM_CGROUP_STAT_NSTATS,
94 };
95
96 enum mem_cgroup_events_index {
97         MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
98         MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
99         MEM_CGROUP_EVENTS_COUNT,        /* # of pages paged in/out */
100         MEM_CGROUP_EVENTS_PGFAULT,      /* # of page-faults */
101         MEM_CGROUP_EVENTS_PGMAJFAULT,   /* # of major page-faults */
102         MEM_CGROUP_EVENTS_NSTATS,
103 };
104 /*
105  * The per-memcg event counter is incremented at every pagein/pageout. With
106  * THP, it is incremented by the number of pages. This counter is used to
107  * trigger periodic events, which is straightforward and better
108  * than using jiffies etc. to handle periodic memcg events.
109  */
110 enum mem_cgroup_events_target {
111         MEM_CGROUP_TARGET_THRESH,
112         MEM_CGROUP_TARGET_SOFTLIMIT,
113         MEM_CGROUP_TARGET_NUMAINFO,
114         MEM_CGROUP_NTARGETS,
115 };
116 #define THRESHOLDS_EVENTS_TARGET (128)
117 #define SOFTLIMIT_EVENTS_TARGET (1024)
118 #define NUMAINFO_EVENTS_TARGET  (1024)
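
/*
 * Editorial illustration (not part of the original source): with
 * THRESHOLDS_EVENTS_TARGET at 128, suppose a CPU's
 * events[MEM_CGROUP_EVENTS_COUNT] has advanced to 130 while
 * targets[MEM_CGROUP_TARGET_THRESH] still holds 128. __memcg_event_check()
 * then returns true, the threshold handler runs, and
 * __mem_cgroup_target_update() re-arms the target at 130 + 128 = 258.
 */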
119
120 struct mem_cgroup_stat_cpu {
121         long count[MEM_CGROUP_STAT_NSTATS];
122         unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
123         unsigned long targets[MEM_CGROUP_NTARGETS];
124 };
125
126 /*
127  * per-zone information in memory controller.
128  */
129 struct mem_cgroup_per_zone {
130         /*
131          * spin_lock to protect the per cgroup LRU
132          */
133         struct list_head        lists[NR_LRU_LISTS];
134         unsigned long           count[NR_LRU_LISTS];
135
136         struct zone_reclaim_stat reclaim_stat;
137         struct rb_node          tree_node;      /* RB tree node */
138         unsigned long long      usage_in_excess;/* Set to the value by which */
139                                                 /* the soft limit is exceeded*/
140         bool                    on_tree;
141         struct mem_cgroup       *mem;           /* Back pointer, we cannot */
142                                                 /* use container_of        */
143 };
144 /* Macro for accessing counter */
145 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
146
147 struct mem_cgroup_per_node {
148         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
149 };
150
151 struct mem_cgroup_lru_info {
152         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
153 };
154
155 /*
156  * Cgroups above their limits are maintained in an RB-tree, independent of
157  * their hierarchy representation.
158  */
159
160 struct mem_cgroup_tree_per_zone {
161         struct rb_root rb_root;
162         spinlock_t lock;
163 };
164
165 struct mem_cgroup_tree_per_node {
166         struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
167 };
168
169 struct mem_cgroup_tree {
170         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
171 };
172
173 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
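
/*
 * Editorial note: soft_limit_tree holds one RB-tree per (node, zone) pair.
 * Each tree orders mem_cgroup_per_zone entries by usage_in_excess, so the
 * rightmost node (see the rb_last() use below) is always the biggest
 * soft-limit offender for that zone.
 */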
174
175 struct mem_cgroup_threshold {
176         struct eventfd_ctx *eventfd;
177         u64 threshold;
178 };
179
180 /* For threshold */
181 struct mem_cgroup_threshold_ary {
182         /* Array index of the threshold just below the current usage. */
183         int current_threshold;
184         /* Size of entries[] */
185         unsigned int size;
186         /* Array of thresholds */
187         struct mem_cgroup_threshold entries[0];
188 };
189
190 struct mem_cgroup_thresholds {
191         /* Primary thresholds array */
192         struct mem_cgroup_threshold_ary *primary;
193         /*
194          * Spare threshold array.
195          * This is needed to make mem_cgroup_unregister_event() "never fail".
196          * It must be able to store at least primary->size - 1 entries.
197          */
198         struct mem_cgroup_threshold_ary *spare;
199 };
200
201 /* for OOM */
202 struct mem_cgroup_eventfd_list {
203         struct list_head list;
204         struct eventfd_ctx *eventfd;
205 };
206
207 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
208 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
209
210 /*
211  * The memory controller data structure. The memory controller controls both
212  * page cache and RSS per cgroup. We would eventually like to provide
213  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
214  * to help the administrator determine what knobs to tune.
215  *
216  * TODO: Add a water mark for the memory controller. Reclaim will begin when
217  * we hit the water mark. Maybe even add a low water mark, such that
218  * no reclaim occurs from a cgroup at its low water mark; this is
219  * a feature that will be implemented much later.
220  */
221 struct mem_cgroup {
222         struct cgroup_subsys_state css;
223         /*
224          * the counter to account for memory usage
225          */
226         struct res_counter res;
227         /*
228          * the counter to account for mem+swap usage.
229          */
230         struct res_counter memsw;
231         /*
232          * Per cgroup active and inactive list, similar to the
233          * per zone LRU lists.
234          */
235         struct mem_cgroup_lru_info info;
236         /*
237          * While reclaiming in a hierarchy, we cache the last child we
238          * reclaimed from.
239          */
240         int last_scanned_child;
241         int last_scanned_node;
242 #if MAX_NUMNODES > 1
243         nodemask_t      scan_nodes;
244         atomic_t        numainfo_events;
245         atomic_t        numainfo_updating;
246 #endif
247         /*
248          * Should the accounting and control be hierarchical, per subtree?
249          */
250         bool use_hierarchy;
251
252         bool            oom_lock;
253         atomic_t        under_oom;
254
255         atomic_t        refcnt;
256
257         int     swappiness;
258         /* OOM-Killer disable */
259         int             oom_kill_disable;
260
261         /* set when res.limit == memsw.limit */
262         bool            memsw_is_minimum;
263
264         /* protect arrays of thresholds */
265         struct mutex thresholds_lock;
266
267         /* thresholds for memory usage. RCU-protected */
268         struct mem_cgroup_thresholds thresholds;
269
270         /* thresholds for mem+swap usage. RCU-protected */
271         struct mem_cgroup_thresholds memsw_thresholds;
272
273         /* For oom notifier event fd */
274         struct list_head oom_notify;
275
276         /*
277          * Should we move charges of a task when a task is moved into this
278          * mem_cgroup ? And what type of charges should we move ?
279          */
280         unsigned long   move_charge_at_immigrate;
281         /*
282          * percpu counter.
283          */
284         struct mem_cgroup_stat_cpu *stat;
285         /*
286          * used when a cpu is offlined or other synchronizations
287          * See mem_cgroup_read_stat().
288          */
289         struct mem_cgroup_stat_cpu nocpu_base;
290         spinlock_t pcp_counter_lock;
291
292 #ifdef CONFIG_INET
293         struct tcp_memcontrol tcp_mem;
294 #endif
295 };
296
297 /* Stuff for moving charges at task migration. */
298 /*
299  * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
300  * left-shifted bitmap of these types.
301  */
302 enum move_type {
303         MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
304         MOVE_CHARGE_TYPE_FILE,  /* file page(including tmpfs) and swap of it */
305         NR_MOVE_TYPE,
306 };
307
308 /* "mc" and its members are protected by cgroup_mutex */
309 static struct move_charge_struct {
310         spinlock_t        lock; /* for from, to */
311         struct mem_cgroup *from;
312         struct mem_cgroup *to;
313         unsigned long precharge;
314         unsigned long moved_charge;
315         unsigned long moved_swap;
316         struct task_struct *moving_task;        /* a task moving charges */
317         wait_queue_head_t waitq;                /* a waitq for other context */
318 } mc = {
319         .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
320         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
321 };
322
323 static bool move_anon(void)
324 {
325         return test_bit(MOVE_CHARGE_TYPE_ANON,
326                                         &mc.to->move_charge_at_immigrate);
327 }
328
329 static bool move_file(void)
330 {
331         return test_bit(MOVE_CHARGE_TYPE_FILE,
332                                         &mc.to->move_charge_at_immigrate);
333 }
334
335 /*
336  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
337  * limit reclaim to prevent infinite loops, if they ever occur.
338  */
339 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
340 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
341
342 enum charge_type {
343         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
344         MEM_CGROUP_CHARGE_TYPE_MAPPED,
345         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
346         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
347         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
348         MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
349         NR_CHARGE_TYPE,
350 };
351
352 /* for encoding cft->private value on file */
353 #define _MEM                    (0)
354 #define _MEMSWAP                (1)
355 #define _OOM_TYPE               (2)
356 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
357 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
358 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
359 /* Used for OOM notifier */
360 #define OOM_CONTROL             (0)
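
/*
 * Editorial illustration (hypothetical usage, not part of the original
 * source): MEMFILE_PRIVATE() packs a counter type into the upper 16 bits of
 * cft->private and an attribute into the lower 16 bits, which the file
 * handlers later unpack:
 *
 *      int private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);
 *      int type = MEMFILE_TYPE(private);       // == _MEMSWAP
 *      int attr = MEMFILE_ATTR(private);       // == RES_LIMIT
 */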
361
362 /*
363  * Reclaim flags for mem_cgroup_hierarchical_reclaim
364  */
365 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT   0x0
366 #define MEM_CGROUP_RECLAIM_NOSWAP       (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
367 #define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
368 #define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
369 #define MEM_CGROUP_RECLAIM_SOFT_BIT     0x2
370 #define MEM_CGROUP_RECLAIM_SOFT         (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
371
372 static void mem_cgroup_get(struct mem_cgroup *memcg);
373 static void mem_cgroup_put(struct mem_cgroup *memcg);
374
375 /* Writing them here to avoid exposing memcg's inner layout */
376 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
377 #ifdef CONFIG_INET
378 #include <net/sock.h>
379 #include <net/ip.h>
380
381 static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
382 void sock_update_memcg(struct sock *sk)
383 {
384         if (static_branch(&memcg_socket_limit_enabled)) {
385                 struct mem_cgroup *memcg;
386
387                 BUG_ON(!sk->sk_prot->proto_cgroup);
388
389                 /* Socket cloning can throw us here with sk_cgrp already
390                  * filled. It won't, however, necessarily happen from
391                  * process context. So the test for root memcg given
392                  * the current task's memcg won't help us in this case.
393                  *
394                  * Respecting the original socket's memcg is a better
395                  * decision in this case.
396                  */
397                 if (sk->sk_cgrp) {
398                         BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
399                         mem_cgroup_get(sk->sk_cgrp->memcg);
400                         return;
401                 }
402
403                 rcu_read_lock();
404                 memcg = mem_cgroup_from_task(current);
405                 if (!mem_cgroup_is_root(memcg)) {
406                         mem_cgroup_get(memcg);
407                         sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
408                 }
409                 rcu_read_unlock();
410         }
411 }
412 EXPORT_SYMBOL(sock_update_memcg);
413
414 void sock_release_memcg(struct sock *sk)
415 {
416         if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
417                 struct mem_cgroup *memcg;
418                 WARN_ON(!sk->sk_cgrp->memcg);
419                 memcg = sk->sk_cgrp->memcg;
420                 mem_cgroup_put(memcg);
421         }
422 }
423
424 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
425 {
426         if (!memcg || mem_cgroup_is_root(memcg))
427                 return NULL;
428
429         return &memcg->tcp_mem.cg_proto;
430 }
431 EXPORT_SYMBOL(tcp_proto_cgroup);
432 #endif /* CONFIG_INET */
433 #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
434
435 static void drain_all_stock_async(struct mem_cgroup *memcg);
436
437 static struct mem_cgroup_per_zone *
438 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
439 {
440         return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
441 }
442
443 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
444 {
445         return &memcg->css;
446 }
447
448 static struct mem_cgroup_per_zone *
449 page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
450 {
451         int nid = page_to_nid(page);
452         int zid = page_zonenum(page);
453
454         return mem_cgroup_zoneinfo(memcg, nid, zid);
455 }
456
457 static struct mem_cgroup_tree_per_zone *
458 soft_limit_tree_node_zone(int nid, int zid)
459 {
460         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
461 }
462
463 static struct mem_cgroup_tree_per_zone *
464 soft_limit_tree_from_page(struct page *page)
465 {
466         int nid = page_to_nid(page);
467         int zid = page_zonenum(page);
468
469         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
470 }
471
472 static void
473 __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
474                                 struct mem_cgroup_per_zone *mz,
475                                 struct mem_cgroup_tree_per_zone *mctz,
476                                 unsigned long long new_usage_in_excess)
477 {
478         struct rb_node **p = &mctz->rb_root.rb_node;
479         struct rb_node *parent = NULL;
480         struct mem_cgroup_per_zone *mz_node;
481
482         if (mz->on_tree)
483                 return;
484
485         mz->usage_in_excess = new_usage_in_excess;
486         if (!mz->usage_in_excess)
487                 return;
488         while (*p) {
489                 parent = *p;
490                 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
491                                         tree_node);
492                 if (mz->usage_in_excess < mz_node->usage_in_excess)
493                         p = &(*p)->rb_left;
494                 /*
495                  * We can't avoid mem cgroups that are over their soft
496                  * limit by the same amount
497                  */
498                 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
499                         p = &(*p)->rb_right;
500         }
501         rb_link_node(&mz->tree_node, parent, p);
502         rb_insert_color(&mz->tree_node, &mctz->rb_root);
503         mz->on_tree = true;
504 }
505
506 static void
507 __mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
508                                 struct mem_cgroup_per_zone *mz,
509                                 struct mem_cgroup_tree_per_zone *mctz)
510 {
511         if (!mz->on_tree)
512                 return;
513         rb_erase(&mz->tree_node, &mctz->rb_root);
514         mz->on_tree = false;
515 }
516
517 static void
518 mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
519                                 struct mem_cgroup_per_zone *mz,
520                                 struct mem_cgroup_tree_per_zone *mctz)
521 {
522         spin_lock(&mctz->lock);
523         __mem_cgroup_remove_exceeded(memcg, mz, mctz);
524         spin_unlock(&mctz->lock);
525 }
526
527
528 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
529 {
530         unsigned long long excess;
531         struct mem_cgroup_per_zone *mz;
532         struct mem_cgroup_tree_per_zone *mctz;
533         int nid = page_to_nid(page);
534         int zid = page_zonenum(page);
535         mctz = soft_limit_tree_from_page(page);
536
537         /*
538          * Necessary to update all ancestors when hierarchy is used,
539          * because their event counters are not touched.
540          */
541         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
542                 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
543                 excess = res_counter_soft_limit_excess(&memcg->res);
544                 /*
545                  * We have to update the tree if mz is on the RB-tree or
546                  * mem is over its soft limit.
547                  */
548                 if (excess || mz->on_tree) {
549                         spin_lock(&mctz->lock);
550                         /* if on-tree, remove it */
551                         if (mz->on_tree)
552                                 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
553                         /*
554                          * Insert again. mz->usage_in_excess will be updated.
555                          * If excess is 0, no tree ops.
556                          */
557                         __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
558                         spin_unlock(&mctz->lock);
559                 }
560         }
561 }
562
563 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
564 {
565         int node, zone;
566         struct mem_cgroup_per_zone *mz;
567         struct mem_cgroup_tree_per_zone *mctz;
568
569         for_each_node_state(node, N_POSSIBLE) {
570                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
571                         mz = mem_cgroup_zoneinfo(memcg, node, zone);
572                         mctz = soft_limit_tree_node_zone(node, zone);
573                         mem_cgroup_remove_exceeded(memcg, mz, mctz);
574                 }
575         }
576 }
577
578 static struct mem_cgroup_per_zone *
579 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
580 {
581         struct rb_node *rightmost = NULL;
582         struct mem_cgroup_per_zone *mz;
583
584 retry:
585         mz = NULL;
586         rightmost = rb_last(&mctz->rb_root);
587         if (!rightmost)
588                 goto done;              /* Nothing to reclaim from */
589
590         mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
591         /*
592          * Remove the node now but someone else can add it back,
593          * we will add it back at the end of reclaim to its correct
594          * position in the tree.
595          */
596         __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
597         if (!res_counter_soft_limit_excess(&mz->mem->res) ||
598                 !css_tryget(&mz->mem->css))
599                 goto retry;
600 done:
601         return mz;
602 }
603
604 static struct mem_cgroup_per_zone *
605 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
606 {
607         struct mem_cgroup_per_zone *mz;
608
609         spin_lock(&mctz->lock);
610         mz = __mem_cgroup_largest_soft_limit_node(mctz);
611         spin_unlock(&mctz->lock);
612         return mz;
613 }
614
615 /*
616  * Implementation Note: reading percpu statistics for memcg.
617  *
618  * Both vmstat[] and percpu_counter use thresholds and periodic
619  * synchronization to implement "quick" reads. There is a trade-off between
620  * reading cost and precision of the value, so we may have a chance to
621  * implement a similar periodic synchronization for memcg's counters.
622  *
623  * But this _read() function is currently used for the user interface. Users
624  * account memory usage by memory cgroup and _always_ require an exact value,
625  * because they account memory. Even if we provided a quick-and-fuzzy read,
626  * we would still have to visit all online cpus and compute the sum. So, for
627  * now, this extra synchronization is not implemented (except for cpu hotplug).
628  *
629  * If there are kernel-internal users that can make do with a not-exact
630  * value, and reading all cpu values becomes a performance bottleneck in some
631  * common workload, thresholds and synchronization as in vmstat[] should be
632  * implemented.
633  */
634 static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
635                                  enum mem_cgroup_stat_index idx)
636 {
637         long val = 0;
638         int cpu;
639
640         get_online_cpus();
641         for_each_online_cpu(cpu)
642                 val += per_cpu(memcg->stat->count[idx], cpu);
643 #ifdef CONFIG_HOTPLUG_CPU
644         spin_lock(&memcg->pcp_counter_lock);
645         val += memcg->nocpu_base.count[idx];
646         spin_unlock(&memcg->pcp_counter_lock);
647 #endif
648         put_online_cpus();
649         return val;
650 }
651
652 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
653                                          bool charge)
654 {
655         int val = (charge) ? 1 : -1;
656         this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
657 }
658
659 void mem_cgroup_pgfault(struct mem_cgroup *memcg, int val)
660 {
661         this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
662 }
663
664 void mem_cgroup_pgmajfault(struct mem_cgroup *memcg, int val)
665 {
666         this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
667 }
668
669 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
670                                             enum mem_cgroup_events_index idx)
671 {
672         unsigned long val = 0;
673         int cpu;
674
675         for_each_online_cpu(cpu)
676                 val += per_cpu(memcg->stat->events[idx], cpu);
677 #ifdef CONFIG_HOTPLUG_CPU
678         spin_lock(&memcg->pcp_counter_lock);
679         val += memcg->nocpu_base.events[idx];
680         spin_unlock(&memcg->pcp_counter_lock);
681 #endif
682         return val;
683 }
684
685 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
686                                          bool file, int nr_pages)
687 {
688         preempt_disable();
689
690         if (file)
691                 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
692                                 nr_pages);
693         else
694                 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
695                                 nr_pages);
696
697         /* a pagein of a big page is one event, so ignore the page size */
698         if (nr_pages > 0)
699                 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
700         else {
701                 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
702                 nr_pages = -nr_pages; /* for event */
703         }
704
705         __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
706
707         preempt_enable();
708 }
709
710 unsigned long
711 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
712                         unsigned int lru_mask)
713 {
714         struct mem_cgroup_per_zone *mz;
715         enum lru_list l;
716         unsigned long ret = 0;
717
718         mz = mem_cgroup_zoneinfo(memcg, nid, zid);
719
720         for_each_lru(l) {
721                 if (BIT(l) & lru_mask)
722                         ret += MEM_CGROUP_ZSTAT(mz, l);
723         }
724         return ret;
725 }
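
/*
 * Editorial illustration: lru_mask selects which LRU lists to count, e.g.
 * BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON) (i.e. LRU_ALL_ANON) sums
 * all anonymous pages accounted to the memcg in this zone.
 */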
726
727 static unsigned long
728 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
729                         int nid, unsigned int lru_mask)
730 {
731         u64 total = 0;
732         int zid;
733
734         for (zid = 0; zid < MAX_NR_ZONES; zid++)
735                 total += mem_cgroup_zone_nr_lru_pages(memcg,
736                                                 nid, zid, lru_mask);
737
738         return total;
739 }
740
741 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
742                         unsigned int lru_mask)
743 {
744         int nid;
745         u64 total = 0;
746
747         for_each_node_state(nid, N_HIGH_MEMORY)
748                 total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
749         return total;
750 }
751
752 static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
753 {
754         unsigned long val, next;
755
756         val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
757         next = __this_cpu_read(memcg->stat->targets[target]);
758         /* from time_after() in jiffies.h */
759         return ((long)next - (long)val < 0);
760 }
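
/*
 * Editorial note: the signed subtraction above is the same wraparound-safe
 * trick as time_after(). E.g. if the target has wrapped past zero while the
 * counter has not yet caught up, val = ULONG_MAX and next = 2 give
 * (long)2 - (long)ULONG_MAX == 3, which is not negative, so the event is
 * correctly treated as not yet due.
 */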
761
762 static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
763 {
764         unsigned long val, next;
765
766         val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
767
768         switch (target) {
769         case MEM_CGROUP_TARGET_THRESH:
770                 next = val + THRESHOLDS_EVENTS_TARGET;
771                 break;
772         case MEM_CGROUP_TARGET_SOFTLIMIT:
773                 next = val + SOFTLIMIT_EVENTS_TARGET;
774                 break;
775         case MEM_CGROUP_TARGET_NUMAINFO:
776                 next = val + NUMAINFO_EVENTS_TARGET;
777                 break;
778         default:
779                 return;
780         }
781
782         __this_cpu_write(memcg->stat->targets[target], next);
783 }
784
785 /*
786  * Check events in order: the threshold event is checked at a finer
787  * grain than the soft limit and NUMA info updates.
788  */
789 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
790 {
791         preempt_disable();
792         /* threshold event is triggered in finer grain than soft limit */
793         if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
794                 mem_cgroup_threshold(memcg);
795                 __mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
796                 if (unlikely(__memcg_event_check(memcg,
797                              MEM_CGROUP_TARGET_SOFTLIMIT))) {
798                         mem_cgroup_update_tree(memcg, page);
799                         __mem_cgroup_target_update(memcg,
800                                                    MEM_CGROUP_TARGET_SOFTLIMIT);
801                 }
802 #if MAX_NUMNODES > 1
803                 if (unlikely(__memcg_event_check(memcg,
804                         MEM_CGROUP_TARGET_NUMAINFO))) {
805                         atomic_inc(&memcg->numainfo_events);
806                         __mem_cgroup_target_update(memcg,
807                                 MEM_CGROUP_TARGET_NUMAINFO);
808                 }
809 #endif
810         }
811         preempt_enable();
812 }
813
814 struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
815 {
816         return container_of(cgroup_subsys_state(cont,
817                                 mem_cgroup_subsys_id), struct mem_cgroup,
818                                 css);
819 }
820
821 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
822 {
823         /*
824          * mm_update_next_owner() may clear mm->owner to NULL
825          * if it races with swapoff, page migration, etc.
826          * So this can be called with p == NULL.
827          */
828         if (unlikely(!p))
829                 return NULL;
830
831         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
832                                 struct mem_cgroup, css);
833 }
834
835 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
836 {
837         struct mem_cgroup *memcg = NULL;
838
839         if (!mm)
840                 return NULL;
841         /*
842          * Because we have no locks, mm->owner may be being moved to another
843          * cgroup. We use css_tryget() here even if this looks
844          * pessimistic (rather than adding locks here).
845          */
846         rcu_read_lock();
847         do {
848                 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
849                 if (unlikely(!memcg))
850                         break;
851         } while (!css_tryget(&memcg->css));
852         rcu_read_unlock();
853         return memcg;
854 }
855
856 static struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
857                                           struct mem_cgroup *prev,
858                                           bool reclaim)
859 {
860         struct mem_cgroup *memcg = NULL;
861         int id = 0;
862
863         if (!root)
864                 root = root_mem_cgroup;
865
866         if (prev && !reclaim)
867                 id = css_id(&prev->css);
868
869         if (prev && prev != root)
870                 css_put(&prev->css);
871
872         if (!root->use_hierarchy && root != root_mem_cgroup) {
873                 if (prev)
874                         return NULL;
875                 return root;
876         }
877
878         while (!memcg) {
879                 struct cgroup_subsys_state *css;
880
881                 if (reclaim)
882                         id = root->last_scanned_child;
883
884                 rcu_read_lock();
885                 css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
886                 if (css) {
887                         if (css == &root->css || css_tryget(css))
888                                 memcg = container_of(css,
889                                                      struct mem_cgroup, css);
890                 } else
891                         id = 0;
892                 rcu_read_unlock();
893
894                 if (reclaim)
895                         root->last_scanned_child = id;
896
897                 if (prev && !css)
898                         return NULL;
899         }
900         return memcg;
901 }
902
903 static void mem_cgroup_iter_break(struct mem_cgroup *root,
904                                   struct mem_cgroup *prev)
905 {
906         if (!root)
907                 root = root_mem_cgroup;
908         if (prev && prev != root)
909                 css_put(&prev->css);
910 }
911
912 /*
913  * Iteration constructs for visiting all cgroups (under a tree).  If
914  * loops are exited prematurely (break), mem_cgroup_iter_break() must
915  * be used for reference counting.
916  */
917 #define for_each_mem_cgroup_tree(iter, root)            \
918         for (iter = mem_cgroup_iter(root, NULL, false); \
919              iter != NULL;                              \
920              iter = mem_cgroup_iter(root, iter, false))
921
922 #define for_each_mem_cgroup(iter)                       \
923         for (iter = mem_cgroup_iter(NULL, NULL, false); \
924              iter != NULL;                              \
925              iter = mem_cgroup_iter(NULL, iter, false))
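
/*
 * Editorial illustration (hypothetical predicate, not part of the original
 * source): a typical full-tree walk. If the loop bails out early, the
 * reference taken by the iterator must be dropped via
 * mem_cgroup_iter_break():
 *
 *      struct mem_cgroup *iter;
 *
 *      for_each_mem_cgroup_tree(iter, memcg) {
 *              if (should_stop(iter)) {        // hypothetical predicate
 *                      mem_cgroup_iter_break(memcg, iter);
 *                      break;
 *              }
 *      }
 */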
926
927 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
928 {
929         return (memcg == root_mem_cgroup);
930 }
931
932 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
933 {
934         struct mem_cgroup *memcg;
935
936         if (!mm)
937                 return;
938
939         rcu_read_lock();
940         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
941         if (unlikely(!memcg))
942                 goto out;
943
944         switch (idx) {
945         case PGMAJFAULT:
946                 mem_cgroup_pgmajfault(memcg, 1);
947                 break;
948         case PGFAULT:
949                 mem_cgroup_pgfault(memcg, 1);
950                 break;
951         default:
952                 BUG();
953         }
954 out:
955         rcu_read_unlock();
956 }
957 EXPORT_SYMBOL(mem_cgroup_count_vm_event);
958
959 /*
960  * Following LRU functions are allowed to be used without PCG_LOCK.
961  * Operations are called by the global LRU routines independently of memcg.
962  * What we have to take care of here is the validity of pc->mem_cgroup.
963  *
964  * Changes to pc->mem_cgroup happens when
965  * 1. charge
966  * 2. moving account
967  * In the typical case, "charge" is done before add-to-lru. The exception is
968  * SwapCache, which is added to the LRU before being charged.
969  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
970  * When moving account, the page is not on LRU. It's isolated.
971  */
972
973 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
974 {
975         struct page_cgroup *pc;
976         struct mem_cgroup_per_zone *mz;
977
978         if (mem_cgroup_disabled())
979                 return;
980         pc = lookup_page_cgroup(page);
981         /* can happen while we handle swapcache. */
982         if (!TestClearPageCgroupAcctLRU(pc))
983                 return;
984         VM_BUG_ON(!pc->mem_cgroup);
985         /*
986          * We don't check PCG_USED bit. It's cleared when the "page" is finally
987          * removed from global LRU.
988          */
989         mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
990         /* huge page split is done under lru_lock, so we have no races. */
991         MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
992         if (mem_cgroup_is_root(pc->mem_cgroup))
993                 return;
994         VM_BUG_ON(list_empty(&pc->lru));
995         list_del_init(&pc->lru);
996 }
997
998 void mem_cgroup_del_lru(struct page *page)
999 {
1000         mem_cgroup_del_lru_list(page, page_lru(page));
1001 }
1002
1003 /*
1004  * Writeback is about to end against a page which has been marked for immediate
1005  * reclaim.  If it still appears to be reclaimable, move it to the tail of the
1006  * inactive list.
1007  */
1008 void mem_cgroup_rotate_reclaimable_page(struct page *page)
1009 {
1010         struct mem_cgroup_per_zone *mz;
1011         struct page_cgroup *pc;
1012         enum lru_list lru = page_lru(page);
1013
1014         if (mem_cgroup_disabled())
1015                 return;
1016
1017         pc = lookup_page_cgroup(page);
1018         /* unused or root page is not rotated. */
1019         if (!PageCgroupUsed(pc))
1020                 return;
1021         /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1022         smp_rmb();
1023         if (mem_cgroup_is_root(pc->mem_cgroup))
1024                 return;
1025         mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1026         list_move_tail(&pc->lru, &mz->lists[lru]);
1027 }
1028
1029 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
1030 {
1031         struct mem_cgroup_per_zone *mz;
1032         struct page_cgroup *pc;
1033
1034         if (mem_cgroup_disabled())
1035                 return;
1036
1037         pc = lookup_page_cgroup(page);
1038         /* unused or root page is not rotated. */
1039         if (!PageCgroupUsed(pc))
1040                 return;
1041         /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1042         smp_rmb();
1043         if (mem_cgroup_is_root(pc->mem_cgroup))
1044                 return;
1045         mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1046         list_move(&pc->lru, &mz->lists[lru]);
1047 }
1048
1049 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
1050 {
1051         struct page_cgroup *pc;
1052         struct mem_cgroup_per_zone *mz;
1053
1054         if (mem_cgroup_disabled())
1055                 return;
1056         pc = lookup_page_cgroup(page);
1057         VM_BUG_ON(PageCgroupAcctLRU(pc));
1058         /*
1059          * putback:                             charge:
1060          * SetPageLRU                           SetPageCgroupUsed
1061          * smp_mb                               smp_mb
1062          * PageCgroupUsed && add to memcg LRU   PageLRU && add to memcg LRU
1063          *
1064          * Ensure that one of the two sides adds the page to the memcg
1065          * LRU during a race.
1066          */
1067         smp_mb();
1068         if (!PageCgroupUsed(pc))
1069                 return;
1070         /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1071         smp_rmb();
1072         mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1073         /* huge page split is done under lru_lock, so we have no races. */
1074         MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
1075         SetPageCgroupAcctLRU(pc);
1076         if (mem_cgroup_is_root(pc->mem_cgroup))
1077                 return;
1078         list_add(&pc->lru, &mz->lists[lru]);
1079 }
1080
1081 /*
1082  * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
1083  * while it's linked to the lru, because the page may be reused after it's fully
1084  * uncharged. To handle that, unlink the page_cgroup from the LRU when charging
1085  * it again. This is done under lock_page, and zone->lru_lock is never held.
1086  */
1087 static void mem_cgroup_lru_del_before_commit(struct page *page)
1088 {
1089         unsigned long flags;
1090         struct zone *zone = page_zone(page);
1091         struct page_cgroup *pc = lookup_page_cgroup(page);
1092
1093         /*
1094          * Doing this check without taking ->lru_lock seems wrong, but it
1095          * is safe: if the page_cgroup's USED bit is unset, the page
1096          * will not be added to any memcg's LRU. If the page_cgroup's USED bit is
1097          * set, the commit after this will fail anyway.
1098          * All of this charge/uncharge is done under some mutual exclusion,
1099          * so we don't need to take care of changes in the USED bit.
1100          */
1101         if (likely(!PageLRU(page)))
1102                 return;
1103
1104         spin_lock_irqsave(&zone->lru_lock, flags);
1105         /*
1106          * Forget the old LRU when this page_cgroup is *not* used. The USED bit
1107          * is guarded by lock_page() because the page is SwapCache.
1108          */
1109         if (!PageCgroupUsed(pc))
1110                 mem_cgroup_del_lru_list(page, page_lru(page));
1111         spin_unlock_irqrestore(&zone->lru_lock, flags);
1112 }
1113
1114 static void mem_cgroup_lru_add_after_commit(struct page *page)
1115 {
1116         unsigned long flags;
1117         struct zone *zone = page_zone(page);
1118         struct page_cgroup *pc = lookup_page_cgroup(page);
1119         /*
1120          * putback:                             charge:
1121          * SetPageLRU                           SetPageCgroupUsed
1122          * smp_mb                               smp_mb
1123          * PageCgroupUsed && add to memcg LRU   PageLRU && add to memcg LRU
1124          *
1125          * Ensure that one of the two sides adds the page to the memcg
1126          * LRU during a race.
1127          */
1128         smp_mb();
1129         /* take care that the page may be added to the LRU while we commit it */
1130         if (likely(!PageLRU(page)))
1131                 return;
1132         spin_lock_irqsave(&zone->lru_lock, flags);
1133         /* link when the page is linked to LRU but page_cgroup isn't */
1134         if (PageLRU(page) && !PageCgroupAcctLRU(pc))
1135                 mem_cgroup_add_lru_list(page, page_lru(page));
1136         spin_unlock_irqrestore(&zone->lru_lock, flags);
1137 }
1138
1139
1140 void mem_cgroup_move_lists(struct page *page,
1141                            enum lru_list from, enum lru_list to)
1142 {
1143         if (mem_cgroup_disabled())
1144                 return;
1145         mem_cgroup_del_lru_list(page, from);
1146         mem_cgroup_add_lru_list(page, to);
1147 }
1148
1149 /*
1150  * Checks whether the given memcg is the same as, or a descendant of,
1151  * root_memcg within its hierarchy subtree.
1152  */
1153 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1154                 struct mem_cgroup *memcg)
1155 {
1156         if (root_memcg != memcg) {
1157                 return (root_memcg->use_hierarchy &&
1158                         css_is_ancestor(&memcg->css, &root_memcg->css));
1159         }
1160
1161         return true;
1162 }
1163
1164 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
1165 {
1166         int ret;
1167         struct mem_cgroup *curr = NULL;
1168         struct task_struct *p;
1169
1170         p = find_lock_task_mm(task);
1171         if (!p)
1172                 return 0;
1173         curr = try_get_mem_cgroup_from_mm(p->mm);
1174         task_unlock(p);
1175         if (!curr)
1176                 return 0;
1177         /*
1178          * We should check use_hierarchy of "memcg", not "curr", because checking
1179          * use_hierarchy of "curr" here would make this function return true if
1180          * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
1181          * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
1182          */
1183         ret = mem_cgroup_same_or_subtree(memcg, curr);
1184         css_put(&curr->css);
1185         return ret;
1186 }
1187
1188 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
1189 {
1190         unsigned long inactive_ratio;
1191         int nid = zone_to_nid(zone);
1192         int zid = zone_idx(zone);
1193         unsigned long inactive;
1194         unsigned long active;
1195         unsigned long gb;
1196
1197         inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1198                                                 BIT(LRU_INACTIVE_ANON));
1199         active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1200                                               BIT(LRU_ACTIVE_ANON));
1201
1202         gb = (inactive + active) >> (30 - PAGE_SHIFT);
1203         if (gb)
1204                 inactive_ratio = int_sqrt(10 * gb);
1205         else
1206                 inactive_ratio = 1;
1207
1208         return inactive * inactive_ratio < active;
1209 }
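
/*
 * Editorial illustration: with 4GB of anonymous pages in the zone, gb = 4
 * and inactive_ratio = int_sqrt(40) = 6, so the inactive list is reported
 * low whenever inactive * 6 < active, i.e. when less than roughly 1/7th of
 * the anonymous pages are on the inactive list.
 */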
1210
1211 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
1212 {
1213         unsigned long active;
1214         unsigned long inactive;
1215         int zid = zone_idx(zone);
1216         int nid = zone_to_nid(zone);
1217
1218         inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1219                                                 BIT(LRU_INACTIVE_FILE));
1220         active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1221                                               BIT(LRU_ACTIVE_FILE));
1222
1223         return (active > inactive);
1224 }
1225
1226 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1227                                                       struct zone *zone)
1228 {
1229         int nid = zone_to_nid(zone);
1230         int zid = zone_idx(zone);
1231         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1232
1233         return &mz->reclaim_stat;
1234 }
1235
1236 struct zone_reclaim_stat *
1237 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
1238 {
1239         struct page_cgroup *pc;
1240         struct mem_cgroup_per_zone *mz;
1241
1242         if (mem_cgroup_disabled())
1243                 return NULL;
1244
1245         pc = lookup_page_cgroup(page);
1246         if (!PageCgroupUsed(pc))
1247                 return NULL;
1248         /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1249         smp_rmb();
1250         mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1251         return &mz->reclaim_stat;
1252 }
1253
1254 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
1255                                         struct list_head *dst,
1256                                         unsigned long *scanned, int order,
1257                                         isolate_mode_t mode,
1258                                         struct zone *z,
1259                                         struct mem_cgroup *mem_cont,
1260                                         int active, int file)
1261 {
1262         unsigned long nr_taken = 0;
1263         struct page *page;
1264         unsigned long scan;
1265         LIST_HEAD(pc_list);
1266         struct list_head *src;
1267         struct page_cgroup *pc, *tmp;
1268         int nid = zone_to_nid(z);
1269         int zid = zone_idx(z);
1270         struct mem_cgroup_per_zone *mz;
1271         int lru = LRU_FILE * file + active;
1272         int ret;
1273
1274         BUG_ON(!mem_cont);
1275         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1276         src = &mz->lists[lru];
1277
1278         scan = 0;
1279         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
1280                 if (scan >= nr_to_scan)
1281                         break;
1282
1283                 if (unlikely(!PageCgroupUsed(pc)))
1284                         continue;
1285
1286                 page = lookup_cgroup_page(pc);
1287
1288                 if (unlikely(!PageLRU(page)))
1289                         continue;
1290
1291                 scan++;
1292                 ret = __isolate_lru_page(page, mode, file);
1293                 switch (ret) {
1294                 case 0:
1295                         list_move(&page->lru, dst);
1296                         mem_cgroup_del_lru(page);
1297                         nr_taken += hpage_nr_pages(page);
1298                         break;
1299                 case -EBUSY:
1300                         /* we don't affect global LRU but rotate in our LRU */
1301                         mem_cgroup_rotate_lru_list(page, page_lru(page));
1302                         break;
1303                 default:
1304                         break;
1305                 }
1306         }
1307
1308         *scanned = scan;
1309
1310         trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
1311                                       0, 0, 0, mode);
1312
1313         return nr_taken;
1314 }
1315
1316 #define mem_cgroup_from_res_counter(counter, member)    \
1317         container_of(counter, struct mem_cgroup, member)
1318
1319 /**
1320  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1321  * @memcg: the memory cgroup
1322  *
1323  * Returns the maximum amount of memory @memcg can be charged with, in
1324  * pages.
1325  */
1326 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1327 {
1328         unsigned long long margin;
1329
1330         margin = res_counter_margin(&memcg->res);
1331         if (do_swap_account)
1332                 margin = min(margin, res_counter_margin(&memcg->memsw));
1333         return margin >> PAGE_SHIFT;
1334 }
1335
1336 int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1337 {
1338         struct cgroup *cgrp = memcg->css.cgroup;
1339
1340         /* root ? */
1341         if (cgrp->parent == NULL)
1342                 return vm_swappiness;
1343
1344         return memcg->swappiness;
1345 }
1346
1347 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1348 {
1349         int cpu;
1350
1351         get_online_cpus();
1352         spin_lock(&memcg->pcp_counter_lock);
1353         for_each_online_cpu(cpu)
1354                 per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
1355         memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
1356         spin_unlock(&memcg->pcp_counter_lock);
1357         put_online_cpus();
1358
1359         synchronize_rcu();
1360 }
1361
1362 static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1363 {
1364         int cpu;
1365
1366         if (!memcg)
1367                 return;
1368         get_online_cpus();
1369         spin_lock(&memcg->pcp_counter_lock);
1370         for_each_online_cpu(cpu)
1371                 per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
1372         memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
1373         spin_unlock(&memcg->pcp_counter_lock);
1374         put_online_cpus();
1375 }
1376 /*
1377  * Two routines for checking whether "mem" is under move_account() or not.
1378  *
1379  * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This is
1380  *                        used for avoiding races in accounting. If true,
1381  *                        pc->mem_cgroup may be overwritten.
1382  *
1383  * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or
1384  *                        under the hierarchy of moving cgroups. This is for
1385  *                        waiting at high memory pressure caused by "move".
1386  */
1387
1388 static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
1389 {
1390         VM_BUG_ON(!rcu_read_lock_held());
1391         return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
1392 }
1393
1394 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1395 {
1396         struct mem_cgroup *from;
1397         struct mem_cgroup *to;
1398         bool ret = false;
1399         /*
1400          * Unlike the task_move routines, we access mc.to and mc.from without
1401          * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1402          */
1403         spin_lock(&mc.lock);
1404         from = mc.from;
1405         to = mc.to;
1406         if (!from)
1407                 goto unlock;
1408
1409         ret = mem_cgroup_same_or_subtree(memcg, from)
1410                 || mem_cgroup_same_or_subtree(memcg, to);
1411 unlock:
1412         spin_unlock(&mc.lock);
1413         return ret;
1414 }
1415
1416 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1417 {
1418         if (mc.moving_task && current != mc.moving_task) {
1419                 if (mem_cgroup_under_move(memcg)) {
1420                         DEFINE_WAIT(wait);
1421                         prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1422                         /* moving charge context might have finished. */
1423                         if (mc.moving_task)
1424                                 schedule();
1425                         finish_wait(&mc.waitq, &wait);
1426                         return true;
1427                 }
1428         }
1429         return false;
1430 }
1431
1432 /**
1433  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1434  * @memcg: The memory cgroup that went over limit
1435  * @p: Task that is going to be killed
1436  *
1437  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1438  * enabled
1439  */
1440 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1441 {
1442         struct cgroup *task_cgrp;
1443         struct cgroup *mem_cgrp;
1444         /*
1445          * Need a buffer in BSS, can't rely on allocations. The code relies
1446          * on the assumption that OOM is serialized for the memory controller.
1447          * If this assumption is broken, revisit this code.
1448          */
1449         static char memcg_name[PATH_MAX];
1450         int ret;
1451
1452         if (!memcg || !p)
1453                 return;
1454
1455
1456         rcu_read_lock();
1457
1458         mem_cgrp = memcg->css.cgroup;
1459         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1460
1461         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1462         if (ret < 0) {
1463                 /*
1464                  * Unfortunately, we are unable to convert to a useful name,
1465                  * but we'll still print out the usage information.
1466                  */
1467                 rcu_read_unlock();
1468                 goto done;
1469         }
1470         rcu_read_unlock();
1471
1472         printk(KERN_INFO "Task in %s killed", memcg_name);
1473
1474         rcu_read_lock();
1475         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1476         if (ret < 0) {
1477                 rcu_read_unlock();
1478                 goto done;
1479         }
1480         rcu_read_unlock();
1481
1482         /*
1483          * Continues from above, so we don't need a KERN_ level.
1484          */
1485         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1486 done:
1487
1488         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1489                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1490                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1491                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1492         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1493                 "failcnt %llu\n",
1494                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1495                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1496                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1497 }
1498
1499 /*
1500  * This function returns the number of memcgs in the hierarchy tree. Returns
1501  * 1 (self count) if there are no children.
1502  */
1503 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1504 {
1505         int num = 0;
1506         struct mem_cgroup *iter;
1507
1508         for_each_mem_cgroup_tree(iter, memcg)
1509                 num++;
1510         return num;
1511 }
1512
1513 /*
1514  * Return the memory (and swap, if configured) limit for a memcg.
1515  */
1516 u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1517 {
1518         u64 limit;
1519         u64 memsw;
1520
1521         limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1522         limit += total_swap_pages << PAGE_SHIFT;
1523
1524         memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1525         /*
1526          * If memsw is finite and limits the amount of swap space available
1527          * to this memcg, return that limit.
1528          */
1529         return min(limit, memsw);
1530 }
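
/*
 * Editorial note: res.limit + total swap is what the memcg could consume if
 * it pushed as much as possible out to swap, while the memsw limit bounds
 * the same mem+swap total directly. The smaller of the two is therefore the
 * effective ceiling; an effectively unlimited memsw never wins the min().
 */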
1531
1532 /**
1533  * test_mem_cgroup_node_reclaimable
1534  * @memcg: the target memcg
1535  * @nid: the node ID to be checked.
1536  * @noswap: specify true here if the user wants file-only information.
1537  *
1538  * This function returns whether the specified memcg contains any
1539  * reclaimable pages on a node. Returns true if there are any reclaimable
1540  * pages in the node.
1541  */
1542 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1543                 int nid, bool noswap)
1544 {
1545         if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1546                 return true;
1547         if (noswap || !total_swap_pages)
1548                 return false;
1549         if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1550                 return true;
1551         return false;
1553 }
1554 #if MAX_NUMNODES > 1
1555
1556 /*
1557  * Always updating the nodemask is not very good - even if we have an empty
1558  * list or the wrong list here, we can start from some node and traverse all
1559  * nodes based on the zonelist. So update the list loosely once per 10 seconds.
1561  */
1562 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1563 {
1564         int nid;
1565         /*
1566          * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1567          * pagein/pageout changes since the last update.
1568          */
1569         if (!atomic_read(&memcg->numainfo_events))
1570                 return;
1571         if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1572                 return;
1573
1574         /* make a nodemask where this memcg uses memory from */
1575         memcg->scan_nodes = node_states[N_HIGH_MEMORY];
1576
1577         for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
1578
1579                 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1580                         node_clear(nid, memcg->scan_nodes);
1581         }
1582
1583         atomic_set(&memcg->numainfo_events, 0);
1584         atomic_set(&memcg->numainfo_updating, 0);
1585 }
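/*
 * Note: atomic_inc_return(&memcg->numainfo_updating) > 1 above works as a
 * try-lock. The first caller sees 1 and rebuilds scan_nodes; concurrent
 * callers see a larger value and back off, and both counters are reset to
 * 0 once the rebuild is finished.
 */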
1586
1587 /*
1588  * Select a node to start reclaim from. Because all we need is to reduce
1589  * the usage counter, starting from anywhere is OK. Reclaiming memory
1590  * from the current node has both pros and cons.
1591  *
1592  * Freeing memory from the current node means freeing memory from a node
1593  * which we'll use or have used, so it may make the LRU bad. And if
1594  * several threads hit their limits, they will contend on one node. But
1595  * freeing from a remote node costs more reclaim time because of memory latency.
1596  *
1597  * For now, we use round-robin. A better algorithm is welcome.
1598  */
1599 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1600 {
1601         int node;
1602
1603         mem_cgroup_may_update_nodemask(memcg);
1604         node = memcg->last_scanned_node;
1605
1606         node = next_node(node, memcg->scan_nodes);
1607         if (node == MAX_NUMNODES)
1608                 node = first_node(memcg->scan_nodes);
1609         /*
1610          * We call this when we hit the limit, not when pages are added to
1611          * the LRU. No LRU may hold pages because all pages are UNEVICTABLE,
1612          * or the memcg is too small and no pages are on the LRU. In that
1613          * case, we use the current node.
1614          */
1615         if (unlikely(node == MAX_NUMNODES))
1616                 node = numa_node_id();
1617
1618         memcg->last_scanned_node = node;
1619         return node;
1620 }
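/*
 * Illustration (hypothetical state): with scan_nodes = {0, 2} and
 * last_scanned_node == 0, next_node() returns 2; starting from 2 it
 * returns MAX_NUMNODES and we wrap around to first_node() == 0. With an
 * empty scan_nodes both lookups yield MAX_NUMNODES and we fall back to
 * numa_node_id().
 */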
1621
1622 /*
1623  * Check whether any node contains reclaimable pages.
1624  * For a quick scan, we make use of scan_nodes. This allows us to skip
1625  * unused nodes. But scan_nodes is lazily updated and may not contain
1626  * up-to-date information, so we need to double check.
1627  */
1628 bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1629 {
1630         int nid;
1631
1632         /*
1633          * quick check...making use of scan_node.
1634          * We can skip unused nodes.
1635          */
1636         if (!nodes_empty(memcg->scan_nodes)) {
1637                 for (nid = first_node(memcg->scan_nodes);
1638                      nid < MAX_NUMNODES;
1639                      nid = next_node(nid, memcg->scan_nodes)) {
1640
1641                         if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1642                                 return true;
1643                 }
1644         }
1645         /*
1646          * Check rest of nodes.
1647          */
1648         for_each_node_state(nid, N_HIGH_MEMORY) {
1649                 if (node_isset(nid, memcg->scan_nodes))
1650                         continue;
1651                 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1652                         return true;
1653         }
1654         return false;
1655 }
1656
1657 #else
1658 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1659 {
1660         return 0;
1661 }
1662
1663 bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1664 {
1665         return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1666 }
1667 #endif
1668
1669 /*
1670  * Scan the hierarchy if needed to reclaim memory. We remember the last child
1671  * we reclaimed from, so that we don't end up penalizing one child extensively
1672  * based on its position in the children list.
1673  *
1674  * root_memcg is the original ancestor that we've been reclaiming from.
1675  *
1676  * We give up and return to the caller when we visit root_memcg twice.
1677  * (other groups can be removed while we're walking....)
1678  *
1679  * If shrink==true, this returns immediately to avoid freeing too much.
1680  */
1681 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_memcg,
1682                                                 struct zone *zone,
1683                                                 gfp_t gfp_mask,
1684                                                 unsigned long reclaim_options,
1685                                                 unsigned long *total_scanned)
1686 {
1687         struct mem_cgroup *victim = NULL;
1688         int ret, total = 0;
1689         int loop = 0;
1690         bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1691         bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1692         bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1693         unsigned long excess;
1694         unsigned long nr_scanned;
1695
1696         excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
1697
1698         /* If memsw_is_minimum==1, swapping out is of no use. */
1699         if (!check_soft && !shrink && root_memcg->memsw_is_minimum)
1700                 noswap = true;
1701
1702         while (1) {
1703                 victim = mem_cgroup_iter(root_memcg, victim, true);
1704                 if (!victim) {
1705                         loop++;
1706                         /*
1707                          * We are not draining per cpu cached charges during
1708                          * soft limit reclaim because global reclaim doesn't
1709                          * care about charges. It tries to free some memory,
1710                          * and freeing charges would not help with that.
1711                          */
1712                         if (!check_soft && loop >= 1)
1713                                 drain_all_stock_async(root_memcg);
1714                         if (loop >= 2) {
1715                                 /*
1716                                  * If we have not been able to reclaim
1717                                  * anything, it might be because there are
1718                                  * no reclaimable pages under this hierarchy.
1719                                  */
1720                                 if (!check_soft || !total)
1721                                         break;
1722                                 /*
1723                                  * We want to do more targeted reclaim.
1724                                  * excess >> 2 is not so excessive that we
1725                                  * reclaim too much, nor so little that we keep
1726                                  * coming back to reclaim from this cgroup.
1727                                  */
1728                                 if (total >= (excess >> 2) ||
1729                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1730                                         break;
1731                         }
1732                         continue;
1733                 }
1734                 if (!mem_cgroup_reclaimable(victim, noswap)) {
1735                         /* this cgroup's local usage == 0 */
1736                         continue;
1737                 }
1738                 /* we use swappiness of local cgroup */
1739                 if (check_soft) {
1740                         ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1741                                 noswap, zone, &nr_scanned);
1742                         *total_scanned += nr_scanned;
1743                 } else
1744                         ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1745                                                 noswap);
1746                 total += ret;
1747                 /*
1748                  * When shrinking usage, we can't check whether we should stop
1749                  * here or reclaim more; that depends on the callers. last_scanned_child
1750                  * will work well enough to keep fairness under the tree.
1751                  */
1752                 if (shrink)
1753                         break;
1754                 if (check_soft) {
1755                         if (!res_counter_soft_limit_excess(&root_memcg->res))
1756                                 break;
1757                 } else if (mem_cgroup_margin(root_memcg))
1758                         break;
1759         }
1760         mem_cgroup_iter_break(root_memcg, victim);
1761         return total;
1762 }
1763
1764 /*
1765  * Check whether the OOM killer is already running under our hierarchy.
1766  * If someone is running, return false.
1767  * Has to be called with memcg_oom_lock held.
1768  */
1769 static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
1770 {
1771         struct mem_cgroup *iter, *failed = NULL;
1772
1773         for_each_mem_cgroup_tree(iter, memcg) {
1774                 if (iter->oom_lock) {
1775                         /*
1776                          * this subtree of our hierarchy is already locked
1777                          * so we cannot take the lock.
1778                          */
1779                         failed = iter;
1780                         mem_cgroup_iter_break(memcg, iter);
1781                         break;
1782                 } else
1783                         iter->oom_lock = true;
1784         }
1785
1786         if (!failed)
1787                 return true;
1788
1789         /*
1790          * OK, we failed to lock the whole subtree, so we have to clean up
1791          * what we set up before reaching the failed subtree.
1792          */
1793         for_each_mem_cgroup_tree(iter, memcg) {
1794                 if (iter == failed) {
1795                         mem_cgroup_iter_break(memcg, iter);
1796                         break;
1797                 }
1798                 iter->oom_lock = false;
1799         }
1800         return false;
1801 }
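/*
 * Example of the rollback above (hypothetical hierarchy A -> B -> C, with
 * C->oom_lock already set): the first walk locks A and B and stops at C;
 * the cleanup walk then visits the groups in the same order, clearing A
 * and B and stopping at the failed group, so no half-taken lock leaks.
 */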
1802
1803 /*
1804  * Has to be called with memcg_oom_lock held.
1805  */
1806 static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1807 {
1808         struct mem_cgroup *iter;
1809
1810         for_each_mem_cgroup_tree(iter, memcg)
1811                 iter->oom_lock = false;
1812         return 0;
1813 }
1814
1815 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1816 {
1817         struct mem_cgroup *iter;
1818
1819         for_each_mem_cgroup_tree(iter, memcg)
1820                 atomic_inc(&iter->under_oom);
1821 }
1822
1823 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1824 {
1825         struct mem_cgroup *iter;
1826
1827         /*
1828          * When a new child is created while the hierarchy is under oom,
1829          * mem_cgroup_oom_lock() may not be called. We have to use
1830          * atomic_add_unless() here.
1831          */
1832         for_each_mem_cgroup_tree(iter, memcg)
1833                 atomic_add_unless(&iter->under_oom, -1, 0);
1834 }
1835
1836 static DEFINE_SPINLOCK(memcg_oom_lock);
1837 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1838
1839 struct oom_wait_info {
1840         struct mem_cgroup *mem;
1841         wait_queue_t    wait;
1842 };
1843
1844 static int memcg_oom_wake_function(wait_queue_t *wait,
1845         unsigned mode, int sync, void *arg)
1846 {
1847         struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg,
1848                           *oom_wait_memcg;
1849         struct oom_wait_info *oom_wait_info;
1850
1851         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1852         oom_wait_memcg = oom_wait_info->mem;
1853
1854         /*
1855          * Both oom_wait_info->mem and wake_memcg are stable under us,
1856          * so we can use css_is_ancestor() without worrying about RCU.
1857          */
1858         if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
1859                 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
1860                 return 0;
1861         return autoremove_wake_function(wait, mode, sync, arg);
1862 }
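/*
 * In other words, a waiter is only woken when its memcg and the memcg
 * recovering from OOM are on the same hierarchy line, one being the
 * other's ancestor or the two being the same group; waiters in unrelated
 * hierarchies keep sleeping.
 */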
1863
1864 static void memcg_wakeup_oom(struct mem_cgroup *memcg)
1865 {
1866         /* for filtering, pass "memcg" as argument. */
1867         __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1868 }
1869
1870 static void memcg_oom_recover(struct mem_cgroup *memcg)
1871 {
1872         if (memcg && atomic_read(&memcg->under_oom))
1873                 memcg_wakeup_oom(memcg);
1874 }
1875
1876 /*
1877  * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1878  */
1879 bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask)
1880 {
1881         struct oom_wait_info owait;
1882         bool locked, need_to_kill;
1883
1884         owait.mem = memcg;
1885         owait.wait.flags = 0;
1886         owait.wait.func = memcg_oom_wake_function;
1887         owait.wait.private = current;
1888         INIT_LIST_HEAD(&owait.wait.task_list);
1889         need_to_kill = true;
1890         mem_cgroup_mark_under_oom(memcg);
1891
1892         /* At first, try to OOM lock hierarchy under memcg.*/
1893         spin_lock(&memcg_oom_lock);
1894         locked = mem_cgroup_oom_lock(memcg);
1895         /*
1896          * Even if signal_pending(), we can't quit the charge() loop without
1897          * accounting. So, UNINTERRUPTIBLE would be appropriate. But a SIGKILL
1898          * under OOM is always welcome, so use TASK_KILLABLE here.
1899          */
1900         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1901         if (!locked || memcg->oom_kill_disable)
1902                 need_to_kill = false;
1903         if (locked)
1904                 mem_cgroup_oom_notify(memcg);
1905         spin_unlock(&memcg_oom_lock);
1906
1907         if (need_to_kill) {
1908                 finish_wait(&memcg_oom_waitq, &owait.wait);
1909                 mem_cgroup_out_of_memory(memcg, mask);
1910         } else {
1911                 schedule();
1912                 finish_wait(&memcg_oom_waitq, &owait.wait);
1913         }
1914         spin_lock(&memcg_oom_lock);
1915         if (locked)
1916                 mem_cgroup_oom_unlock(memcg);
1917         memcg_wakeup_oom(memcg);
1918         spin_unlock(&memcg_oom_lock);
1919
1920         mem_cgroup_unmark_under_oom(memcg);
1921
1922         if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1923                 return false;
1924         /* Give the dying process a chance to run */
1925         schedule_timeout_uninterruptible(1);
1926         return true;
1927 }
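/*
 * Return value contract for the charge path: false tells the caller to give
 * up the charge loop (current is dying or was selected by the OOM killer),
 * true means we either invoked the killer or slept and were woken, so the
 * charge should be retried.
 */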
1928
1929 /*
1930  * Currently used to update mapped file statistics, but the routine can be
1931  * generalized to update other statistics as well.
1932  *
1933  * Notes: Race condition
1934  *
1935  * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1936  * it tends to be costly. Under certain conditions, though, we don't need
1937  * to do so _always_.
1938  *
1939  * Considering "charge", lock_page_cgroup() is not required because all
1940  * file-stat operations happen after a page is attached to the radix tree.
1941  * There is no race with "charge".
1942  *
1943  * Considering "uncharge", we know that memcg intentionally doesn't clear
1944  * pc->mem_cgroup at "uncharge". So, we always see a valid pc->mem_cgroup
1945  * even if we race with "uncharge". The statistics themselves are properly
1946  * handled by flags.
1947  *
1948  * Considering "move", this is the only case where we see a race. To make
1949  * the race window small, we check the MEM_CGROUP_ON_MOVE percpu value to
1950  * detect a possible race; if there is one, we take the lock.
1951  */
1952
1953 void mem_cgroup_update_page_stat(struct page *page,
1954                                  enum mem_cgroup_page_stat_item idx, int val)
1955 {
1956         struct mem_cgroup *memcg;
1957         struct page_cgroup *pc = lookup_page_cgroup(page);
1958         bool need_unlock = false;
1959         unsigned long uninitialized_var(flags);
1960
1961         if (unlikely(!pc))
1962                 return;
1963
1964         rcu_read_lock();
1965         memcg = pc->mem_cgroup;
1966         if (unlikely(!memcg || !PageCgroupUsed(pc)))
1967                 goto out;
1968         /* pc->mem_cgroup is unstable ? */
1969         if (unlikely(mem_cgroup_stealed(memcg)) || PageTransHuge(page)) {
1970                 /* take a lock against to access pc->mem_cgroup */
1971                 move_lock_page_cgroup(pc, &flags);
1972                 need_unlock = true;
1973                 memcg = pc->mem_cgroup;
1974                 if (!memcg || !PageCgroupUsed(pc))
1975                         goto out;
1976         }
1977
1978         switch (idx) {
1979         case MEMCG_NR_FILE_MAPPED:
1980                 if (val > 0)
1981                         SetPageCgroupFileMapped(pc);
1982                 else if (!page_mapped(page))
1983                         ClearPageCgroupFileMapped(pc);
1984                 idx = MEM_CGROUP_STAT_FILE_MAPPED;
1985                 break;
1986         default:
1987                 BUG();
1988         }
1989
1990         this_cpu_add(memcg->stat->count[idx], val);
1991
1992 out:
1993         if (unlikely(need_unlock))
1994                 move_unlock_page_cgroup(pc, &flags);
1995         rcu_read_unlock();
1996         return;
1997 }
1998 EXPORT_SYMBOL(mem_cgroup_update_page_stat);
1999
2000 /*
2001  * size of first charge trial. "32" comes from vmscan.c's magic value.
2002  * TODO: it may be necessary to use bigger numbers on big iron.
2003  */
2004 #define CHARGE_BATCH    32U
2005 struct memcg_stock_pcp {
2006         struct mem_cgroup *cached; /* this is never the root cgroup */
2007         unsigned int nr_pages;
2008         struct work_struct work;
2009         unsigned long flags;
2010 #define FLUSHING_CACHED_CHARGE  (0)
2011 };
2012 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2013 static DEFINE_MUTEX(percpu_charge_mutex);
2014
2015 /*
2016  * Try to consume stocked charge on this cpu. On success, one page is consumed
2017  * from the local stock and true is returned. If the stock is empty or holds
2018  * charges from a cgroup which is not the current target, false is returned
2019  * and the stock will be refilled later.
2020  */
2021 static bool consume_stock(struct mem_cgroup *memcg)
2022 {
2023         struct memcg_stock_pcp *stock;
2024         bool ret = true;
2025
2026         stock = &get_cpu_var(memcg_stock);
2027         if (memcg == stock->cached && stock->nr_pages)
2028                 stock->nr_pages--;
2029         else /* need to call res_counter_charge */
2030                 ret = false;
2031         put_cpu_var(memcg_stock);
2032         return ret;
2033 }
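/*
 * Typical fast-path usage, as in __mem_cgroup_try_charge() below (sketch):
 *
 *	if (nr_pages == 1 && consume_stock(memcg))
 *		goto done;
 *
 * Only single-page charges take this path; larger requests charge the
 * res_counter directly.
 */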
2034
2035 /*
2036  * Return the charges cached in the percpu stock to the res_counter and reset the cached information.
2037  */
2038 static void drain_stock(struct memcg_stock_pcp *stock)
2039 {
2040         struct mem_cgroup *old = stock->cached;
2041
2042         if (stock->nr_pages) {
2043                 unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2044
2045                 res_counter_uncharge(&old->res, bytes);
2046                 if (do_swap_account)
2047                         res_counter_uncharge(&old->memsw, bytes);
2048                 stock->nr_pages = 0;
2049         }
2050         stock->cached = NULL;
2051 }
2052
2053 /*
2054  * This must be called with preemption disabled, or by
2055  * a thread which is pinned to the local cpu.
2056  */
2057 static void drain_local_stock(struct work_struct *dummy)
2058 {
2059         struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2060         drain_stock(stock);
2061         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2062 }
2063
2064 /*
2065  * Cache charges taken from the res_counter in the local per-cpu area.
2066  * They will be consumed by the consume_stock() function later.
2067  */
2068 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2069 {
2070         struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2071
2072         if (stock->cached != memcg) { /* reset if necessary */
2073                 drain_stock(stock);
2074                 stock->cached = memcg;
2075         }
2076         stock->nr_pages += nr_pages;
2077         put_cpu_var(memcg_stock);
2078 }
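/*
 * Worked numbers for the stock scheme (illustrative): a single-page charge
 * that misses the stock charges CHARGE_BATCH (32) pages against the
 * res_counter at once, consumes one, and refills the stock with the other
 * 31, so the next 31 single-page charges on this cpu avoid the res_counter
 * entirely. See __mem_cgroup_try_charge() below.
 */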
2079
2080 /*
2081  * Drain all per-CPU charge caches for the given root_memcg and the
2082  * hierarchy subtree under it. The sync flag says whether we should block
2083  * until the work is done.
2084  */
2085 static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2086 {
2087         int cpu, curcpu;
2088
2089         /* Notify other cpus that system-wide "drain" is running */
2090         get_online_cpus();
2091         curcpu = get_cpu();
2092         for_each_online_cpu(cpu) {
2093                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2094                 struct mem_cgroup *memcg;
2095
2096                 memcg = stock->cached;
2097                 if (!memcg || !stock->nr_pages)
2098                         continue;
2099                 if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2100                         continue;
2101                 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2102                         if (cpu == curcpu)
2103                                 drain_local_stock(&stock->work);
2104                         else
2105                                 schedule_work_on(cpu, &stock->work);
2106                 }
2107         }
2108         put_cpu();
2109
2110         if (!sync)
2111                 goto out;
2112
2113         for_each_online_cpu(cpu) {
2114                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2115                 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2116                         flush_work(&stock->work);
2117         }
2118 out:
2119         put_online_cpus();
2120 }
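/*
 * The FLUSHING_CACHED_CHARGE bit set above prevents scheduling a second
 * work item while one is still pending for a cpu, and in the sync case it
 * tells us which work items are worth flush_work()-ing.
 * drain_local_stock() clears the bit once the stock is drained.
 */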
2121
2122 /*
2123  * Tries to drain stocked charges in other cpus. This function is asynchronous
2124  * and just schedules a work item per cpu for draining locally on each cpu.
2125  * The caller can expect some charges to return to the res_counter later,
2126  * but cannot wait for that.
2127  */
2128 static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2129 {
2130         /*
2131          * If someone is already draining, avoid adding more kworker runs.
2132          */
2133         if (!mutex_trylock(&percpu_charge_mutex))
2134                 return;
2135         drain_all_stock(root_memcg, false);
2136         mutex_unlock(&percpu_charge_mutex);
2137 }
2138
2139 /* This is a synchronous drain interface. */
2140 static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2141 {
2142         /* called when force_empty is called */
2143         mutex_lock(&percpu_charge_mutex);
2144         drain_all_stock(root_memcg, true);
2145         mutex_unlock(&percpu_charge_mutex);
2146 }
2147
2148 /*
2149  * This function drains the percpu counter values from a DEAD cpu and
2150  * moves them to the local cpu. Note that this function can be preempted.
2151  */
2152 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2153 {
2154         int i;
2155
2156         spin_lock(&memcg->pcp_counter_lock);
2157         for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
2158                 long x = per_cpu(memcg->stat->count[i], cpu);
2159
2160                 per_cpu(memcg->stat->count[i], cpu) = 0;
2161                 memcg->nocpu_base.count[i] += x;
2162         }
2163         for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2164                 unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2165
2166                 per_cpu(memcg->stat->events[i], cpu) = 0;
2167                 memcg->nocpu_base.events[i] += x;
2168         }
2169         /* need to clear ON_MOVE value, works as a kind of lock. */
2170         per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2171         spin_unlock(&memcg->pcp_counter_lock);
2172 }
2173
2174 static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
2175 {
2176         int idx = MEM_CGROUP_ON_MOVE;
2177
2178         spin_lock(&memcg->pcp_counter_lock);
2179         per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
2180         spin_unlock(&memcg->pcp_counter_lock);
2181 }
2182
2183 static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2184                                         unsigned long action,
2185                                         void *hcpu)
2186 {
2187         int cpu = (unsigned long)hcpu;
2188         struct memcg_stock_pcp *stock;
2189         struct mem_cgroup *iter;
2190
2191         if (action == CPU_ONLINE) {
2192                 for_each_mem_cgroup(iter)
2193                         synchronize_mem_cgroup_on_move(iter, cpu);
2194                 return NOTIFY_OK;
2195         }
2196
2197         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2198                 return NOTIFY_OK;
2199
2200         for_each_mem_cgroup(iter)
2201                 mem_cgroup_drain_pcp_counter(iter, cpu);
2202
2203         stock = &per_cpu(memcg_stock, cpu);
2204         drain_stock(stock);
2205         return NOTIFY_OK;
2206 }
2207
2208
2209 /* See __mem_cgroup_try_charge() for details */
2210 enum {
2211         CHARGE_OK,              /* success */
2212         CHARGE_RETRY,           /* need to retry but retry is not bad */
2213         CHARGE_NOMEM,           /* we can't do more. return -ENOMEM */
2214         CHARGE_WOULDBLOCK,      /* __GFP_WAIT wasn't set and not enough res. */
2215         CHARGE_OOM_DIE,         /* the current is killed because of OOM */
2216 };
2217
2218 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2219                                 unsigned int nr_pages, bool oom_check)
2220 {
2221         unsigned long csize = nr_pages * PAGE_SIZE;
2222         struct mem_cgroup *mem_over_limit;
2223         struct res_counter *fail_res;
2224         unsigned long flags = 0;
2225         int ret;
2226
2227         ret = res_counter_charge(&memcg->res, csize, &fail_res);
2228
2229         if (likely(!ret)) {
2230                 if (!do_swap_account)
2231                         return CHARGE_OK;
2232                 ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2233                 if (likely(!ret))
2234                         return CHARGE_OK;
2235
2236                 res_counter_uncharge(&memcg->res, csize);
2237                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2238                 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2239         } else
2240                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2241         /*
2242          * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2243          * of regular pages (CHARGE_BATCH), or a single regular page (1).
2244          *
2245          * Never reclaim on behalf of optional batching, retry with a
2246          * single page instead.
2247          */
2248         if (nr_pages == CHARGE_BATCH)
2249                 return CHARGE_RETRY;
2250
2251         if (!(gfp_mask & __GFP_WAIT))
2252                 return CHARGE_WOULDBLOCK;
2253
2254         ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
2255                                               gfp_mask, flags, NULL);
2256         if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2257                 return CHARGE_RETRY;
2258         /*
2259          * Even though the limit is exceeded at this point, reclaim
2260          * may have been able to free some pages.  Retry the charge
2261          * before killing the task.
2262          *
2263          * Only for regular pages, though: huge pages are rather
2264          * unlikely to succeed so close to the limit, and we fall back
2265          * to regular pages anyway in case of failure.
2266          */
2267         if (nr_pages == 1 && ret)
2268                 return CHARGE_RETRY;
2269
2270         /*
2271          * During task move, charge accounts can be doubly counted. So, it's
2272          * better to wait until the end of task_move if something is going on.
2273          */
2274         if (mem_cgroup_wait_acct_move(mem_over_limit))
2275                 return CHARGE_RETRY;
2276
2277         /* If we don't need to call the oom-killer at all, return immediately */
2278         if (!oom_check)
2279                 return CHARGE_NOMEM;
2280         /* check OOM */
2281         if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2282                 return CHARGE_OOM_DIE;
2283
2284         return CHARGE_RETRY;
2285 }
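/*
 * How the caller reacts (see the loop in __mem_cgroup_try_charge() below):
 * CHARGE_OK ends the loop, CHARGE_RETRY restarts it with the exact
 * nr_pages instead of a batch, CHARGE_WOULDBLOCK fails with -ENOMEM,
 * CHARGE_NOMEM fails with -ENOMEM unless oom handling is allowed, and
 * CHARGE_OOM_DIE makes the charge be bypassed for the dying task.
 */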
2286
2287 /*
2288  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2289  * the oom-killer can be invoked.
2290  */
2291 static int __mem_cgroup_try_charge(struct mm_struct *mm,
2292                                    gfp_t gfp_mask,
2293                                    unsigned int nr_pages,
2294                                    struct mem_cgroup **ptr,
2295                                    bool oom)
2296 {
2297         unsigned int batch = max(CHARGE_BATCH, nr_pages);
2298         int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2299         struct mem_cgroup *memcg = NULL;
2300         int ret;
2301
2302         /*
2303          * Unlike the global VM's OOM kill, we're not in a system-level
2304          * memory shortage. So, let a dying process go ahead, in addition
2305          * to a MEMDIE process.
2306          */
2307         if (unlikely(test_thread_flag(TIF_MEMDIE)
2308                      || fatal_signal_pending(current)))
2309                 goto bypass;
2310
2311         /*
2312          * We always charge the cgroup the mm_struct belongs to.
2313          * The mm_struct's mem_cgroup changes on task migration if the
2314          * thread group leader migrates. It's possible that mm is not
2315          * set, if so charge the init_mm (happens for pagecache usage).
2316          */
2317         if (!*ptr && !mm)
2318                 goto bypass;
2319 again:
2320         if (*ptr) { /* css should be a valid one */
2321                 memcg = *ptr;
2322                 VM_BUG_ON(css_is_removed(&memcg->css));
2323                 if (mem_cgroup_is_root(memcg))
2324                         goto done;
2325                 if (nr_pages == 1 && consume_stock(memcg))
2326                         goto done;
2327                 css_get(&memcg->css);
2328         } else {
2329                 struct task_struct *p;
2330
2331                 rcu_read_lock();
2332                 p = rcu_dereference(mm->owner);
2333                 /*
2334                  * Because we don't have task_lock(), "p" can exit.
2335                  * In that case, "memcg" can point to root, or p can be NULL
2336                  * due to a race with swapoff. Then, we have a small risk of
2337                  * mis-accounting. But such mis-accounting by race always
2338                  * happens because we don't hold cgroup_mutex(); taking it
2339                  * would be overkill, so we allow that small race here.
2340                  * (*) swapoff et al. will charge against the mm struct, not
2341                  * against the task struct. So, mm->owner can be NULL.
2342                  */
2343                 memcg = mem_cgroup_from_task(p);
2344                 if (!memcg || mem_cgroup_is_root(memcg)) {
2345                         rcu_read_unlock();
2346                         goto done;
2347                 }
2348                 if (nr_pages == 1 && consume_stock(memcg)) {
2349                         /*
2350                          * It seems dangerous to access memcg without css_get().
2351                          * But considering how consume_stock works, it's not
2352                          * necessary. If consume_stock succeeds, some charges
2353                          * from this memcg are cached on this cpu. So, we
2354                          * don't need to call css_get()/css_tryget() before
2355                          * calling consume_stock().
2356                          */
2357                         rcu_read_unlock();
2358                         goto done;
2359                 }
2360                 /* after here, we may be blocked. we need to get refcnt */
2361                 if (!css_tryget(&memcg->css)) {
2362                         rcu_read_unlock();
2363                         goto again;
2364                 }
2365                 rcu_read_unlock();
2366         }
2367
2368         do {
2369                 bool oom_check;
2370
2371                 /* If killed, bypass charge */
2372                 if (fatal_signal_pending(current)) {
2373                         css_put(&memcg->css);
2374                         goto bypass;
2375                 }
2376
2377                 oom_check = false;
2378                 if (oom && !nr_oom_retries) {
2379                         oom_check = true;
2380                         nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2381                 }
2382
2383                 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
2384                 switch (ret) {
2385                 case CHARGE_OK:
2386                         break;
2387                 case CHARGE_RETRY: /* not in OOM situation but retry */
2388                         batch = nr_pages;
2389                         css_put(&memcg->css);
2390                         memcg = NULL;
2391                         goto again;
2392                 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2393                         css_put(&memcg->css);
2394                         goto nomem;
2395                 case CHARGE_NOMEM: /* OOM routine works */
2396                         if (!oom) {
2397                                 css_put(&memcg->css);
2398                                 goto nomem;
2399                         }
2400                         /* If oom, we never return -ENOMEM */
2401                         nr_oom_retries--;
2402                         break;
2403                 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2404                         css_put(&memcg->css);
2405                         goto bypass;
2406                 }
2407         } while (ret != CHARGE_OK);
2408
2409         if (batch > nr_pages)
2410                 refill_stock(memcg, batch - nr_pages);
2411         css_put(&memcg->css);
2412 done:
2413         *ptr = memcg;
2414         return 0;
2415 nomem:
2416         *ptr = NULL;
2417         return -ENOMEM;
2418 bypass:
2419         *ptr = NULL;
2420         return 0;
2421 }
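/*
 * There are three ways out of the above: "done" returns 0 with *ptr set to
 * the charged memcg, "nomem" returns -ENOMEM with *ptr cleared, and
 * "bypass" also returns 0 but with *ptr == NULL, so a dying task skips
 * accounting entirely instead of being blocked on reclaim.
 */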
2422
2423 /*
2424  * Sometimes we have to undo a charge we got by try_charge().
2425  * This function is for that: it uncharges and puts the css refcnt
2426  * gotten by try_charge().
2427  */
2428 static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2429                                        unsigned int nr_pages)
2430 {
2431         if (!mem_cgroup_is_root(memcg)) {
2432                 unsigned long bytes = nr_pages * PAGE_SIZE;
2433
2434                 res_counter_uncharge(&memcg->res, bytes);
2435                 if (do_swap_account)
2436                         res_counter_uncharge(&memcg->memsw, bytes);
2437         }
2438 }
2439
2440 /*
2441  * A helper function to get a mem_cgroup from an ID. Must be called under
2442  * rcu_read_lock(). The caller must check css_is_removed() or the like if
2443  * that is a concern. (Dropping a refcnt from swap can be called against a
2444  * removed memcg.)
2445  */
2446 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2447 {
2448         struct cgroup_subsys_state *css;
2449
2450         /* ID 0 is unused ID */
2451         if (!id)
2452                 return NULL;
2453         css = css_lookup(&mem_cgroup_subsys, id);
2454         if (!css)
2455                 return NULL;
2456         return container_of(css, struct mem_cgroup, css);
2457 }
2458
2459 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2460 {
2461         struct mem_cgroup *memcg = NULL;
2462         struct page_cgroup *pc;
2463         unsigned short id;
2464         swp_entry_t ent;
2465
2466         VM_BUG_ON(!PageLocked(page));
2467
2468         pc = lookup_page_cgroup(page);
2469         lock_page_cgroup(pc);
2470         if (PageCgroupUsed(pc)) {
2471                 memcg = pc->mem_cgroup;
2472                 if (memcg && !css_tryget(&memcg->css))
2473                         memcg = NULL;
2474         } else if (PageSwapCache(page)) {
2475                 ent.val = page_private(page);
2476                 id = lookup_swap_cgroup(ent);
2477                 rcu_read_lock();
2478                 memcg = mem_cgroup_lookup(id);
2479                 if (memcg && !css_tryget(&memcg->css))
2480                         memcg = NULL;
2481                 rcu_read_unlock();
2482         }
2483         unlock_page_cgroup(pc);
2484         return memcg;
2485 }
2486
2487 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2488                                        struct page *page,
2489                                        unsigned int nr_pages,
2490                                        struct page_cgroup *pc,
2491                                        enum charge_type ctype)
2492 {
2493         lock_page_cgroup(pc);
2494         if (unlikely(PageCgroupUsed(pc))) {
2495                 unlock_page_cgroup(pc);
2496                 __mem_cgroup_cancel_charge(memcg, nr_pages);
2497                 return;
2498         }
2499         /*
2500          * we don't need page_cgroup_lock for tail pages, because they are not
2501          * accessed by any other context at this point.
2502          */
2503         pc->mem_cgroup = memcg;
2504         /*
2505          * We access a page_cgroup asynchronously without lock_page_cgroup().
2506          * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2507          * is accessed after testing USED bit. To make pc->mem_cgroup visible
2508          * before USED bit, we need memory barrier here.
2509          * See mem_cgroup_add_lru_list(), etc.
2510          */
2511         smp_wmb();
2512         switch (ctype) {
2513         case MEM_CGROUP_CHARGE_TYPE_CACHE:
2514         case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2515                 SetPageCgroupCache(pc);
2516                 SetPageCgroupUsed(pc);
2517                 break;
2518         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2519                 ClearPageCgroupCache(pc);
2520                 SetPageCgroupUsed(pc);
2521                 break;
2522         default:
2523                 break;
2524         }
2525
2526         mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
2527         unlock_page_cgroup(pc);
2528         /*
2529          * "charge_statistics" updated the event counter. Then, check it.
2530          * Insert the ancestor (and the ancestor's ancestors) into the
2531          * softlimit RB-tree if they exceed the softlimit.
2532          */
2533         memcg_check_events(memcg, page);
2534 }
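/*
 * Note: the smp_wmb() in __mem_cgroup_commit_charge() orders the
 * pc->mem_cgroup store before the SetPageCgroupUsed() store, so any reader
 * that tests the USED bit first (with a matching read barrier on its side)
 * is guaranteed to see a valid pc->mem_cgroup.
 */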
2535
2536 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2537
2538 #define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2539                         (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2540 /*
2541  * Because tail pages are not marked as "used", set that flag here. We're
2542  * under zone->lru_lock, 'splitting on pmd' and compound_lock.
2543  */
2544 void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
2545 {
2546         struct page_cgroup *head_pc = lookup_page_cgroup(head);
2547         struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
2548         unsigned long flags;
2549
2550         if (mem_cgroup_disabled())
2551                 return;
2552         /*
2553          * We have no races with charge/uncharge but will have races with
2554          * page state accounting.
2555          */
2556         move_lock_page_cgroup(head_pc, &flags);
2557
2558         tail_pc->mem_cgroup = head_pc->mem_cgroup;
2559         smp_wmb(); /* see __commit_charge() */
2560         if (PageCgroupAcctLRU(head_pc)) {
2561                 enum lru_list lru;
2562                 struct mem_cgroup_per_zone *mz;
2563
2564                 /*
2565                  * LRU flags cannot be copied because we need to add the tail
2566                  * page to the LRU by a generic call, and our hook will be
2567                  * called. We hold lru_lock, so reduce the counter directly.
2568                  */
2569                 lru = page_lru(head);
2570                 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
2571                 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
2572         }
2573         tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2574         move_unlock_page_cgroup(head_pc, &flags);
2575 }
2576 #endif
2577
2578 /**
2579  * mem_cgroup_move_account - move account of the page
2580  * @page: the page
2581  * @nr_pages: number of regular pages (>1 for huge pages)
2582  * @pc: page_cgroup of the page.
2583  * @from: mem_cgroup which the page is moved from.
2584  * @to: mem_cgroup which the page is moved to. @from != @to.
2585  * @uncharge: whether we should call uncharge and css_put against @from.
2586  *
2587  * The caller must confirm following.
2588  * - page is not on LRU (isolate_page() is useful.)
2589  * - compound_lock is held when nr_pages > 1
2590  *
2591  * This function does neither "charge" nor css_get to the new cgroup. That should
2592  * be done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge is
2593  * true, this function does "uncharge" from the old cgroup; if @uncharge is
2594  * false, it doesn't, so the caller should do the "uncharge".
2595  */
2596 static int mem_cgroup_move_account(struct page *page,
2597                                    unsigned int nr_pages,
2598                                    struct page_cgroup *pc,
2599                                    struct mem_cgroup *from,
2600                                    struct mem_cgroup *to,
2601                                    bool uncharge)
2602 {
2603         unsigned long flags;
2604         int ret;
2605
2606         VM_BUG_ON(from == to);
2607         VM_BUG_ON(PageLRU(page));
2608         /*
2609          * The page is isolated from LRU. So, collapse function
2610          * will not handle this page. But page splitting can happen.
2611          * Do this check under compound_page_lock(). The caller should
2612          * hold it.
2613          */
2614         ret = -EBUSY;
2615         if (nr_pages > 1 && !PageTransHuge(page))
2616                 goto out;
2617
2618         lock_page_cgroup(pc);
2619
2620         ret = -EINVAL;
2621         if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2622                 goto unlock;
2623
2624         move_lock_page_cgroup(pc, &flags);
2625
2626         if (PageCgroupFileMapped(pc)) {
2627                 /* Update mapped_file data for mem_cgroup */
2628                 preempt_disable();
2629                 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2630                 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2631                 preempt_enable();
2632         }
2633         mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
2634         if (uncharge)
2635                 /* This is not "cancel", but cancel_charge does all we need. */
2636                 __mem_cgroup_cancel_charge(from, nr_pages);
2637
2638         /* caller should have done css_get */
2639         pc->mem_cgroup = to;
2640         mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
2641         /*
2642          * We charge against "to", which may not have any tasks. Then, "to"
2643          * can be under rmdir(). But in the current implementation, the callers
2644          * of this function are just force_empty() and move charge, so it's
2645          * guaranteed that "to" is never removed. So, we don't check the rmdir
2646          * status here.
2647          */
2648         move_unlock_page_cgroup(pc, &flags);
2649         ret = 0;
2650 unlock:
2651         unlock_page_cgroup(pc);
2652         /*
2653          * check events
2654          */
2655         memcg_check_events(to, page);
2656         memcg_check_events(from, page);
2657 out:
2658         return ret;
2659 }
2660
2661 /*
2662  * move charges to its parent.
2663  */
2664
2665 static int mem_cgroup_move_parent(struct page *page,
2666                                   struct page_cgroup *pc,
2667                                   struct mem_cgroup *child,
2668                                   gfp_t gfp_mask)
2669 {
2670         struct cgroup *cg = child->css.cgroup;
2671         struct cgroup *pcg = cg->parent;
2672         struct mem_cgroup *parent;
2673         unsigned int nr_pages;
2674         unsigned long uninitialized_var(flags);
2675         int ret;
2676
2677         /* Is ROOT ? */
2678         if (!pcg)
2679                 return -EINVAL;
2680
2681         ret = -EBUSY;
2682         if (!get_page_unless_zero(page))
2683                 goto out;
2684         if (isolate_lru_page(page))
2685                 goto put;
2686
2687         nr_pages = hpage_nr_pages(page);
2688
2689         parent = mem_cgroup_from_cont(pcg);
2690         ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
2691         if (ret || !parent)
2692                 goto put_back;
2693
2694         if (nr_pages > 1)
2695                 flags = compound_lock_irqsave(page);
2696
2697         ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
2698         if (ret)
2699                 __mem_cgroup_cancel_charge(parent, nr_pages);
2700
2701         if (nr_pages > 1)
2702                 compound_unlock_irqrestore(page, flags);
2703 put_back:
2704         putback_lru_page(page);
2705 put:
2706         put_page(page);
2707 out:
2708         return ret;
2709 }
2710
2711 /*
2712  * Charge the memory controller for page usage.
2713  * Return
2714  * 0 if the charge was successful
2715  * < 0 if the cgroup is over its limit
2716  */
2717 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2718                                 gfp_t gfp_mask, enum charge_type ctype)
2719 {
2720         struct mem_cgroup *memcg = NULL;
2721         unsigned int nr_pages = 1;
2722         struct page_cgroup *pc;
2723         bool oom = true;
2724         int ret;
2725
2726         if (PageTransHuge(page)) {
2727                 nr_pages <<= compound_order(page);
2728                 VM_BUG_ON(!PageTransHuge(page));
2729                 /*
2730                  * Never OOM-kill a process for a huge page.  The
2731                  * fault handler will fall back to regular pages.
2732                  */
2733                 oom = false;
2734         }
2735
2736         pc = lookup_page_cgroup(page);
2737         BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
2738
2739         ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
2740         if (ret || !memcg)
2741                 return ret;
2742
2743         __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
2744         return 0;
2745 }
2746
2747 int mem_cgroup_newpage_charge(struct page *page,
2748                               struct mm_struct *mm, gfp_t gfp_mask)
2749 {
2750         if (mem_cgroup_disabled())
2751                 return 0;
2752         /*
2753          * If already mapped, we don't have to account.
2754          * If page cache, page->mapping has address_space.
2755          * But page->mapping may hold an out-of-use anon_vma pointer;
2756          * detect that with a PageAnon() check. A newly-mapped-anon page's
2757          * page->mapping is NULL.
2758          */
2759         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2760                 return 0;
2761         if (unlikely(!mm))
2762                 mm = &init_mm;
2763         return mem_cgroup_charge_common(page, mm, gfp_mask,
2764                                 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2765 }
2766
2767 static void
2768 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2769                                         enum charge_type ctype);
2770
2771 static void
2772 __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
2773                                         enum charge_type ctype)
2774 {
2775         struct page_cgroup *pc = lookup_page_cgroup(page);
2776         /*
2777          * In some cases, SwapCache and FUSE (splice_buf->radixtree), the page
2778          * is already on the LRU. It means the page may be on some other
2779          * page_cgroup's LRU. Take care of that.
2780          */
2781         mem_cgroup_lru_del_before_commit(page);
2782         __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
2783         mem_cgroup_lru_add_after_commit(page);
2784         return;
2785 }
2786
2787 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2788                                 gfp_t gfp_mask)
2789 {
2790         struct mem_cgroup *memcg = NULL;
2791         int ret;
2792
2793         if (mem_cgroup_disabled())
2794                 return 0;
2795         if (PageCompound(page))
2796                 return 0;
2797
2798         if (unlikely(!mm))
2799                 mm = &init_mm;
2800
2801         if (page_is_file_cache(page)) {
2802                 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
2803                 if (ret || !memcg)
2804                         return ret;
2805
2806                 /*
2807                  * FUSE reuses pages without going through the final
2808                  * put that would remove them from the LRU list, make
2809                  * sure that they get relinked properly.
2810                  */
2811                 __mem_cgroup_commit_charge_lrucare(page, memcg,
2812                                         MEM_CGROUP_CHARGE_TYPE_CACHE);
2813                 return ret;
2814         }
2815         /* shmem */
2816         if (PageSwapCache(page)) {
2817                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
2818                 if (!ret)
2819                         __mem_cgroup_commit_charge_swapin(page, memcg,
2820                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2821         } else
2822                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2823                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2824
2825         return ret;
2826 }
2827
2828 /*
2829  * While swap-in, try_charge -> commit or cancel, the page is locked.
2830  * And when try_charge() successfully returns, one refcnt to the memcg, not
2831  * tied to a struct page_cgroup, is acquired. This refcnt will be consumed by
2832  * "commit()" or dropped by "cancel()".
2833  */
2834 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2835                                  struct page *page,
2836                                  gfp_t mask, struct mem_cgroup **ptr)
2837 {
2838         struct mem_cgroup *memcg;
2839         int ret;
2840
2841         *ptr = NULL;
2842
2843         if (mem_cgroup_disabled())
2844                 return 0;
2845
2846         if (!do_swap_account)
2847                 goto charge_cur_mm;
2848         /*
2849          * A racing thread's fault, or swapoff, may have already updated
2850          * the pte, and even removed page from swap cache: in those cases
2851          * do_swap_page()'s pte_same() test will fail; but there's also a
2852          * KSM case which does need to charge the page.
2853          */
2854         if (!PageSwapCache(page))
2855                 goto charge_cur_mm;
2856         memcg = try_get_mem_cgroup_from_page(page);
2857         if (!memcg)
2858                 goto charge_cur_mm;
2859         *ptr = memcg;
2860         ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
2861         css_put(&memcg->css);
2862         return ret;
2863 charge_cur_mm:
2864         if (unlikely(!mm))
2865                 mm = &init_mm;
2866         return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
2867 }
2868
2869 static void
2870 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2871                                         enum charge_type ctype)
2872 {
2873         if (mem_cgroup_disabled())
2874                 return;
2875         if (!ptr)
2876                 return;
2877         cgroup_exclude_rmdir(&ptr->css);
2878
2879         __mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
2880         /*
2881          * Now the swap is in memory. This means this page may be
2882          * counted both as mem and swap, a double count.
2883          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2884          * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
2885          * may call delete_from_swap_cache() before we reach here.
2886          */
2887         if (do_swap_account && PageSwapCache(page)) {
2888                 swp_entry_t ent = {.val = page_private(page)};
2889                 unsigned short id;
2890                 struct mem_cgroup *memcg;
2891
2892                 id = swap_cgroup_record(ent, 0);
2893                 rcu_read_lock();
2894                 memcg = mem_cgroup_lookup(id);
2895                 if (memcg) {
2896                         /*
2897                          * This recorded memcg can be an obsolete one. So, avoid
2898                          * calling css_tryget().
2899                          */
2900                         if (!mem_cgroup_is_root(memcg))
2901                                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2902                         mem_cgroup_swap_statistics(memcg, false);
2903                         mem_cgroup_put(memcg);
2904                 }
2905                 rcu_read_unlock();
2906         }
2907         /*
2908          * At swapin, we may charge against a cgroup which has no tasks.
2909          * So, rmdir()->pre_destroy() can be called while we do this charge.
2910          * In that case, we need to call pre_destroy() again. Check that here.
2911          */
2912         cgroup_release_and_wakeup_rmdir(&ptr->css);
2913 }
2914
2915 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2916 {
2917         __mem_cgroup_commit_charge_swapin(page, ptr,
2918                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
2919 }
2920
2921 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
2922 {
2923         if (mem_cgroup_disabled())
2924                 return;
2925         if (!memcg)
2926                 return;
2927         __mem_cgroup_cancel_charge(memcg, 1);
2928 }
2929
2930 static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
2931                                    unsigned int nr_pages,
2932                                    const enum charge_type ctype)
2933 {
2934         struct memcg_batch_info *batch = NULL;
2935         bool uncharge_memsw = true;
2936
2937         /* If swapout, usage of swap doesn't decrease */
2938         if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2939                 uncharge_memsw = false;
2940
2941         batch = &current->memcg_batch;
2942         /*
2943          * Usually, we do css_get() when we remember a memcg pointer.
2944          * But in this case, we keep res->usage until the end of a series of
2945          * uncharges. Then, it's ok to ignore the memcg's refcnt.
2946          */
2947         if (!batch->memcg)
2948                 batch->memcg = memcg;
2949         /*
2950          * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
2951          * In those cases, pages freed back to back can be expected to be in
2952          * the same cgroup, so we have a chance to coalesce uncharges.
2953          * But we uncharge one by one if the task was OOM-killed (TIF_MEMDIE),
2954          * because then we want each uncharge to take effect as soon as possible.
2955          */
2956
2957         if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2958                 goto direct_uncharge;
2959
2960         if (nr_pages > 1)
2961                 goto direct_uncharge;
2962
2963         /*
2964          * In the typical case, batch->memcg == memcg, so we can merge a
2965          * series of uncharges into a single res_counter uncharge.
2966          * If not, we uncharge the res_counter one by one.
2967          */
2968         if (batch->memcg != memcg)
2969                 goto direct_uncharge;
2970         /* remember freed charge and uncharge it later */
2971         batch->nr_pages++;
2972         if (uncharge_memsw)
2973                 batch->memsw_nr_pages++;
2974         return;
2975 direct_uncharge:
2976         res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
2977         if (uncharge_memsw)
2978                 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
2979         if (unlikely(batch->memcg != memcg))
2980                 memcg_oom_recover(memcg);
2981         return;
2982 }
2983
2984 /*
2985  * uncharge if !page_mapped(page)
2986  */
2987 static struct mem_cgroup *
2988 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2989 {
2990         struct mem_cgroup *memcg = NULL;
2991         unsigned int nr_pages = 1;
2992         struct page_cgroup *pc;
2993
2994         if (mem_cgroup_disabled())
2995                 return NULL;
2996
2997         if (PageSwapCache(page))
2998                 return NULL;
2999
3000         if (PageTransHuge(page)) {
3001                 nr_pages <<= compound_order(page);
3002                 VM_BUG_ON(!PageTransHuge(page));
3003         }
3004         /*
3005          * Check if our page_cgroup is valid
3006          */
3007         pc = lookup_page_cgroup(page);
3008         if (unlikely(!pc || !PageCgroupUsed(pc)))
3009                 return NULL;
3010
3011         lock_page_cgroup(pc);
3012
3013         memcg = pc->mem_cgroup;
3014
3015         if (!PageCgroupUsed(pc))
3016                 goto unlock_out;
3017
3018         switch (ctype) {
3019         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
3020         case MEM_CGROUP_CHARGE_TYPE_DROP:
3021                 /* See mem_cgroup_prepare_migration() */
3022                 if (page_mapped(page) || PageCgroupMigration(pc))
3023                         goto unlock_out;
3024                 break;
3025         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
3026                 if (!PageAnon(page)) {  /* Shared memory */
3027                         if (page->mapping && !page_is_file_cache(page))
3028                                 goto unlock_out;
3029                 } else if (page_mapped(page)) /* Anon */
3030                         goto unlock_out;
3031                 break;
3032         default:
3033                 break;
3034         }
3035
3036         mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
3037
3038         ClearPageCgroupUsed(pc);
3039         /*
3040          * pc->mem_cgroup is not cleared here; it will still be accessed when
3041          * the page is freed from the LRU. This is safe because an uncharged
3042          * page is expected not to be reused (it is freed soon). The exception
3043          * is SwapCache, which is handled by special functions.
3044          */
3045
3046         unlock_page_cgroup(pc);
3047         /*
3048          * Even after unlocking, memcg->res.usage is still held here, so this
3049          * memcg cannot be freed.
3050          */
3051         memcg_check_events(memcg, page);
3052         if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
3053                 mem_cgroup_swap_statistics(memcg, true);
3054                 mem_cgroup_get(memcg);
3055         }
3056         if (!mem_cgroup_is_root(memcg))
3057                 mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
3058
3059         return memcg;
3060
3061 unlock_out:
3062         unlock_page_cgroup(pc);
3063         return NULL;
3064 }
3065
3066 void mem_cgroup_uncharge_page(struct page *page)
3067 {
3068         /* early check. */
3069         if (page_mapped(page))
3070                 return;
3071         if (page->mapping && !PageAnon(page))
3072                 return;
3073         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
3074 }
3075
3076 void mem_cgroup_uncharge_cache_page(struct page *page)
3077 {
3078         VM_BUG_ON(page_mapped(page));
3079         VM_BUG_ON(page->mapping);
3080         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3081 }
3082
3083 /*
3084  * mem_cgroup_uncharge_start/end() are called in unmap_page_range() and the
3085  * inode invalidate/truncate paths. In those cases, pages are freed back to
3086  * back and can be expected to be in the same memcg. Each such caller itself
3087  * limits the number of pages freed at once, so uncharge_start/end() pair up
3088  * properly. The pair may be nested (e.g. two levels deep) in one context.
3089  */
3090
3091 void mem_cgroup_uncharge_start(void)
3092 {
3093         current->memcg_batch.do_batch++;
3094         /* Nesting is allowed; only the outermost call resets the state. */
3095         if (current->memcg_batch.do_batch == 1) {
3096                 current->memcg_batch.memcg = NULL;
3097                 current->memcg_batch.nr_pages = 0;
3098                 current->memcg_batch.memsw_nr_pages = 0;
3099         }
3100 }
3101
3102 void mem_cgroup_uncharge_end(void)
3103 {
3104         struct memcg_batch_info *batch = &current->memcg_batch;
3105
3106         if (!batch->do_batch)
3107                 return;
3108
3109         batch->do_batch--;
3110         if (batch->do_batch) /* If stacked, do nothing. */
3111                 return;
3112
3113         if (!batch->memcg)
3114                 return;
3115         /*
3116          * This "batch->memcg" is valid without any css_get/put etc.,
3117          * because we still hold the charges hidden behind us.
3118          */
3119         if (batch->nr_pages)
3120                 res_counter_uncharge(&batch->memcg->res,
3121                                      batch->nr_pages * PAGE_SIZE);
3122         if (batch->memsw_nr_pages)
3123                 res_counter_uncharge(&batch->memcg->memsw,
3124                                      batch->memsw_nr_pages * PAGE_SIZE);
3125         memcg_oom_recover(batch->memcg);
3126         /* forget this pointer (for sanity check) */
3127         batch->memcg = NULL;
3128 }
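/*
 * A minimal usage sketch (illustrative only): a caller about to free many
 * pages back to back brackets the frees, so the res_counter is decremented
 * once per batch instead of once per page.
 *
 *	mem_cgroup_uncharge_start();
 *	for (i = 0; i < nr; i++)
 *		mem_cgroup_uncharge_page(pages[i]);	// queued while batching
 *	mem_cgroup_uncharge_end();	// one res_counter_uncharge() for the batch
 */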
3129
3130 #ifdef CONFIG_SWAP
3131 /*
3132  * Called after __delete_from_swap_cache(); drops the "page" account.
3133  * The memcg information is recorded in the swap_cgroup of "ent".
3134  */
3135 void
3136 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3137 {
3138         struct mem_cgroup *memcg;
3139         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3140
3141         if (!swapout) /* this was a swap cache but the swap is unused ! */
3142                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3143
3144         memcg = __mem_cgroup_uncharge_common(page, ctype);
3145
3146         /*
3147          * Record the memcg information. If swapout && memcg != NULL,
3148          * mem_cgroup_get() was already called in uncharge().
3149          */
3150         if (do_swap_account && swapout && memcg)
3151                 swap_cgroup_record(ent, css_id(&memcg->css));
3152 }
3153 #endif
3154
3155 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3156 /*
3157  * Called from swap_entry_free(). Removes the record in swap_cgroup and
3158  * uncharges the "memsw" account.
3159  */
3160 void mem_cgroup_uncharge_swap(swp_entry_t ent)
3161 {
3162         struct mem_cgroup *memcg;
3163         unsigned short id;
3164
3165         if (!do_swap_account)
3166                 return;
3167
3168         id = swap_cgroup_record(ent, 0);
3169         rcu_read_lock();
3170         memcg = mem_cgroup_lookup(id);
3171         if (memcg) {
3172                 /*
3173                  * We uncharge this because the swap entry is freed.
3174                  * This memcg may be an obsolete one, so avoid calling css_tryget().
3175                  */
3176                 if (!mem_cgroup_is_root(memcg))
3177                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3178                 mem_cgroup_swap_statistics(memcg, false);
3179                 mem_cgroup_put(memcg);
3180         }
3181         rcu_read_unlock();
3182 }
3183
3184 /**
3185  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3186  * @entry: swap entry to be moved
3187  * @from:  mem_cgroup which the entry is moved from
3188  * @to:  mem_cgroup which the entry is moved to
3189  * @need_fixup: whether we should fixup res_counters and refcounts.
3190  *
3191  * It succeeds only when the swap_cgroup's record for this entry is the same
3192  * as the mem_cgroup's id of @from.
3193  *
3194  * Returns 0 on success, -EINVAL on failure.
3195  *
3196  * The caller must have charged to @to, IOW, called res_counter_charge() for
3197  * both res and memsw, and called css_get().
3198  */
3199 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3200                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3201 {
3202         unsigned short old_id, new_id;
3203
3204         old_id = css_id(&from->css);
3205         new_id = css_id(&to->css);
3206
3207         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3208                 mem_cgroup_swap_statistics(from, false);
3209                 mem_cgroup_swap_statistics(to, true);
3210                 /*
3211                  * This function is only called from task migration context now.
3212                  * It postpones res_counter and refcount handling until the end
3213                  * of task migration (mem_cgroup_clear_mc()) as a performance
3214                  * improvement. But we cannot postpone mem_cgroup_get(to),
3215                  * because if the process that has been moved to @to swaps
3216                  * pages in, the refcount of @to might drop to 0.
3217                  */
3218                 mem_cgroup_get(to);
3219                 if (need_fixup) {
3220                         if (!mem_cgroup_is_root(from))
3221                                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
3222                         mem_cgroup_put(from);
3223                         /*
3224                          * we charged both to->res and to->memsw, so we should
3225                          * uncharge to->res.
3226                          */
3227                         if (!mem_cgroup_is_root(to))
3228                                 res_counter_uncharge(&to->res, PAGE_SIZE);
3229                 }
3230                 return 0;
3231         }
3232         return -EINVAL;
3233 }
3234 #else
3235 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3236                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3237 {
3238         return -EINVAL;
3239 }
3240 #endif
3241
3242 /*
3243  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
3244  * old page belongs to.
3245  */
3246 int mem_cgroup_prepare_migration(struct page *page,
3247         struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
3248 {
3249         struct mem_cgroup *memcg = NULL;
3250         struct page_cgroup *pc;
3251         enum charge_type ctype;
3252         int ret = 0;
3253
3254         *ptr = NULL;
3255
3256         VM_BUG_ON(PageTransHuge(page));
3257         if (mem_cgroup_disabled())
3258                 return 0;
3259
3260         pc = lookup_page_cgroup(page);
3261         lock_page_cgroup(pc);
3262         if (PageCgroupUsed(pc)) {
3263                 memcg = pc->mem_cgroup;
3264                 css_get(&memcg->css);
3265                 /*
3266                  * When migrating an anonymous page, its mapcount drops
3267                  * to 0 and uncharge() will be called. But even if it is fully
3268                  * unmapped, migration may fail and the page would have to be
3269                  * charged again. We set the MIGRATION flag here and delay the
3270                  * uncharge until end_migration() is called.
3271                  *
3272                  * Corner cases to think about:
3273                  * A)
3274                  * The old page was mapped as Anon and is unmapped-and-freed
3275                  * while migration is ongoing.
3276                  * If unmap finds the old page, its uncharge() is delayed
3277                  * until end_migration(). If unmap finds the new page, it is
3278                  * uncharged when its mapcount drops from 1 to 0. If the unmap
3279                  * code finds a swap migration entry, the new page will not be
3280                  * mapped and end_migration() will find it (mapcount == 0).
3281                  *
3282                  * B)
3283                  * The old page was mapped but migration fails, so the kernel
3284                  * remaps it. The charge for it is kept by the MIGRATION flag
3285                  * even if its mapcount drops to 0, so the remap succeeds
3286                  * without charging it again.
3287                  *
3288                  * C)
3289                  * The "old" page is under lock_page() until the end of
3290                  * migration, so the old page itself will not be swapped out.
3291                  * If the new page is swapped out before end_migration(), our
3292                  * hook into the usual swap-out path will catch the event.
3293                  */
3294                 if (PageAnon(page))
3295                         SetPageCgroupMigration(pc);
3296         }
3297         unlock_page_cgroup(pc);
3298         /*
3299          * If the page is not charged at this point,
3300          * we return here.
3301          */
3302         if (!memcg)
3303                 return 0;
3304
3305         *ptr = memcg;
3306         ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
3307         css_put(&memcg->css);/* drop extra refcnt */
3308         if (ret || *ptr == NULL) {
3309                 if (PageAnon(page)) {
3310                         lock_page_cgroup(pc);
3311                         ClearPageCgroupMigration(pc);
3312                         unlock_page_cgroup(pc);
3313                         /*
3314                          * The old page may be fully unmapped while we kept it.
3315                          */
3316                         mem_cgroup_uncharge_page(page);
3317                 }
3318                 return -ENOMEM;
3319         }
3320         /*
3321          * We charge the new page before it is used/mapped, so even if
3322          * unlock_page() is called before end_migration() we can catch all
3323          * events on this new page. If the page is migrated but never remapped,
3324          * its mapcount ends up at 0 and we uncharge it in end_migration().
3325          */
3326         pc = lookup_page_cgroup(newpage);
3327         if (PageAnon(page))
3328                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3329         else if (page_is_file_cache(page))
3330                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3331         else
3332                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3333         __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
3334         return ret;
3335 }
3336
3337 /* remove the redundant charge if migration failed */
3338 void mem_cgroup_end_migration(struct mem_cgroup *memcg,
3339         struct page *oldpage, struct page *newpage, bool migration_ok)
3340 {
3341         struct page *used, *unused;
3342         struct page_cgroup *pc;
3343
3344         if (!memcg)
3345                 return;
3346         /* blocks rmdir() */
3347         cgroup_exclude_rmdir(&memcg->css);
3348         if (!migration_ok) {
3349                 used = oldpage;
3350                 unused = newpage;
3351         } else {
3352                 used = newpage;
3353                 unused = oldpage;
3354         }
3355         /*
3356          * We disallowed uncharging pages under migration because the page's
3357          * mapcount temporarily drops to zero.
3358          * Clear the flag and check whether the page should stay charged.
3359          */
3360         pc = lookup_page_cgroup(oldpage);
3361         lock_page_cgroup(pc);
3362         ClearPageCgroupMigration(pc);
3363         unlock_page_cgroup(pc);
3364
3365         __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3366
3367         /*
3368          * If a page is file cache, the radix-tree replacement is atomic
3369          * and we can skip this check. When it is an Anon page, its mapcount
3370          * drops to 0, but because we added the MIGRATION flag it is not
3371          * uncharged yet. There are several cases, but the page->mapcount
3372          * check and the USED bit check in mem_cgroup_uncharge_page() are
3373          * sufficient. (See mem_cgroup_prepare_migration() as well.)
3374          */
3375         if (PageAnon(used))
3376                 mem_cgroup_uncharge_page(used);
3377         /*
3378          * At migration, we may charge against a cgroup which has no
3379          * tasks, so rmdir()->pre_destroy() can be called while we do
3380          * this charge. In that case, pre_destroy() needs to be called
3381          * again; check it here.
3382          */
3383         cgroup_release_and_wakeup_rmdir(&memcg->css);
3384 }
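/*
 * A minimal sketch of the migration charge protocol (illustrative only;
 * move_the_page() is a hypothetical stand-in for the copy done by
 * mm/migrate.c):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	int rc;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL))
 *		return -ENOMEM;		// old page keeps its charge as-is
 *	rc = move_the_page(page, newpage);
 *	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 */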
3385
3386 /*
3387  * When replacing a page cache page, newpage is not under any memcg but
3388  * may be on the LRU; this function doesn't touch the res_counter but
3389  * handles the LRU correctly. Both pages are locked, so no race with uncharge.
3390  */
3391 void mem_cgroup_replace_page_cache(struct page *oldpage,
3392                                   struct page *newpage)
3393 {
3394         struct mem_cgroup *memcg;
3395         struct page_cgroup *pc;
3396         struct zone *zone;
3397         enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3398         unsigned long flags;
3399
3400         if (mem_cgroup_disabled())
3401                 return;
3402
3403         pc = lookup_page_cgroup(oldpage);
3404         /* fix accounting on the old page */
3405         lock_page_cgroup(pc);
3406         memcg = pc->mem_cgroup;
3407         mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
3408         ClearPageCgroupUsed(pc);
3409         unlock_page_cgroup(pc);
3410
3411         if (PageSwapBacked(oldpage))
3412                 type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3413
3414         zone = page_zone(newpage);
3415         pc = lookup_page_cgroup(newpage);
3416         /*
3417          * Even if newpage->mapping was NULL before starting the replacement,
3418          * the newpage may already be on the LRU (or on a pagevec headed for
3419          * the LRU). We hold the LRU lock while we overwrite pc->mem_cgroup.
3420          */
3421         spin_lock_irqsave(&zone->lru_lock, flags);
3422         if (PageLRU(newpage))
3423                 del_page_from_lru_list(zone, newpage, page_lru(newpage));
3424         __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
3425         if (PageLRU(newpage))
3426                 add_page_to_lru_list(zone, newpage, page_lru(newpage));
3427         spin_unlock_irqrestore(&zone->lru_lock, flags);
3428 }
3429
3430 #ifdef CONFIG_DEBUG_VM
3431 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3432 {
3433         struct page_cgroup *pc;
3434
3435         pc = lookup_page_cgroup(page);
3436         if (likely(pc) && PageCgroupUsed(pc))
3437                 return pc;
3438         return NULL;
3439 }
3440
3441 bool mem_cgroup_bad_page_check(struct page *page)
3442 {
3443         if (mem_cgroup_disabled())
3444                 return false;
3445
3446         return lookup_page_cgroup_used(page) != NULL;
3447 }
3448
3449 void mem_cgroup_print_bad_page(struct page *page)
3450 {
3451         struct page_cgroup *pc;
3452
3453         pc = lookup_page_cgroup_used(page);
3454         if (pc) {
3455                 int ret = -1;
3456                 char *path;
3457
3458                 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3459                        pc, pc->flags, pc->mem_cgroup);
3460
3461                 path = kmalloc(PATH_MAX, GFP_KERNEL);
3462                 if (path) {
3463                         rcu_read_lock();
3464                         ret = cgroup_path(pc->mem_cgroup->css.cgroup,
3465                                                         path, PATH_MAX);
3466                         rcu_read_unlock();
3467                 }
3468
3469                 printk(KERN_CONT "(%s)\n",
3470                                 (ret < 0) ? "cannot get the path" : path);
3471                 kfree(path);
3472         }
3473 }
3474 #endif
3475
3476 static DEFINE_MUTEX(set_limit_mutex);
3477
3478 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3479                                 unsigned long long val)
3480 {
3481         int retry_count;
3482         u64 memswlimit, memlimit;
3483         int ret = 0;
3484         int children = mem_cgroup_count_children(memcg);
3485         u64 curusage, oldusage;
3486         int enlarge;
3487
3488         /*
3489          * To keep hierarchical_reclaim simple, how long we should retry
3490          * depends on the caller. We set our retry count to be a function
3491          * of the number of children we may visit in this loop.
3492          */
3493         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
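        /* e.g. with MEM_CGROUP_RECLAIM_RETRIES == 5 and 4 children: 20 tries */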
3494
3495         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3496
3497         enlarge = 0;
3498         while (retry_count) {
3499                 if (signal_pending(current)) {
3500                         ret = -EINTR;
3501                         break;
3502                 }
3503                 /*
3504                  * Rather than hiding all of this in some function, do it
3505                  * open coded so it is clear what really happens. We have to
3506                  * guarantee memcg->res.limit <= memcg->memsw.limit.
3507                  */
3508                 mutex_lock(&set_limit_mutex);
3509                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3510                 if (memswlimit < val) {
3511                         ret = -EINVAL;
3512                         mutex_unlock(&set_limit_mutex);
3513                         break;
3514                 }
3515
3516                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3517                 if (memlimit < val)
3518                         enlarge = 1;
3519
3520                 ret = res_counter_set_limit(&memcg->res, val);
3521                 if (!ret) {
3522                         if (memswlimit == val)
3523                                 memcg->memsw_is_minimum = true;
3524                         else
3525                                 memcg->memsw_is_minimum = false;
3526                 }
3527                 mutex_unlock(&set_limit_mutex);
3528
3529                 if (!ret)
3530                         break;
3531
3532                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3533                                                 MEM_CGROUP_RECLAIM_SHRINK,
3534                                                 NULL);
3535                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3536                 /* Was usage reduced? */
3537                 if (curusage >= oldusage)
3538                         retry_count--;
3539                 else
3540                         oldusage = curusage;
3541         }
3542         if (!ret && enlarge)
3543                 memcg_oom_recover(memcg);
3544
3545         return ret;
3546 }
3547
3548 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3549                                         unsigned long long val)
3550 {
3551         int retry_count;
3552         u64 memlimit, memswlimit, oldusage, curusage;
3553         int children = mem_cgroup_count_children(memcg);
3554         int ret = -EBUSY;
3555         int enlarge = 0;
3556
3557         /* see mem_cgroup_resize_limit() */
3558         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3559         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3560         while (retry_count) {
3561                 if (signal_pending(current)) {
3562                         ret = -EINTR;
3563                         break;
3564                 }
3565                 /*
3566                  * Rather than hiding all of this in some function, do it
3567                  * open coded so it is clear what really happens. We have to
3568                  * guarantee memcg->res.limit <= memcg->memsw.limit.
3569                  */
3570                 mutex_lock(&set_limit_mutex);
3571                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3572                 if (memlimit > val) {
3573                         ret = -EINVAL;
3574                         mutex_unlock(&set_limit_mutex);
3575                         break;
3576                 }
3577                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3578                 if (memswlimit < val)
3579                         enlarge = 1;
3580                 ret = res_counter_set_limit(&memcg->memsw, val);
3581                 if (!ret) {
3582                         if (memlimit == val)
3583                                 memcg->memsw_is_minimum = true;
3584                         else
3585                                 memcg->memsw_is_minimum = false;
3586                 }
3587                 mutex_unlock(&set_limit_mutex);
3588
3589                 if (!ret)
3590                         break;
3591
3592                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3593                                                 MEM_CGROUP_RECLAIM_NOSWAP |
3594                                                 MEM_CGROUP_RECLAIM_SHRINK,
3595                                                 NULL);
3596                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3597                 /* Was usage reduced? */
3598                 if (curusage >= oldusage)
3599                         retry_count--;
3600                 else
3601                         oldusage = curusage;
3602         }
3603         if (!ret && enlarge)
3604                 memcg_oom_recover(memcg);
3605         return ret;
3606 }
3607
3608 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3609                                             gfp_t gfp_mask,
3610                                             unsigned long *total_scanned)
3611 {
3612         unsigned long nr_reclaimed = 0;
3613         struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3614         unsigned long reclaimed;
3615         int loop = 0;
3616         struct mem_cgroup_tree_per_zone *mctz;
3617         unsigned long long excess;
3618         unsigned long nr_scanned;
3619
3620         if (order > 0)
3621                 return 0;
3622
3623         mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3624         /*
3625          * This loop can run for a while, especially if mem_cgroups
3626          * continuously keep exceeding their soft limit and putting the
3627          * system under pressure.
3628          */
3629         do {
3630                 if (next_mz)
3631                         mz = next_mz;
3632                 else
3633                         mz = mem_cgroup_largest_soft_limit_node(mctz);
3634                 if (!mz)
3635                         break;
3636
3637                 nr_scanned = 0;
3638                 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3639                                                 gfp_mask,
3640                                                 MEM_CGROUP_RECLAIM_SOFT,
3641                                                 &nr_scanned);
3642                 nr_reclaimed += reclaimed;
3643                 *total_scanned += nr_scanned;
3644                 spin_lock(&mctz->lock);
3645
3646                 /*
3647                  * If we failed to reclaim anything from this memory cgroup
3648                  * it is time to move on to the next cgroup
3649                  */
3650                 next_mz = NULL;
3651                 if (!reclaimed) {
3652                         do {
3653                                 /*
3654                                  * Loop until we find yet another one.
3655                                  *
3656                                  * By the time we get the soft_limit lock
3657                                  * again, someone might have added the
3658                                  * group back on the RB tree. Iterate to
3659                                  * make sure we get a different memcg.
3660                                  * mem_cgroup_largest_soft_limit_node returns
3661                                  * NULL if no other cgroup is present on
3662                                  * the tree.
3663                                  */
3664                                 next_mz =
3665                                 __mem_cgroup_largest_soft_limit_node(mctz);
3666                                 if (next_mz == mz)
3667                                         css_put(&next_mz->mem->css);
3668                                 else /* next_mz == NULL or other memcg */
3669                                         break;
3670                         } while (1);
3671                 }
3672                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
3673                 excess = res_counter_soft_limit_excess(&mz->mem->res);
3674                 /*
3675                  * One school of thought says that we should not add
3676                  * back the node to the tree if reclaim returns 0.
3677                  * But our reclaim could return 0 simply because, due
3678                  * to priority, we are exposing a smaller subset of
3679                  * memory to reclaim from. Consider this as a longer
3680                  * term TODO.
3681                  */
3682                 /* If excess == 0, no tree ops */
3683                 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3684                 spin_unlock(&mctz->lock);
3685                 css_put(&mz->mem->css);
3686                 loop++;
3687                 /*
3688                  * Could not reclaim anything and there are no more
3689                  * mem cgroups to try or we seem to be looping without
3690                  * reclaiming anything.
3691                  */
3692                 if (!nr_reclaimed &&
3693                         (next_mz == NULL ||
3694                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3695                         break;
3696         } while (!nr_reclaimed);
3697         if (next_mz)
3698                 css_put(&next_mz->mem->css);
3699         return nr_reclaimed;
3700 }
3701
3702 /*
3703  * This routine traverses the page_cgroups on the given list and drops
3704  * them all. It doesn't reclaim the pages themselves, only the page_cgroups.
3705  */
3706 static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
3707                                 int node, int zid, enum lru_list lru)
3708 {
3709         struct zone *zone;
3710         struct mem_cgroup_per_zone *mz;
3711         struct page_cgroup *pc, *busy;
3712         unsigned long flags, loop;
3713         struct list_head *list;
3714         int ret = 0;
3715
3716         zone = &NODE_DATA(node)->node_zones[zid];
3717         mz = mem_cgroup_zoneinfo(memcg, node, zid);
3718         list = &mz->lists[lru];
3719
3720         loop = MEM_CGROUP_ZSTAT(mz, lru);
3721         /* give some margin against -EBUSY etc. */
3722         loop += 256;
3723         busy = NULL;
3724         while (loop--) {
3725                 struct page *page;
3726
3727                 ret = 0;
3728                 spin_lock_irqsave(&zone->lru_lock, flags);
3729                 if (list_empty(list)) {
3730                         spin_unlock_irqrestore(&zone->lru_lock, flags);
3731                         break;
3732                 }
3733                 pc = list_entry(list->prev, struct page_cgroup, lru);
3734                 if (busy == pc) {
3735                         list_move(&pc->lru, list);
3736                         busy = NULL;
3737                         spin_unlock_irqrestore(&zone->lru_lock, flags);
3738                         continue;
3739                 }
3740                 spin_unlock_irqrestore(&zone->lru_lock, flags);
3741
3742                 page = lookup_cgroup_page(pc);
3743
3744                 ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
3745                 if (ret == -ENOMEM)
3746                         break;
3747
3748                 if (ret == -EBUSY || ret == -EINVAL) {
3749                         /* found lock contention or "pc" is obsolete. */
3750                         busy = pc;
3751                         cond_resched();
3752                 } else
3753                         busy = NULL;
3754         }
3755
3756         if (!ret && !list_empty(list))
3757                 return -EBUSY;
3758         return ret;
3759 }
3760
3761 /*
3762  * Reduce the mem_cgroup's charge to 0 if it contains no tasks.
3763  * This makes it possible to delete the mem_cgroup.
3764  */
3765 static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
3766 {
3767         int ret;
3768         int node, zid, shrink;
3769         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3770         struct cgroup *cgrp = memcg->css.cgroup;
3771
3772         css_get(&memcg->css);
3773
3774         shrink = 0;
3775         /* should free all ? */
3776         if (free_all)
3777                 goto try_to_free;
3778 move_account:
3779         do {
3780                 ret = -EBUSY;
3781                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3782                         goto out;
3783                 ret = -EINTR;
3784                 if (signal_pending(current))
3785                         goto out;
3786                 /* This makes sure all *used* pages are on an LRU. */
3787                 lru_add_drain_all();
3788                 drain_all_stock_sync(memcg);
3789                 ret = 0;
3790                 mem_cgroup_start_move(memcg);
3791                 for_each_node_state(node, N_HIGH_MEMORY) {
3792                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3793                                 enum lru_list l;
3794                                 for_each_lru(l) {
3795                                         ret = mem_cgroup_force_empty_list(memcg,
3796                                                         node, zid, l);
3797                                         if (ret)
3798                                                 break;
3799                                 }
3800                         }
3801                         if (ret)
3802                                 break;
3803                 }
3804                 mem_cgroup_end_move(memcg);
3805                 memcg_oom_recover(memcg);
3806                 /* it seems parent cgroup doesn't have enough mem */
3807                 if (ret == -ENOMEM)
3808                         goto try_to_free;
3809                 cond_resched();
3810         /* "ret" should also be checked to ensure all lists are empty. */
3811         } while (memcg->res.usage > 0 || ret);
3812 out:
3813         css_put(&memcg->css);
3814         return ret;
3815
3816 try_to_free:
3817         /* return -EBUSY if there is a task or if we come here twice. */
3818         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3819                 ret = -EBUSY;
3820                 goto out;
3821         }
3822         /* we call try-to-free-pages to make this cgroup empty */
3823         lru_add_drain_all();
3824         /* try to free all pages in this cgroup */
3825         shrink = 1;
3826         while (nr_retries && memcg->res.usage > 0) {
3827                 int progress;
3828
3829                 if (signal_pending(current)) {
3830                         ret = -EINTR;
3831                         goto out;
3832                 }
3833                 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
3834                                                 false);
3835                 if (!progress) {
3836                         nr_retries--;
3837                         /* maybe some writeback is necessary */
3838                         congestion_wait(BLK_RW_ASYNC, HZ/10);
3839                 }
3840
3841         }
3842         lru_add_drain();
3843         /* try move_account...there may be some *locked* pages. */
3844         goto move_account;
3845 }
3846
3847 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3848 {
3849         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3850 }
3851
3852
3853 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3854 {
3855         return mem_cgroup_from_cont(cont)->use_hierarchy;
3856 }
3857
3858 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3859                                         u64 val)
3860 {
3861         int retval = 0;
3862         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3863         struct cgroup *parent = cont->parent;
3864         struct mem_cgroup *parent_memcg = NULL;
3865
3866         if (parent)
3867                 parent_memcg = mem_cgroup_from_cont(parent);
3868
3869         cgroup_lock();
3870         /*
3871          * If parent's use_hierarchy is set, we can't make any modifications
3872          * in the child subtrees. If it is unset, then the change can
3873          * occur, provided the current cgroup has no children.
3874          *
3875          * For the root cgroup, parent_memcg is NULL; we allow the value to
3876          * be set if there are no children.
3877          */
3878         if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3879                                 (val == 1 || val == 0)) {
3880                 if (list_empty(&cont->children))
3881                         memcg->use_hierarchy = val;
3882                 else
3883                         retval = -EBUSY;
3884         } else
3885                 retval = -EINVAL;
3886         cgroup_unlock();
3887
3888         return retval;
3889 }
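/*
 * Illustrative shell usage (assuming the controller is mounted at /cgroup):
 *
 *	echo 1 > /cgroup/A/memory.use_hierarchy
 *
 * succeeds only while A has no children (-EBUSY otherwise) and only while
 * A's parent does not itself have use_hierarchy set (-EINVAL otherwise).
 */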
3890
3891
3892 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
3893                                                enum mem_cgroup_stat_index idx)
3894 {
3895         struct mem_cgroup *iter;
3896         long val = 0;
3897
3898         /* Per-cpu values can be negative, use a signed accumulator */
3899         for_each_mem_cgroup_tree(iter, memcg)
3900                 val += mem_cgroup_read_stat(iter, idx);
3901
3902         if (val < 0) /* race ? */
3903                 val = 0;
3904         return val;
3905 }
3906
3907 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3908 {
3909         u64 val;
3910
3911         if (!mem_cgroup_is_root(memcg)) {
3912                 if (!swap)
3913                         return res_counter_read_u64(&memcg->res, RES_USAGE);
3914                 else
3915                         return res_counter_read_u64(&memcg->memsw, RES_USAGE);
3916         }
3917
3918         val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
3919         val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
3920
3921         if (swap)
3922                 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
3923
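        /* recursive stats count pages; e.g. with 4KiB pages, 300 << PAGE_SHIFT = 1228800 bytes */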
3924         return val << PAGE_SHIFT;
3925 }
3926
3927 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3928 {
3929         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3930         u64 val;
3931         int type, name;
3932
3933         type = MEMFILE_TYPE(cft->private);
3934         name = MEMFILE_ATTR(cft->private);
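        /*
         * cft->private packs the counter type together with the RES_*
         * attribute (see the MEMFILE_* macros defined earlier in this file).
         */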
3935         switch (type) {
3936         case _MEM:
3937                 if (name == RES_USAGE)
3938                         val = mem_cgroup_usage(memcg, false);
3939                 else
3940                         val = res_counter_read_u64(&memcg->res, name);
3941                 break;
3942         case _MEMSWAP:
3943                 if (name == RES_USAGE)
3944                         val = mem_cgroup_usage(memcg, true);
3945                 else
3946                         val = res_counter_read_u64(&memcg->memsw, name);
3947                 break;
3948         default:
3949                 BUG();
3950                 break;
3951         }
3952         return val;
3953 }
3954 /*
3955  * This is the write handler for RES_LIMIT and RES_SOFT_LIMIT,
3956  * i.e. the *limit_in_bytes control files.
3957  */
3958 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3959                             const char *buffer)
3960 {
3961         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3962         int type, name;
3963         unsigned long long val;
3964         int ret;
3965
3966         type = MEMFILE_TYPE(cft->private);
3967         name = MEMFILE_ATTR(cft->private);
3968         switch (name) {
3969         case RES_LIMIT:
3970                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3971                         ret = -EINVAL;
3972                         break;
3973                 }
3974                 /* res_counter_memparse_write_strategy() does all the parsing; reuse it */
3975                 ret = res_counter_memparse_write_strategy(buffer, &val);
3976                 if (ret)
3977                         break;
3978                 if (type == _MEM)
3979                         ret = mem_cgroup_resize_limit(memcg, val);
3980                 else
3981                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
3982                 break;
3983         case RES_SOFT_LIMIT:
3984                 ret = res_counter_memparse_write_strategy(buffer, &val);
3985                 if (ret)
3986                         break;
3987                 /*
3988                  * For memsw, soft limits are hard to implement in terms
3989                  * of semantics; for now, we only support soft limits for
3990                  * memory control without swap.
3991                  */
3992                 if (type == _MEM)
3993                         ret = res_counter_set_soft_limit(&memcg->res, val);
3994                 else
3995                         ret = -EINVAL;
3996                 break;
3997         default:
3998                 ret = -EINVAL; /* should be BUG() ? */
3999                 break;
4000         }
4001         return ret;
4002 }
4003
4004 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
4005                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
4006 {
4007         struct cgroup *cgroup;
4008         unsigned long long min_limit, min_memsw_limit, tmp;
4009
4010         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4011         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4012         cgroup = memcg->css.cgroup;
4013         if (!memcg->use_hierarchy)
4014                 goto out;
4015
4016         while (cgroup->parent) {
4017                 cgroup = cgroup->parent;
4018                 memcg = mem_cgroup_from_cont(cgroup);
4019                 if (!memcg->use_hierarchy)
4020                         break;
4021                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
4022                 min_limit = min(min_limit, tmp);
4023                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4024                 min_memsw_limit = min(min_memsw_limit, tmp);
4025         }
4026 out:
4027         *mem_limit = min_limit;
4028         *memsw_limit = min_memsw_limit;
4029         return;
4030 }
4031
4032 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
4033 {
4034         struct mem_cgroup *memcg;
4035         int type, name;
4036
4037         memcg = mem_cgroup_from_cont(cont);
4038         type = MEMFILE_TYPE(event);
4039         name = MEMFILE_ATTR(event);
4040         switch (name) {
4041         case RES_MAX_USAGE:
4042                 if (type == _MEM)
4043                         res_counter_reset_max(&memcg->res);
4044                 else
4045                         res_counter_reset_max(&memcg->memsw);
4046                 break;
4047         case RES_FAILCNT:
4048                 if (type == _MEM)
4049                         res_counter_reset_failcnt(&memcg->res);
4050                 else
4051                         res_counter_reset_failcnt(&memcg->memsw);
4052                 break;
4053         }
4054
4055         return 0;
4056 }
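/*
 * Illustrative shell usage: this reset is triggered by writes to the
 * standard memcg control files, e.g.
 *
 *	echo 0 > memory.max_usage_in_bytes	# RES_MAX_USAGE
 *	echo 0 > memory.failcnt			# RES_FAILCNT
 */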
4057
4058 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
4059                                         struct cftype *cft)
4060 {
4061         return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
4062 }
4063
4064 #ifdef CONFIG_MMU
4065 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4066                                         struct cftype *cft, u64 val)
4067 {
4068         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4069
4070         if (val >= (1 << NR_MOVE_TYPE))
4071                 return -EINVAL;
4072         /*
4073          * We check this value several times in both can_attach() and
4074          * attach(), so we need the cgroup lock to prevent this value from
4075          * becoming inconsistent.
4076          */
4077         cgroup_lock();
4078         memcg->move_charge_at_immigrate = val;
4079         cgroup_unlock();
4080
4081         return 0;
4082 }
4083 #else
4084 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4085                                         struct cftype *cft, u64 val)
4086 {
4087         return -ENOSYS;
4088 }
4089 #endif
4090
4091
4092 /* For read statistics */
4093 enum {
4094         MCS_CACHE,
4095         MCS_RSS,
4096         MCS_FILE_MAPPED,
4097         MCS_PGPGIN,
4098         MCS_PGPGOUT,
4099         MCS_SWAP,
4100         MCS_PGFAULT,
4101         MCS_PGMAJFAULT,
4102         MCS_INACTIVE_ANON,
4103         MCS_ACTIVE_ANON,
4104         MCS_INACTIVE_FILE,
4105         MCS_ACTIVE_FILE,
4106         MCS_UNEVICTABLE,
4107         NR_MCS_STAT,
4108 };
4109
4110 struct mcs_total_stat {
4111         s64 stat[NR_MCS_STAT];
4112 };
4113
4114 static struct {
4115         char *local_name;
4116         char *total_name;
4117 } memcg_stat_strings[NR_MCS_STAT] = {
4118         {"cache", "total_cache"},
4119         {"rss", "total_rss"},
4120         {"mapped_file", "total_mapped_file"},
4121         {"pgpgin", "total_pgpgin"},
4122         {"pgpgout", "total_pgpgout"},
4123         {"swap", "total_swap"},
4124         {"pgfault", "total_pgfault"},
4125         {"pgmajfault", "total_pgmajfault"},
4126         {"inactive_anon", "total_inactive_anon"},
4127         {"active_anon", "total_active_anon"},
4128         {"inactive_file", "total_inactive_file"},
4129         {"active_file", "total_active_file"},
4130         {"unevictable", "total_unevictable"}
4131 };
4132
4133
4134 static void
4135 mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
4136 {
4137         s64 val;
4138
4139         /* per cpu stat */
4140         val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
4141         s->stat[MCS_CACHE] += val * PAGE_SIZE;
4142         val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
4143         s->stat[MCS_RSS] += val * PAGE_SIZE;
4144         val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
4145         s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
4146         val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
4147         s->stat[MCS_PGPGIN] += val;
4148         val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
4149         s->stat[MCS_PGPGOUT] += val;
4150         if (do_swap_account) {
4151                 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
4152                 s->stat[MCS_SWAP] += val * PAGE_SIZE;
4153         }
4154         val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
4155         s->stat[MCS_PGFAULT] += val;
4156         val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
4157         s->stat[MCS_PGMAJFAULT] += val;
4158
4159         /* per zone stat */
4160         val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
4161         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
4162         val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
4163         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
4164         val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
4165         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
4166         val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
4167         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
4168         val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
4169         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
4170 }
4171
4172 static void
4173 mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
4174 {
4175         struct mem_cgroup *iter;
4176
4177         for_each_mem_cgroup_tree(iter, memcg)
4178                 mem_cgroup_get_local_stat(iter, s);
4179 }
4180
4181 #ifdef CONFIG_NUMA
4182 static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4183 {
4184         int nid;
4185         unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4186         unsigned long node_nr;
4187         struct cgroup *cont = m->private;
4188         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4189
4190         total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
4191         seq_printf(m, "total=%lu", total_nr);
4192         for_each_node_state(nid, N_HIGH_MEMORY) {
4193                 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
4194                 seq_printf(m, " N%d=%lu", nid, node_nr);
4195         }
4196         seq_putc(m, '\n');
4197
4198         file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
4199         seq_printf(m, "file=%lu", file_nr);
4200         for_each_node_state(nid, N_HIGH_MEMORY) {
4201                 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4202                                 LRU_ALL_FILE);
4203                 seq_printf(m, " N%d=%lu", nid, node_nr);
4204         }
4205         seq_putc(m, '\n');
4206
4207         anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
4208         seq_printf(m, "anon=%lu", anon_nr);
4209         for_each_node_state(nid, N_HIGH_MEMORY) {
4210                 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4211                                 LRU_ALL_ANON);
4212                 seq_printf(m, " N%d=%lu", nid, node_nr);
4213         }
4214         seq_putc(m, '\n');
4215
4216         unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
4217         seq_printf(m, "unevictable=%lu", unevictable_nr);
4218         for_each_node_state(nid, N_HIGH_MEMORY) {
4219                 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4220                                 BIT(LRU_UNEVICTABLE));
4221                 seq_printf(m, " N%d=%lu", nid, node_nr);
4222         }
4223         seq_putc(m, '\n');
4224         return 0;
4225 }
4226 #endif /* CONFIG_NUMA */
4227
4228 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4229                                  struct cgroup_map_cb *cb)
4230 {
4231         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4232         struct mcs_total_stat mystat;
4233         int i;
4234
4235         memset(&mystat, 0, sizeof(mystat));
4236         mem_cgroup_get_local_stat(mem_cont, &mystat);
4237
4238
4239         for (i = 0; i < NR_MCS_STAT; i++) {
4240                 if (i == MCS_SWAP && !do_swap_account)
4241                         continue;
4242                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
4243         }
4244
4245         /* Hierarchical information */
4246         {
4247                 unsigned long long limit, memsw_limit;
4248                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
4249                 cb->fill(cb, "hierarchical_memory_limit", limit);
4250                 if (do_swap_account)
4251                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
4252         }
4253
4254         memset(&mystat, 0, sizeof(mystat));
4255         mem_cgroup_get_total_stat(mem_cont, &mystat);
4256         for (i = 0; i < NR_MCS_STAT; i++) {
4257                 if (i == MCS_SWAP && !do_swap_account)
4258                         continue;
4259                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
4260         }
4261
4262 #ifdef CONFIG_DEBUG_VM
4263         {
4264                 int nid, zid;
4265                 struct mem_cgroup_per_zone *mz;
4266                 unsigned long recent_rotated[2] = {0, 0};
4267                 unsigned long recent_scanned[2] = {0, 0};
4268
4269                 for_each_online_node(nid)
4270                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4271                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
4272
4273                                 recent_rotated[0] +=
4274                                         mz->reclaim_stat.recent_rotated[0];
4275                                 recent_rotated[1] +=
4276                                         mz->reclaim_stat.recent_rotated[1];
4277                                 recent_scanned[0] +=
4278                                         mz->reclaim_stat.recent_scanned[0];
4279                                 recent_scanned[1] +=
4280                                         mz->reclaim_stat.recent_scanned[1];
4281                         }
4282                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
4283                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
4284                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
4285                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
4286         }
4287 #endif
4288
4289         return 0;
4290 }
4291
4292 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4293 {
4294         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4295
4296         return mem_cgroup_swappiness(memcg);
4297 }
4298
4299 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4300                                        u64 val)
4301 {
4302         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4303         struct mem_cgroup *parent;
4304
4305         if (val > 100)
4306                 return -EINVAL;
4307
4308         if (cgrp->parent == NULL)
4309                 return -EINVAL;
4310
4311         parent = mem_cgroup_from_cont(cgrp->parent);
4312
4313         cgroup_lock();
4314
4315         /* If under hierarchy, only empty-root can set this value */
4316         if ((parent->use_hierarchy) ||
4317             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4318                 cgroup_unlock();
4319                 return -EINVAL;
4320         }
4321
4322         memcg->swappiness = val;
4323
4324         cgroup_unlock();
4325
4326         return 0;
4327 }
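/*
 * Illustrative shell usage: "echo 30 > memory.swappiness". Values above
 * 100 are rejected, as are writes to the root cgroup's file and writes
 * while the hierarchy below this cgroup is populated.
 */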
4328
4329 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4330 {
4331         struct mem_cgroup_threshold_ary *t;
4332         u64 usage;
4333         int i;
4334
4335         rcu_read_lock();
4336         if (!swap)
4337                 t = rcu_dereference(memcg->thresholds.primary);
4338         else
4339                 t = rcu_dereference(memcg->memsw_thresholds.primary);
4340
4341         if (!t)
4342                 goto unlock;
4343
4344         usage = mem_cgroup_usage(memcg, swap);
4345
4346         /*
4347          * current_threshold points to the threshold just below the current
4348          * usage. If that is no longer true, a threshold was crossed after
4349          * the last call of __mem_cgroup_threshold().
4350          */
4351         i = t->current_threshold;
4352
4353         /*
4354          * Iterate backward over the array of thresholds starting from
4355          * current_threshold and check whether a threshold was crossed.
4356          * If none of the thresholds below the usage was crossed, we read
4357          * only one element of the array here.
4358          */
4359         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4360                 eventfd_signal(t->entries[i].eventfd, 1);
4361
4362         /* i = current_threshold + 1 */
4363         i++;
4364
4365         /*
4366          * Iterate forward over the array of thresholds starting from
4367          * current_threshold+1 and check if a threshold is crossed.
4368          * If none of the thresholds above usage is crossed, we read
4369          * only one element of the array here.
4370          */
4371         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4372                 eventfd_signal(t->entries[i].eventfd, 1);
4373
4374         /* Update current_threshold */
4375         t->current_threshold = i - 1;
4376 unlock:
4377         rcu_read_unlock();
4378 }
4379
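/*
 * Worked example (added for illustration): with thresholds sorted as
 * {4M, 8M, 16M} and current_threshold indexing 8M (usage ~10M), a drop
 * of usage to 3M makes the backward scan above signal the 8M and 4M
 * eventfds and leaves current_threshold at -1; a rise to 20M instead
 * makes the forward scan signal 16M and leaves current_threshold
 * indexing 16M.
 */
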
4380 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4381 {
4382         while (memcg) {
4383                 __mem_cgroup_threshold(memcg, false);
4384                 if (do_swap_account)
4385                         __mem_cgroup_threshold(memcg, true);
4386
4387                 memcg = parent_mem_cgroup(memcg);
4388         }
4389 }
4390
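/*
 * Note (added): the walk up through parent_mem_cgroup() matters because,
 * with use_hierarchy, charging or uncharging a page also changes the
 * usage of every ancestor, so their thresholds may have been crossed too.
 */
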
4391 static int compare_thresholds(const void *a, const void *b)
4392 {
4393         const struct mem_cgroup_threshold *_a = a;
4394         const struct mem_cgroup_threshold *_b = b;
4395         /* thresholds are u64; a plain subtraction could overflow the int result */
4396         return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold);
4397 }
4398
4399 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4400 {
4401         struct mem_cgroup_eventfd_list *ev;
4402
4403         list_for_each_entry(ev, &memcg->oom_notify, list)
4404                 eventfd_signal(ev->eventfd, 1);
4405         return 0;
4406 }
4407
4408 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4409 {
4410         struct mem_cgroup *iter;
4411
4412         for_each_mem_cgroup_tree(iter, memcg)
4413                 mem_cgroup_oom_notify_cb(iter);
4414 }
4415
4416 static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4417         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4418 {
4419         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4420         struct mem_cgroup_thresholds *thresholds;
4421         struct mem_cgroup_threshold_ary *new;
4422         int type = MEMFILE_TYPE(cft->private);
4423         u64 threshold, usage;
4424         int i, size, ret;
4425
4426         ret = res_counter_memparse_write_strategy(args, &threshold);
4427         if (ret)
4428                 return ret;
4429
4430         mutex_lock(&memcg->thresholds_lock);
4431
4432         if (type == _MEM)
4433                 thresholds = &memcg->thresholds;
4434         else if (type == _MEMSWAP)
4435                 thresholds = &memcg->memsw_thresholds;
4436         else
4437                 BUG();
4438
4439         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4440
4441         /* Check if a threshold was crossed before adding a new one */
4442         if (thresholds->primary)
4443                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4444
4445         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4446
4447         /* Allocate memory for new array of thresholds */
4448         new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4449                         GFP_KERNEL);
4450         if (!new) {
4451                 ret = -ENOMEM;
4452                 goto unlock;
4453         }
4454         new->size = size;
4455
4456         /* Copy thresholds (if any) to new array */
4457         if (thresholds->primary) {
4458                 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4459                                 sizeof(struct mem_cgroup_threshold));
4460         }
4461
4462         /* Add new threshold */
4463         new->entries[size - 1].eventfd = eventfd;
4464         new->entries[size - 1].threshold = threshold;
4465
4466         /* Sort thresholds. Registering a new threshold isn't time-critical */
4467         sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4468                         compare_thresholds, NULL);
4469
4470         /* Find current threshold */
4471         new->current_threshold = -1;
4472         for (i = 0; i < size; i++) {
4473                 if (new->entries[i].threshold < usage) {
4474                         /*
4475                          * new->current_threshold will not be used until
4476                          * rcu_assign_pointer(), so it's safe to increment
4477                          * it here.
4478                          */
4479                         ++new->current_threshold;
4480                 }
4481         }
4482
4483         /* Free old spare buffer and save old primary buffer as spare */
4484         kfree(thresholds->spare);
4485         thresholds->spare = thresholds->primary;
4486
4487         rcu_assign_pointer(thresholds->primary, new);
4488
4489         /* Make sure nobody can still be using the old array */
4490         synchronize_rcu();
4491
4492 unlock:
4493         mutex_unlock(&memcg->thresholds_lock);
4494
4495         return ret;
4496 }
4497
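/*
 * Illustrative userspace sketch (added; not part of the original source).
 * Thresholds are registered through cgroup.event_control with an eventfd;
 * the "50M"-style argument is parsed by
 * res_counter_memparse_write_strategy() above.  Roughly:
 *
 *   uint64_t cnt;
 *   int efd = eventfd(0, 0);
 *   int ufd = open("memory.usage_in_bytes", O_RDONLY);
 *   int cfd = open("cgroup.event_control", O_WRONLY);
 *   dprintf(cfd, "%d %d 50M", efd, ufd);   <- register a 50M threshold
 *   read(efd, &cnt, sizeof(cnt));          <- blocks until it is crossed
 *
 * Paths are relative to the group's directory in the memory cgroup mount.
 */
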
4498 static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4499         struct cftype *cft, struct eventfd_ctx *eventfd)
4500 {
4501         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4502         struct mem_cgroup_thresholds *thresholds;
4503         struct mem_cgroup_threshold_ary *new;
4504         int type = MEMFILE_TYPE(cft->private);
4505         u64 usage;
4506         int i, j, size;
4507
4508         mutex_lock(&memcg->thresholds_lock);
4509         if (type == _MEM)
4510                 thresholds = &memcg->thresholds;
4511         else if (type == _MEMSWAP)
4512                 thresholds = &memcg->memsw_thresholds;
4513         else
4514                 BUG();
4515
4516         /*
4517          * Something went wrong if we are trying to unregister a threshold
4518          * while we don't have any thresholds registered.
4519          */
4520         BUG_ON(!thresholds->primary);
4521
4522         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4523
4524         /* Check if a threshold was crossed before removing */
4525         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4526
4527         /* Calculate the new number of thresholds */
4528         size = 0;
4529         for (i = 0; i < thresholds->primary->size; i++) {
4530                 if (thresholds->primary->entries[i].eventfd != eventfd)
4531                         size++;
4532         }
4533
4534         new = thresholds->spare;
4535
4536         /* Set thresholds array to NULL if we don't have thresholds */
4537         if (!size) {
4538                 kfree(new);
4539                 new = NULL;
4540                 goto swap_buffers;
4541         }
4542
4543         new->size = size;
4544
4545         /* Copy thresholds and find current threshold */
4546         new->current_threshold = -1;
4547         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4548                 if (thresholds->primary->entries[i].eventfd == eventfd)
4549                         continue;
4550
4551                 new->entries[j] = thresholds->primary->entries[i];
4552                 if (new->entries[j].threshold < usage) {
4553                         /*
4554                          * new->current_threshold will not be used
4555                          * until rcu_assign_pointer(), so it's safe to increment
4556                          * it here.
4557                          */
4558                         ++new->current_threshold;
4559                 }
4560                 j++;
4561         }
4562
4563 swap_buffers:
4564         /* Swap primary and spare array */
4565         thresholds->spare = thresholds->primary;
4566         rcu_assign_pointer(thresholds->primary, new);
4567
4568         /* Make sure nobody can still be using the old primary array */
4569         synchronize_rcu();
4570
4571         mutex_unlock(&memcg->thresholds_lock);
4572 }
4573
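/*
 * Design note (added): the primary/spare pair double-buffers the
 * threshold array under RCU.  Unregistering reuses the spare buffer
 * rather than allocating, presumably so the unregister path can never
 * fail with -ENOMEM; registering frees the old spare and demotes the
 * old primary to spare once synchronize_rcu() has run.
 */
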
4574 static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4575         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4576 {
4577         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4578         struct mem_cgroup_eventfd_list *event;
4579         int type = MEMFILE_TYPE(cft->private);
4580
4581         BUG_ON(type != _OOM_TYPE);
4582         event = kmalloc(sizeof(*event), GFP_KERNEL);
4583         if (!event)
4584                 return -ENOMEM;
4585
4586         spin_lock(&memcg_oom_lock);
4587
4588         event->eventfd = eventfd;
4589         list_add(&event->list, &memcg->oom_notify);
4590
4591         /* already under OOM? If so, notify the new listener right away */
4592         if (atomic_read(&memcg->under_oom))
4593                 eventfd_signal(eventfd, 1);
4594         spin_unlock(&memcg_oom_lock);
4595
4596         return 0;
4597 }
4598
4599 static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4600         struct cftype *cft, struct eventfd_ctx *eventfd)
4601 {
4602         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4603         struct mem_cgroup_eventfd_list *ev, *tmp;
4604         int type = MEMFILE_TYPE(cft->private);
4605
4606         BUG_ON(type != _OOM_TYPE);
4607
4608         spin_lock(&memcg_oom_lock);
4609
4610         list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4611                 if (ev->eventfd == eventfd) {
4612                         list_del(&ev->list);
4613                         kfree(ev);
4614                 }
4615         }
4616
4617         spin_unlock(&memcg_oom_lock);
4618 }
4619
4620 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4621         struct cftype *cft,  struct cgroup_map_cb *cb)
4622 {
4623         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4624
4625         cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
4626
4627         if (atomic_read(&memcg->under_oom))
4628                 cb->fill(cb, "under_oom", 1);
4629         else
4630                 cb->fill(cb, "under_oom", 0);
4631         return 0;
4632 }
4633
4634 static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4635         struct cftype *cft, u64 val)
4636 {
4637         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4638         struct mem_cgroup *parent;
4639
4640         /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4641         if (!cgrp->parent || !((val == 0) || (val == 1)))
4642                 return -EINVAL;
4643
4644         parent = mem_cgroup_from_cont(cgrp->parent);
4645
4646         cgroup_lock();
4647         /* oom_kill_disable is a flag for the whole sub-hierarchy. */
4648         if ((parent->use_hierarchy) ||
4649             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4650                 cgroup_unlock();
4651                 return -EINVAL;
4652         }
4653         memcg->oom_kill_disable = val;
4654         if (!val)
4655                 memcg_oom_recover(memcg);
4656         cgroup_unlock();
4657         return 0;
4658 }
4659
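/*
 * Illustrative usage (added; the mount point is an assumption): the OOM
 * killer can be disabled per group, and OOM notifications are delivered
 * through an eventfd registered against memory.oom_control:
 *
 *   echo 1 > /sys/fs/cgroup/memory/grp/memory.oom_control
 *
 * Reading memory.oom_control reports "oom_kill_disable" and "under_oom"
 * as filled in by mem_cgroup_oom_control_read() above.
 */
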
4660 #ifdef CONFIG_NUMA
4661 static const struct file_operations mem_control_numa_stat_file_operations = {
4662         .read = seq_read,
4663         .llseek = seq_lseek,
4664         .release = single_release,
4665 };
4666
4667 static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4668 {
4669         struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4670
4671         file->f_op = &mem_control_numa_stat_file_operations;
4672         return single_open(file, mem_control_numa_stat_show, cont);
4673 }
4674 #endif /* CONFIG_NUMA */
4675
4676 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
4677 static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
4678 {
4679         /*
4680          * Part of this would be better living in a separate allocation
4681          * function, leaving us with just the cgroup tree population work.
4682          * We, however, depend on state such as the network's proto_list,
4683          * which is only initialized after cgroup creation. The least
4684          * cumbersome way to deal with that is to defer it all to populate time.
4685          */
4686         return mem_cgroup_sockets_init(cont, ss);
4687 }
4688
4689 static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4690                                 struct cgroup *cont)
4691 {
4692         mem_cgroup_sockets_destroy(cont, ss);
4693 }
4694 #else
4695 static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
4696 {
4697         return 0;
4698 }
4699
4700 static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4701                                 struct cgroup *cont)
4702 {
4703 }
4704 #endif
4705
4706 static struct cftype mem_cgroup_files[] = {
4707         {
4708                 .name = "usage_in_bytes",
4709                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4710                 .read_u64 = mem_cgroup_read,
4711                 .register_event = mem_cgroup_usage_register_event,
4712                 .unregister_event = mem_cgroup_usage_unregister_event,
4713         },
4714         {
4715                 .name = "max_usage_in_bytes",
4716                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4717                 .trigger = mem_cgroup_reset,
4718                 .read_u64 = mem_cgroup_read,
4719         },
4720         {
4721                 .name = "limit_in_bytes",
4722                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4723                 .write_string = mem_cgroup_write,
4724                 .read_u64 = mem_cgroup_read,
4725         },
4726         {
4727                 .name = "soft_limit_in_bytes",
4728                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4729                 .write_string = mem_cgroup_write,
4730                 .read_u64 = mem_cgroup_read,
4731         },
4732         {
4733                 .name = "failcnt",
4734                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4735                 .trigger = mem_cgroup_reset,
4736                 .read_u64 = mem_cgroup_read,
4737         },
4738         {
4739                 .name = "stat",
4740                 .read_map = mem_control_stat_show,
4741         },
4742         {
4743                 .name = "force_empty",
4744                 .trigger = mem_cgroup_force_empty_write,
4745         },
4746         {
4747                 .name = "use_hierarchy",
4748                 .write_u64 = mem_cgroup_hierarchy_write,
4749                 .read_u64 = mem_cgroup_hierarchy_read,
4750         },
4751         {
4752                 .name = "swappiness",
4753                 .read_u64 = mem_cgroup_swappiness_read,
4754                 .write_u64 = mem_cgroup_swappiness_write,
4755         },
4756         {
4757                 .name = "move_charge_at_immigrate",
4758                 .read_u64 = mem_cgroup_move_charge_read,
4759                 .write_u64 = mem_cgroup_move_charge_write,
4760         },
4761         {
4762                 .name = "oom_control",
4763                 .read_map = mem_cgroup_oom_control_read,
4764                 .write_u64 = mem_cgroup_oom_control_write,
4765                 .register_event = mem_cgroup_oom_register_event,
4766                 .unregister_event = mem_cgroup_oom_unregister_event,
4767                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4768         },
4769 #ifdef CONFIG_NUMA
4770         {
4771                 .name = "numa_stat",
4772                 .open = mem_control_numa_stat_open,
4773                 .mode = S_IRUGO,
4774         },
4775 #endif
4776 };
4777
4778 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4779 static struct cftype memsw_cgroup_files[] = {
4780         {
4781                 .name = "memsw.usage_in_bytes",
4782                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4783                 .read_u64 = mem_cgroup_read,
4784                 .register_event = mem_cgroup_usage_register_event,
4785                 .unregister_event = mem_cgroup_usage_unregister_event,
4786         },
4787         {
4788                 .name = "memsw.max_usage_in_bytes",
4789                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4790                 .trigger = mem_cgroup_reset,
4791                 .read_u64 = mem_cgroup_read,
4792         },
4793         {
4794                 .name = "memsw.limit_in_bytes",
4795                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4796                 .write_string = mem_cgroup_write,
4797                 .read_u64 = mem_cgroup_read,
4798         },
4799         {
4800                 .name = "memsw.failcnt",
4801                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4802                 .trigger = mem_cgroup_reset,
4803                 .read_u64 = mem_cgroup_read,
4804         },
4805 };
4806
4807 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4808 {
4809         if (!do_swap_account)
4810                 return 0;
4811         return cgroup_add_files(cont, ss, memsw_cgroup_files,
4812                                 ARRAY_SIZE(memsw_cgroup_files));
4813 }
4814 #else
4815 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4816 {
4817         return 0;
4818 }
4819 #endif
4820
4821 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4822 {
4823         struct mem_cgroup_per_node *pn;
4824         struct mem_cgroup_per_zone *mz;
4825         enum lru_list l;
4826         int zone, tmp = node;
4827         /*
4828          * This routine is called against possible nodes, but it is a BUG
4829          * to call kmalloc() against an offline node; a node without normal
4830          * memory falls back to any-node allocation (tmp = -1) below.
4831          *
4832          * TODO: this routine can waste a lot of memory for nodes that will
4833          *       never be onlined. A memory hotplug callback would be better.
4834          */
4835         if (!node_state(node, N_NORMAL_MEMORY))
4836                 tmp = -1;
4837         pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4838         if (!pn)
4839                 return 1;
4840
4841         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4842                 mz = &pn->zoneinfo[zone];
4843                 for_each_lru(l)
4844                         INIT_LIST_HEAD(&mz->lists[l]);
4845                 mz->usage_in_excess = 0;
4846                 mz->on_tree = false;
4847                 mz->mem = memcg;
4848         }
4849         memcg->info.nodeinfo[node] = pn;
4850         return 0;
4851 }
4852
4853 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4854 {
4855         kfree(memcg->info.nodeinfo[node]);
4856 }
4857
4858 static struct mem_cgroup *mem_cgroup_alloc(void)
4859 {
4860         struct mem_cgroup *mem;
4861         int size = sizeof(struct mem_cgroup);
4862
4863         /* Can be very big if MAX_NUMNODES is very big */
4864         if (size < PAGE_SIZE)
4865                 mem = kzalloc(size, GFP_KERNEL);
4866         else
4867                 mem = vzalloc(size);
4868
4869         if (!mem)
4870                 return NULL;
4871
4872         mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4873         if (!mem->stat)
4874                 goto out_free;
4875         spin_lock_init(&mem->pcp_counter_lock);
4876         return mem;
4877
4878 out_free:
4879         if (size < PAGE_SIZE)
4880                 kfree(mem);
4881         else
4882                 vfree(mem);
4883         return NULL;
4884 }
4885
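/*
 * Note (added): the allocation strategy above (kzalloc below one page,
 * vzalloc otherwise) must stay mirrored in __mem_cgroup_free() below,
 * which re-tests sizeof(struct mem_cgroup) to choose kfree() vs vfree().
 */
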
4886 /*
4887  * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
4888  * (Scanning all of them at force_empty would be too costly...)
4889  *
4890  * Instead of clearing all references at force_empty, we remember the
4891  * number of references from swap_cgroup and free the mem_cgroup when
4892  * it goes down to 0.
4893  *
4894  * Removal of the cgroup itself succeeds regardless of refs from swap.
4895  */
4896
4897 static void __mem_cgroup_free(struct mem_cgroup *memcg)
4898 {
4899         int node;
4900
4901         mem_cgroup_remove_from_trees(memcg);
4902         free_css_id(&mem_cgroup_subsys, &memcg->css);
4903
4904         for_each_node_state(node, N_POSSIBLE)
4905                 free_mem_cgroup_per_zone_info(memcg, node);
4906
4907         free_percpu(memcg->stat);
4908         if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4909                 kfree(memcg);
4910         else
4911                 vfree(memcg);
4912 }
4913
4914 static void mem_cgroup_get(struct mem_cgroup *memcg)
4915 {
4916         atomic_inc(&memcg->refcnt);
4917 }
4918
4919 static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
4920 {
4921         if (atomic_sub_and_test(count, &memcg->refcnt)) {
4922                 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4923                 __mem_cgroup_free(memcg);
4924                 if (parent)
4925                         mem_cgroup_put(parent);
4926         }
4927 }
4928
4929 static void mem_cgroup_put(struct mem_cgroup *memcg)
4930 {
4931         __mem_cgroup_put(memcg, 1);
4932 }
4933
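/*
 * Note (added): __mem_cgroup_put() takes a count so many references can
 * be dropped in one call; e.g. __mem_cgroup_clear_mc() drops mc.moved_swap
 * references at once after moving that many swap charges.
 */
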
4934 /*
4935  * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
4936  */
4937 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4938 {
4939         if (!memcg->res.parent)
4940                 return NULL;
4941         return mem_cgroup_from_res_counter(memcg->res.parent, res);
4942 }
4943 EXPORT_SYMBOL(parent_mem_cgroup);
4944
4945 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4946 static void __init enable_swap_cgroup(void)
4947 {
4948         if (!mem_cgroup_disabled() && really_do_swap_account)
4949                 do_swap_account = 1;
4950 }
4951 #else
4952 static void __init enable_swap_cgroup(void)
4953 {
4954 }
4955 #endif
4956
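/*
 * Note (added): the soft-limit tree built below keeps one rb-tree (with
 * its own lock) per zone per node; memcgs whose usage exceeds their soft
 * limit are kept sorted by the excess (mz->usage_in_excess) so reclaim
 * can pick the worst offender first.
 */
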
4957 static int mem_cgroup_soft_limit_tree_init(void)
4958 {
4959         struct mem_cgroup_tree_per_node *rtpn;
4960         struct mem_cgroup_tree_per_zone *rtpz;
4961         int tmp, node, zone;
4962
4963         for_each_node_state(node, N_POSSIBLE) {
4964                 tmp = node;
4965                 if (!node_state(node, N_NORMAL_MEMORY))
4966                         tmp = -1;
4967                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4968                 if (!rtpn)
4969                         return 1;
4970
4971                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4972
4973                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4974                         rtpz = &rtpn->rb_tree_per_zone[zone];
4975                         rtpz->rb_root = RB_ROOT;
4976                         spin_lock_init(&rtpz->lock);
4977                 }
4978         }
4979         return 0;
4980 }
4981
4982 static struct cgroup_subsys_state * __ref
4983 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4984 {
4985         struct mem_cgroup *memcg, *parent;
4986         long error = -ENOMEM;
4987         int node;
4988
4989         memcg = mem_cgroup_alloc();
4990         if (!memcg)
4991                 return ERR_PTR(error);
4992
4993         for_each_node_state(node, N_POSSIBLE)
4994                 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4995                         goto free_out;
4996
4997         /* root ? */
4998         if (cont->parent == NULL) {
4999                 int cpu;
5000                 enable_swap_cgroup();
5001                 parent = NULL;
5002                 if (mem_cgroup_soft_limit_tree_init())
5003                         goto free_out;
5004                 root_mem_cgroup = memcg;
5005                 for_each_possible_cpu(cpu) {
5006                         struct memcg_stock_pcp *stock =
5007                                                 &per_cpu(memcg_stock, cpu);
5008                         INIT_WORK(&stock->work, drain_local_stock);
5009                 }
5010                 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5011         } else {
5012                 parent = mem_cgroup_from_cont(cont->parent);
5013                 memcg->use_hierarchy = parent->use_hierarchy;
5014                 memcg->oom_kill_disable = parent->oom_kill_disable;
5015         }
5016
5017         if (parent && parent->use_hierarchy) {
5018                 res_counter_init(&memcg->res, &parent->res);
5019                 res_counter_init(&memcg->memsw, &parent->memsw);
5020                 /*
5021                  * We increment refcnt of the parent to ensure that we can
5022                  * safely access it on res_counter_charge/uncharge.
5023                  * This refcnt will be decremented when freeing this
5024                  * mem_cgroup (see mem_cgroup_put).
5025                  */
5026                 mem_cgroup_get(parent);
5027         } else {
5028                 res_counter_init(&memcg->res, NULL);
5029                 res_counter_init(&memcg->memsw, NULL);
5030         }
5031         memcg->last_scanned_child = 0;
5032         memcg->last_scanned_node = MAX_NUMNODES;
5033         INIT_LIST_HEAD(&memcg->oom_notify);
5034
5035         if (parent)
5036                 memcg->swappiness = mem_cgroup_swappiness(parent);
5037         atomic_set(&memcg->refcnt, 1);
5038         memcg->move_charge_at_immigrate = 0;
5039         mutex_init(&memcg->thresholds_lock);
5040         return &memcg->css;
5041 free_out:
5042         __mem_cgroup_free(memcg);
5043         return ERR_PTR(error);
5044 }
5045
5046 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
5047                                         struct cgroup *cont)
5048 {
5049         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5050
5051         return mem_cgroup_force_empty(memcg, false);
5052 }
5053
5054 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
5055                                 struct cgroup *cont)
5056 {
5057         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5058
5059         kmem_cgroup_destroy(ss, cont);
5060
5061         mem_cgroup_put(memcg);
5062 }
5063
5064 static int mem_cgroup_populate(struct cgroup_subsys *ss,
5065                                 struct cgroup *cont)
5066 {
5067         int ret;
5068
5069         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
5070                                 ARRAY_SIZE(mem_cgroup_files));
5071
5072         if (!ret)
5073                 ret = register_memsw_files(cont, ss);
5074
5075         if (!ret)
5076                 ret = register_kmem_files(cont, ss);
5077
5078         return ret;
5079 }
5080
5081 #ifdef CONFIG_MMU
5082 /* Handlers for move charge at task migration. */
5083 #define PRECHARGE_COUNT_AT_ONCE 256
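
/*
 * Note (added): mem_cgroup_do_precharge() first tries to charge all
 * "count" pages against the res_counters in one shot; if that fails it
 * falls back to charging page by page, with a cond_resched() every
 * PRECHARGE_COUNT_AT_ONCE charges so a large precharge cannot hog the CPU.
 */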
5084 static int mem_cgroup_do_precharge(unsigned long count)
5085 {
5086         int ret = 0;
5087         int batch_count = PRECHARGE_COUNT_AT_ONCE;
5088         struct mem_cgroup *memcg = mc.to;
5089
5090         if (mem_cgroup_is_root(memcg)) {
5091                 mc.precharge += count;
5092                 /* we don't need css_get for root */
5093                 return ret;
5094         }
5095         /* try to charge at once */
5096         if (count > 1) {
5097                 struct res_counter *dummy;
5098                 /*
5099                  * "memcg" cannot be under rmdir() because cgroup_lock_live_cgroup()
5100                  * has already checked that it is not removed, and we are
5101                  * still under the same cgroup_mutex. So we can postpone
5102                  * css_get().
5103                  */
5104                 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
5105                         goto one_by_one;
5106                 if (do_swap_account && res_counter_charge(&memcg->memsw,
5107                                                 PAGE_SIZE * count, &dummy)) {
5108                         res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
5109                         goto one_by_one;
5110                 }
5111                 mc.precharge += count;
5112                 return ret;
5113         }
5114 one_by_one:
5115         /* fall back to one by one charge */
5116         while (count--) {
5117                 if (signal_pending(current)) {
5118                         ret = -EINTR;
5119                         break;
5120                 }
5121                 if (!batch_count--) {
5122                         batch_count = PRECHARGE_COUNT_AT_ONCE;
5123                         cond_resched();
5124                 }
5125                 ret = __mem_cgroup_try_charge(NULL,
5126                                         GFP_KERNEL, 1, &memcg, false);
5127                 if (ret || !memcg)
5128                         /* mem_cgroup_clear_mc() will do uncharge later */
5129                         return -ENOMEM;
5130                 mc.precharge++;
5131         }
5132         return ret;
5133 }
5134
5135 /**
5136  * is_target_pte_for_mc - check a pte whether it is valid for move charge
5137  * @vma: the vma the pte to be checked belongs
5138  * @addr: the address corresponding to the pte to be checked
5139  * @ptent: the pte to be checked
5140  * @target: pointer where the target page or swap entry is stored (can be NULL)
5141  *
5142  * Returns
5143  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5144  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5145  *     move charge. If @target is not NULL, the page is stored in target->page
5146  *     with an extra refcount taken (callers should handle it).
5147  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5148  *     target for charge migration. If @target is not NULL, the entry is stored
5149  *     in target->ent.
5150  *
5151  * Called with pte lock held.
5152  */
5153 union mc_target {
5154         struct page     *page;
5155         swp_entry_t     ent;
5156 };
5157
5158 enum mc_target_type {
5159         MC_TARGET_NONE, /* not used */
5160         MC_TARGET_PAGE,
5161         MC_TARGET_SWAP,
5162 };
5163
5164 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5165                                                 unsigned long addr, pte_t ptent)
5166 {
5167         struct page *page = vm_normal_page(vma, addr, ptent);
5168
5169         if (!page || !page_mapped(page))
5170                 return NULL;
5171         if (PageAnon(page)) {
5172                 /* we don't move shared anon */
5173                 if (!move_anon() || page_mapcount(page) > 2)
5174                         return NULL;
5175         } else if (!move_file())
5176                 /* we ignore mapcount for file pages */
5177                 return NULL;
5178         if (!get_page_unless_zero(page))
5179                 return NULL;
5180
5181         return page;
5182 }
5183
5184 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5185                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
5186 {
5187         int usage_count;
5188         struct page *page = NULL;
5189         swp_entry_t ent = pte_to_swp_entry(ptent);
5190
5191         if (!move_anon() || non_swap_entry(ent))
5192                 return NULL;
5193         usage_count = mem_cgroup_count_swap_user(ent, &page);
5194         if (usage_count > 1) { /* we don't move shared anon */
5195                 if (page)
5196                         put_page(page);
5197                 return NULL;
5198         }
5199         if (do_swap_account)
5200                 entry->val = ent.val;
5201
5202         return page;
5203 }
5204
5205 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5206                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
5207 {
5208         struct page *page = NULL;
5209         struct inode *inode;
5210         struct address_space *mapping;
5211         pgoff_t pgoff;
5212
5213         if (!vma->vm_file) /* anonymous vma */
5214                 return NULL;
5215         if (!move_file())
5216                 return NULL;
5217
5218         inode = vma->vm_file->f_path.dentry->d_inode;
5219         mapping = vma->vm_file->f_mapping;
5220         if (pte_none(ptent))
5221                 pgoff = linear_page_index(vma, addr);
5222         else /* pte_file(ptent) is true */
5223                 pgoff = pte_to_pgoff(ptent);
5224
5225         /* page is moved even if it's not RSS of this task (page-faulted). */
5226         page = find_get_page(mapping, pgoff);
5227
5228 #ifdef CONFIG_SWAP
5229         /* shmem/tmpfs may report page out on swap: account for that too. */
5230         if (radix_tree_exceptional_entry(page)) {
5231                 swp_entry_t swap = radix_to_swp_entry(page);
5232                 if (do_swap_account)
5233                         *entry = swap;
5234                 page = find_get_page(&swapper_space, swap.val);
5235         }
5236 #endif
5237         return page;
5238 }
5239
5240 static int is_target_pte_for_mc(struct vm_area_struct *vma,
5241                 unsigned long addr, pte_t ptent, union mc_target *target)
5242 {
5243         struct page *page = NULL;
5244         struct page_cgroup *pc;
5245         int ret = 0;
5246         swp_entry_t ent = { .val = 0 };
5247
5248         if (pte_present(ptent))
5249                 page = mc_handle_present_pte(vma, addr, ptent);
5250         else if (is_swap_pte(ptent))
5251                 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5252         else if (pte_none(ptent) || pte_file(ptent))
5253                 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5254
5255         if (!page && !ent.val)
5256                 return 0;
5257         if (page) {
5258                 pc = lookup_page_cgroup(page);
5259                 /*
5260                  * Do only a loose check without the page_cgroup lock;
5261                  * mem_cgroup_move_account() re-checks the pc under the
5262                  * lock.
5263                  */
5264                 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5265                         ret = MC_TARGET_PAGE;
5266                         if (target)
5267                                 target->page = page;
5268                 }
5269                 if (!ret || !target)
5270                         put_page(page);
5271         }
5272         /* There is a swap entry and the page doesn't exist or isn't charged */
5273         if (ent.val && !ret &&
5274                         css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
5275                 ret = MC_TARGET_SWAP;
5276                 if (target)
5277                         target->ent = ent;
5278         }
5279         return ret;
5280 }
5281
5282 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5283                                         unsigned long addr, unsigned long end,
5284                                         struct mm_walk *walk)
5285 {
5286         struct vm_area_struct *vma = walk->private;
5287         pte_t *pte;
5288         spinlock_t *ptl;
5289
5290         split_huge_page_pmd(walk->mm, pmd);
5291
5292         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5293         for (; addr != end; pte++, addr += PAGE_SIZE)
5294                 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
5295                         mc.precharge++; /* increment precharge temporarily */
5296         pte_unmap_unlock(pte - 1, ptl);
5297         cond_resched();
5298
5299         return 0;
5300 }
5301
5302 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5303 {
5304         unsigned long precharge;
5305         struct vm_area_struct *vma;
5306
5307         down_read(&mm->mmap_sem);
5308         for (vma = mm->mmap; vma; vma = vma->vm_next) {
5309                 struct mm_walk mem_cgroup_count_precharge_walk = {
5310                         .pmd_entry = mem_cgroup_count_precharge_pte_range,
5311                         .mm = mm,
5312                         .private = vma,
5313                 };
5314                 if (is_vm_hugetlb_page(vma))
5315                         continue;
5316                 walk_page_range(vma->vm_start, vma->vm_end,
5317                                         &mem_cgroup_count_precharge_walk);
5318         }
5319         up_read(&mm->mmap_sem);
5320
5321         precharge = mc.precharge;
5322         mc.precharge = 0;
5323
5324         return precharge;
5325 }
5326
5327 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5328 {
5329         unsigned long precharge = mem_cgroup_count_precharge(mm);
5330
5331         VM_BUG_ON(mc.moving_task);
5332         mc.moving_task = current;
5333         return mem_cgroup_do_precharge(precharge);
5334 }
5335
5336 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5337 static void __mem_cgroup_clear_mc(void)
5338 {
5339         struct mem_cgroup *from = mc.from;
5340         struct mem_cgroup *to = mc.to;
5341
5342         /* we must uncharge all the leftover precharges from mc.to */
5343         if (mc.precharge) {
5344                 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
5345                 mc.precharge = 0;
5346         }
5347         /*
5348          * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5349          * we must uncharge here.
5350          */
5351         if (mc.moved_charge) {
5352                 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
5353                 mc.moved_charge = 0;
5354         }
5355         /* we must fixup refcnts and charges */
5356         if (mc.moved_swap) {
5357                 /* uncharge swap account from the old cgroup */
5358                 if (!mem_cgroup_is_root(mc.from))
5359                         res_counter_uncharge(&mc.from->memsw,
5360                                                 PAGE_SIZE * mc.moved_swap);
5361                 __mem_cgroup_put(mc.from, mc.moved_swap);
5362
5363                 if (!mem_cgroup_is_root(mc.to)) {
5364                         /*
5365                          * we charged both to->res and to->memsw, so we should
5366                          * uncharge to->res.
5367                          */
5368                         res_counter_uncharge(&mc.to->res,
5369                                                 PAGE_SIZE * mc.moved_swap);
5370                 }
5371                 /* we've already done mem_cgroup_get(mc.to) */
5372                 mc.moved_swap = 0;
5373         }
5374         memcg_oom_recover(from);
5375         memcg_oom_recover(to);
5376         wake_up_all(&mc.waitq);
5377 }
5378
5379 static void mem_cgroup_clear_mc(void)
5380 {
5381         struct mem_cgroup *from = mc.from;
5382
5383         /*
5384          * we must clear moving_task before waking up waiters at the end of
5385          * task migration.
5386          */
5387         mc.moving_task = NULL;
5388         __mem_cgroup_clear_mc();
5389         spin_lock(&mc.lock);
5390         mc.from = NULL;
5391         mc.to = NULL;
5392         spin_unlock(&mc.lock);
5393         mem_cgroup_end_move(from);
5394 }
5395
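/*
 * Overview (added): moving charges at task migration is a three-step
 * protocol driven by the cgroup core:
 *
 *   can_attach()    -> mem_cgroup_precharge_mc(): walk the page tables,
 *                      count movable ptes, precharge that many pages to
 *                      the destination (mc.to);
 *   attach()        -> mem_cgroup_move_task() -> mem_cgroup_move_charge():
 *                      walk again and move each charge, consuming the
 *                      precharge;
 *   cancel_attach() -> mem_cgroup_clear_mc(): roll everything back if
 *                      the migration is aborted.
 */
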
5396 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5397                                 struct cgroup *cgroup,
5398                                 struct cgroup_taskset *tset)
5399 {
5400         struct task_struct *p = cgroup_taskset_first(tset);
5401         int ret = 0;
5402         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
5403
5404         if (memcg->move_charge_at_immigrate) {
5405                 struct mm_struct *mm;
5406                 struct mem_cgroup *from = mem_cgroup_from_task(p);
5407
5408                 VM_BUG_ON(from == memcg);
5409
5410                 mm = get_task_mm(p);
5411                 if (!mm)
5412                         return 0;
5413                 /* We move charges only when we move the owner of the mm */
5414                 if (mm->owner == p) {
5415                         VM_BUG_ON(mc.from);
5416                         VM_BUG_ON(mc.to);
5417                         VM_BUG_ON(mc.precharge);
5418                         VM_BUG_ON(mc.moved_charge);
5419                         VM_BUG_ON(mc.moved_swap);
5420                         mem_cgroup_start_move(from);
5421                         spin_lock(&mc.lock);
5422                         mc.from = from;
5423                         mc.to = memcg;
5424                         spin_unlock(&mc.lock);
5425                         /* We set mc.moving_task later */
5426
5427                         ret = mem_cgroup_precharge_mc(mm);
5428                         if (ret)
5429                                 mem_cgroup_clear_mc();
5430                 }
5431                 mmput(mm);
5432         }
5433         return ret;
5434 }
5435
5436 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5437                                 struct cgroup *cgroup,
5438                                 struct cgroup_taskset *tset)
5439 {
5440         mem_cgroup_clear_mc();
5441 }
5442
5443 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5444                                 unsigned long addr, unsigned long end,
5445                                 struct mm_walk *walk)
5446 {
5447         int ret = 0;
5448         struct vm_area_struct *vma = walk->private;
5449         pte_t *pte;
5450         spinlock_t *ptl;
5451
5452         split_huge_page_pmd(walk->mm, pmd);
5453 retry:
5454         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5455         for (; addr != end; addr += PAGE_SIZE) {
5456                 pte_t ptent = *(pte++);
5457                 union mc_target target;
5458                 int type;
5459                 struct page *page;
5460                 struct page_cgroup *pc;
5461                 swp_entry_t ent;
5462
5463                 if (!mc.precharge)
5464                         break;
5465
5466                 type = is_target_pte_for_mc(vma, addr, ptent, &target);
5467                 switch (type) {
5468                 case MC_TARGET_PAGE:
5469                         page = target.page;
5470                         if (isolate_lru_page(page))
5471                                 goto put;
5472                         pc = lookup_page_cgroup(page);
5473                         if (!mem_cgroup_move_account(page, 1, pc,
5474                                                      mc.from, mc.to, false)) {
5475                                 mc.precharge--;
5476                                 /* we uncharge from mc.from later. */
5477                                 mc.moved_charge++;
5478                         }
5479                         putback_lru_page(page);
5480 put:                    /* is_target_pte_for_mc() gets the page */
5481                         put_page(page);
5482                         break;
5483                 case MC_TARGET_SWAP:
5484                         ent = target.ent;
5485                         if (!mem_cgroup_move_swap_account(ent,
5486                                                 mc.from, mc.to, false)) {
5487                                 mc.precharge--;
5488                                 /* we fixup refcnts and charges later. */
5489                                 mc.moved_swap++;
5490                         }
5491                         break;
5492                 default:
5493                         break;
5494                 }
5495         }
5496         pte_unmap_unlock(pte - 1, ptl);
5497         cond_resched();
5498
5499         if (addr != end) {
5500                 /*
5501                  * We have consumed all the precharges we got in can_attach().
5502                  * We try to charge one by one, but don't do any additional
5503                  * charges to mc.to once a charge has failed in the attach()
5504                  * phase.
5505                  */
5506                 ret = mem_cgroup_do_precharge(1);
5507                 if (!ret)
5508                         goto retry;
5509         }
5510
5511         return ret;
5512 }
5513
5514 static void mem_cgroup_move_charge(struct mm_struct *mm)
5515 {
5516         struct vm_area_struct *vma;
5517
5518         lru_add_drain_all();
5519 retry:
5520         if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5521                 /*
5522                  * Someone who is holding the mmap_sem might be waiting on
5523                  * our waitq. So we cancel all extra charges, wake up all
5524                  * waiters, and retry. Because we cancel precharges, we might
5525                  * not be able to move enough charges, but moving charge is a
5526                  * best-effort feature anyway, so it isn't a big problem.
5527                  */
5528                 __mem_cgroup_clear_mc();
5529                 cond_resched();
5530                 goto retry;
5531         }
5532         for (vma = mm->mmap; vma; vma = vma->vm_next) {
5533                 int ret;
5534                 struct mm_walk mem_cgroup_move_charge_walk = {
5535                         .pmd_entry = mem_cgroup_move_charge_pte_range,
5536                         .mm = mm,
5537                         .private = vma,
5538                 };
5539                 if (is_vm_hugetlb_page(vma))
5540                         continue;
5541                 ret = walk_page_range(vma->vm_start, vma->vm_end,
5542                                                 &mem_cgroup_move_charge_walk);
5543                 if (ret)
5544                         /*
5545                          * A nonzero return means we consumed all precharges
5546                          * and failed to charge any more. Just abandon here.
5547                          */
5548                         break;
5549         }
5550         up_read(&mm->mmap_sem);
5551 }
5552
5553 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5554                                 struct cgroup *cont,
5555                                 struct cgroup_taskset *tset)
5556 {
5557         struct task_struct *p = cgroup_taskset_first(tset);
5558         struct mm_struct *mm = get_task_mm(p);
5559
5560         if (mm) {
5561                 if (mc.to)
5562                         mem_cgroup_move_charge(mm);
5563                 put_swap_token(mm);
5564                 mmput(mm);
5565         }
5566         if (mc.to)
5567                 mem_cgroup_clear_mc();
5568 }
5569 #else   /* !CONFIG_MMU */
5570 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5571                                 struct cgroup *cgroup,
5572                                 struct cgroup_taskset *tset)
5573 {
5574         return 0;
5575 }
5576 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5577                                 struct cgroup *cgroup,
5578                                 struct cgroup_taskset *tset)
5579 {
5580 }
5581 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5582                                 struct cgroup *cont,
5583                                 struct cgroup_taskset *tset)
5584 {
5585 }
5586 #endif
5587
5588 struct cgroup_subsys mem_cgroup_subsys = {
5589         .name = "memory",
5590         .subsys_id = mem_cgroup_subsys_id,
5591         .create = mem_cgroup_create,
5592         .pre_destroy = mem_cgroup_pre_destroy,
5593         .destroy = mem_cgroup_destroy,
5594         .populate = mem_cgroup_populate,
5595         .can_attach = mem_cgroup_can_attach,
5596         .cancel_attach = mem_cgroup_cancel_attach,
5597         .attach = mem_cgroup_move_task,
5598         .early_init = 0,
5599         .use_id = 1,
5600 };
5601
5602 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
5603 static int __init enable_swap_account(char *s)
5604 {
5605         /* "1" enables, "0" disables; anything else keeps the built-in default */
5606         if (!strcmp(s, "1"))
5607                 really_do_swap_account = 1;
5608         else if (!strcmp(s, "0"))
5609                 really_do_swap_account = 0;
5610         return 1;
5611 }
5612 __setup("swapaccount=", enable_swap_account);
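
/*
 * Illustrative note (added): with this config built in, booting with
 * "swapaccount=0" turns swap accounting off and "swapaccount=1" turns it
 * on, overriding the CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED default.
 */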
5613
5614 #endif