mm: make per-memcg LRU lists exclusive
mm/page_cgroup.c
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
{
	pc->flags = 0;
	set_page_cgroup_array_id(pc, id);
	pc->mem_cgroup = NULL;
}
static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

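/*
 * With flat memory, every node keeps one contiguous page_cgroup array
 * covering its whole pfn span, so a lookup is just an index into that
 * array by (pfn - node_start_pfn).
 */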
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	unsigned long pfn;
	struct page *page;
	pg_data_t *pgdat;

	pgdat = NODE_DATA(page_cgroup_array_id(pc));
	pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
	page = pfn_to_page(pfn);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}

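/*
 * Allocate one bootmem page_cgroup array per node, sized by the node's
 * spanned pages.  The table is never freed in the flatmem case.
 */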
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		init_page_cgroup(pc, nid);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
	       " don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

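/*
 * With sparse memory, each mem_section carries its own page_cgroup table.
 * The pointer stored in the section is pre-biased by the section's start
 * pfn (see init_section_page_cgroup()), so indexing by the raw pfn works
 * without any further offset arithmetic.
 */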
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}

struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	struct mem_section *section;
	struct page *page;
	unsigned long nr;

	nr = page_cgroup_array_id(pc);
	section = __nr_to_section(nr);
	page = pfn_to_page(pc - section->page_cgroup);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}

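/*
 * Try to get physically contiguous pages on the requested node first;
 * if that fails, fall back to vmalloc.  The exact-pages allocation is
 * reported to kmemleak by hand, while vmalloc'ed memory is tracked
 * automatically.
 */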
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	void *addr = NULL;
	gfp_t flags = GFP_KERNEL | __GFP_NOWARN;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vmalloc_node(size, nid);
	else
		addr = vmalloc(size);

	return addr;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}
#endif

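/*
 * Set up the page_cgroup table for one memory section.  The table holds
 * PAGES_PER_SECTION entries, and the pointer stored in the mem_section
 * is (base - section_start_pfn) so that lookup_page_cgroup() can index
 * it directly by pfn.
 */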
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct page_cgroup *base, *pc;
	struct mem_section *section;
	unsigned long table_size;
	unsigned long nr;
	int index;

	nr = pfn_to_section_nr(pfn);
	section = __nr_to_section(nr);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		init_page_cgroup(pc, nr);
	}
	/*
	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

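/*
 * Memory hotplug: allocate page_cgroup tables for every section that
 * intersects the range being onlined.  On failure, the sections that
 * were already set up are torn down again before returning -ENOMEM.
 */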
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, the node already exists and contains valid
		 * memory.  The "start_pfn" passed to us is an argument to
		 * online_pages(), so it must map to an existing node.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

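/*
 * Hotplug notifier: tables are allocated at MEM_GOING_ONLINE, so the
 * online operation can still be cancelled if we run out of memory, and
 * they are freed only once the range has actually gone away at
 * MEM_OFFLINE.
 */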
static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;
	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

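/*
 * Boot-time setup for the sparsemem case: walk every node with memory
 * and allocate a page_cgroup table for each present section, then
 * register the hotplug notifier so later onlined memory is covered too.
 */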
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION, and
		 * page->flags of pages outside the node are not initialized,
		 * so we check one pfn per section in [start_pfn, end_pfn)
		 * here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can be interleaved.
			 * Some architectures have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t	lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short		id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)

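/*
 * Each swap type gets a swap_cgroup_ctrl.  ctrl->map is an array of
 * ctrl->length page pointers, and every page stores SC_PER_PAGE
 * swap_cgroup entries, so a swap offset maps to entry
 * (offset / SC_PER_PAGE, offset & SC_POS_MASK).
 */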
/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * of SwapCache.  At swap_free(), it is accessed directly from swap.
 *
 * This means:
 *  - we have no race in "exchange" when we're accessed via SwapCache because
 *    SwapCache (and its swp_entry) is under lock.
 *  - When called via swap_free(), there is no user of this entry and no race.
 * Therefore, we don't need a lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */
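/*
 * Typical flow (roughly): at swap-out, memcg records the owning cgroup's
 * css id for the swap entry via swap_cgroup_record(); at swap-in or
 * swap_free(), the id is looked up again so the swap charge can be
 * attributed back to (or uncharged from) that cgroup.
 */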

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup's id to be recorded
 *
 * Returns the old id that was recorded for @ent (which may be 0).
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns CSS ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short ret;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->id;
	return ret;
}

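/*
 * Called when a swap area is enabled.  The map is sized as
 * DIV_ROUND_UP(max_pages, SC_PER_PAGE) page pointers; the backing pages
 * themselves are allocated by swap_cgroup_prepare() under
 * swap_cgroup_mutex.
 */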
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

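/*
 * Called when a swap area is disabled.  The map pointer is detached
 * under swap_cgroup_mutex first, so concurrent swapon/swapoff see a
 * consistent ctrl, and the pages plus the pointer array are freed
 * outside the mutex.
 */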
void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];
			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}

#endif