mm/page_owner.c (from platform/kernel/linux-rpi.git)
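/*
 * mm/page_owner.c - track the call stack that allocated each page.
 *
 * Enabled with the "page_owner=on" boot parameter (see
 * early_page_owner_param() below); records are read back from
 * /sys/kernel/debug/page_owner once debugfs is mounted, e.g.:
 *
 *	cat /sys/kernel/debug/page_owner > page_owner_full.txt
 */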
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

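/*
 * Per-page metadata recorded at allocation time. One of these lives in
 * each page's struct page_ext, at the offset assigned to page_owner_ops.
 */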
struct page_owner {
	unsigned int order;
	gfp_t gfp_mask;
	int last_migrate_reason;
	depot_stack_handle_t handle;
};

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;

static void init_early_allocated_pages(void);

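/* Parse the "page_owner=on" kernel command line parameter. */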
static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

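/*
 * Pre-allocate two sentinel stack handles while memory is still
 * plentiful: dummy_handle stands in when saving the real stack would
 * recurse into the allocator, and failure_handle stands in when
 * stackdepot itself cannot allocate memory.
 */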
static noinline void register_dummy_stack(void)
{
	unsigned long entries[4];
	struct stack_trace dummy;

	dummy.nr_entries = 0;
	dummy.max_entries = ARRAY_SIZE(entries);
	dummy.entries = &entries[0];
	dummy.skip = 0;

	save_stack_trace(&dummy);
	dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
}

static noinline void register_failure_stack(void)
{
	unsigned long entries[4];
	struct stack_trace failure;

	failure.nr_entries = 0;
	failure.max_entries = ARRAY_SIZE(entries);
	failure.entries = &entries[0];
	failure.skip = 0;

	save_stack_trace(&failure);
	failure_handle = depot_save_stack(&failure, GFP_KERNEL);
}

static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};

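/* The owner data is carved out of the page_ext area at our registered offset. */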
static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}

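/* Called when a block is freed: forget ownership of every page in it. */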
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		if (unlikely(!page_ext))
			continue;
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}

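/*
 * Return true if @ip shows up at least twice in @trace, i.e. the
 * allocator has re-entered itself through this call path.
 */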
static inline bool check_recursive_alloc(struct stack_trace *trace,
					unsigned long ip)
{
	int i, count;

	if (!trace->nr_entries)
		return false;

	for (i = 0, count = 0; i < trace->nr_entries; i++) {
		if (trace->entries[i] == ip && ++count == 2)
			return true;
	}

	return false;
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};
	depot_stack_handle_t handle;

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/*
	 * We need to check for recursion here because our request to
	 * stackdepot could trigger a memory allocation to save the new
	 * entry. If we don't catch it, that allocation would reach here
	 * and call depot_save_stack() again. Since stackdepot would still
	 * be short of memory, it would try to allocate again and loop
	 * forever.
	 */
	if (check_recursive_alloc(&trace, _RET_IP_))
		return dummy_handle;

	handle = depot_save_stack(&trace, flags);
	if (!handle)
		handle = failure_handle;

	return handle;
}

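/* Record the allocation context (stack, order, gfp mask) for @page. */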
noinline void __set_page_owner(struct page *page, unsigned int order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->handle = save_stack(gfp_mask);
	page_owner->order = order;
	page_owner->gfp_mask = gfp_mask;
	page_owner->last_migrate_reason = -1;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
}

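/*
 * A high-order page is being split into order-0 pages: give each tail
 * page its own copy of the owner information.
 */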
void __split_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->order = 0;
	for (i = 1; i < (1 << order); i++)
		__copy_page_owner(page, page + i);
}

void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	struct page_owner *old_page_owner, *new_page_owner;

	if (unlikely(!old_ext || !new_ext))
		return;

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}

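/*
 * Count, per pageblock migratetype, the pageblocks that contain at
 * least one page allocated with a different migratetype ("mixed"
 * blocks), for /proc/pagetypeinfo.
 */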
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct.
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);
		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfpflags_to_migratetype(
					page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

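/*
 * Format one page_owner record into a kernel buffer and copy it out to
 * userspace. Returns the number of bytes written, or -ENOMEM if the
 * record does not fit in @count bytes.
 */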
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	depot_fetch_stack(handle, &trace);
	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

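/* Dump the owner info for @page to the kernel log (used by dump_page()). */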
void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfpflags_to_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	depot_fetch_stack(handle, &trace);
	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
	print_stack_trace(&trace, 0);

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
}

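/*
 * debugfs read handler. The file offset encodes the next PFN to scan,
 * so successive reads walk physical memory upward from min_low_pfn and
 * emit one record per allocated page that has owner info.
 */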
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Access to page_owner->handle isn't synchronized, so we
		 * have to be careful when reading it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
	}

	return 0;
}

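/*
 * Attribute pages that were allocated before page_owner came up (early
 * boot allocations) to an order-0, zero-gfp owner record so that they
 * at least appear in the output.
 */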
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct.
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * We are safe to check buddy flag and order, because
			 * this is init stage and only a single thread runs.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		init_pages_in_zone(pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	drain_all_pages(NULL);
	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
};

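/* Create /sys/kernel/debug/page_owner once debugfs is available. */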
static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
late_initcall(pageowner_init)