mm/page_owner.c (platform/kernel/linux-rpi.git)
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>

#include "internal.h"

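/*
 * Overview (added for readability): page_owner records the stack trace of
 * the most recent allocation of every page and, via free_handle, of its
 * most recent free.  It is enabled with the "page_owner=on" boot parameter
 * and the recorded data is read back through the debugfs file created in
 * pageowner_init() below (typically /sys/kernel/debug/page_owner).
 */
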
/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
        unsigned short order;
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
        depot_stack_handle_t free_handle;
};

static bool page_owner_enabled = false;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

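/* page_owner is off by default; "page_owner=on" on the command line enables it. */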
static int __init early_page_owner_param(char *buf)
{
        if (!buf)
                return -EINVAL;

        if (strcmp(buf, "on") == 0)
                page_owner_enabled = true;

        return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
        return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
        unsigned long entries[4];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
        dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
        failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
        early_handle = create_dummy_stack();
}

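/*
 * Called via page_owner_ops once page_ext has been set up: register the
 * fallback stack handles, enable the static key and tag pages that were
 * allocated before page_owner was initialized.
 */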
static void init_page_owner(void)
{
        if (!page_owner_enabled)
                return;

        register_dummy_stack();
        register_failure_stack();
        register_early_stack();
        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .size = sizeof(struct page_owner),
        .need = need_page_owner,
        .init = init_page_owner,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
        return (void *)page_ext + page_owner_ops.offset;
}

static inline bool check_recursive_alloc(unsigned long *entries,
                                         unsigned int nr_entries,
                                         unsigned long ip)
{
        unsigned int i;

        for (i = 0; i < nr_entries; i++) {
                if (entries[i] == ip)
                        return true;
        }
        return false;
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        depot_stack_handle_t handle;
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

        /*
         * We need to check recursion here because our request to
         * stackdepot could trigger memory allocation to save new
         * entry. New memory allocation would reach here and call
         * stack_depot_save() again if we don't catch it. There is
         * still not enough memory in stackdepot so it would try to
         * allocate memory again and loop forever.
         */
        if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
                return dummy_handle;

        handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;

        return handle;
}

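/*
 * Free-side hook: record the current (freeing) stack trace in free_handle
 * and clear the "allocated" bit for every base page of the 2^order block.
 */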
void __reset_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext;
        depot_stack_handle_t handle = 0;
        struct page_owner *page_owner;

        handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);

        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                return;
        for (i = 0; i < (1 << order); i++) {
                __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
                page_owner = get_page_owner(page_ext);
                page_owner->free_handle = handle;
                page_ext = page_ext_next(page_ext);
        }
}

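/* Store the owner data (stack handle, order, gfp mask) in every base page. */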
static inline void __set_page_owner_handle(struct page *page,
        struct page_ext *page_ext, depot_stack_handle_t handle,
        unsigned int order, gfp_t gfp_mask)
{
        struct page_owner *page_owner;
        int i;

        for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->handle = handle;
                page_owner->order = order;
                page_owner->gfp_mask = gfp_mask;
                page_owner->last_migrate_reason = -1;
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
                __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

                page_ext = page_ext_next(page_ext);
        }
}

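/*
 * Allocation-side hook: save the allocating stack trace and mark the
 * freshly allocated 2^order block as owned.
 */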
noinline void __set_page_owner(struct page *page, unsigned int order,
                                        gfp_t gfp_mask)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        depot_stack_handle_t handle;

        if (unlikely(!page_ext))
                return;

        handle = save_stack(gfp_mask);
        __set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->last_migrate_reason = reason;
}

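/* When a high-order page is split, each base page becomes an order-0 record. */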
void __split_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
                page_ext = page_ext_next(page_ext);
        }
}

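/* On page migration, carry the owner data over from the old page to the new one. */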
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
        struct page_ext *old_ext = lookup_page_ext(oldpage);
        struct page_ext *new_ext = lookup_page_ext(newpage);
        struct page_owner *old_page_owner, *new_page_owner;

        if (unlikely(!old_ext || !new_ext))
                return;

        old_page_owner = get_page_owner(old_ext);
        new_page_owner = get_page_owner(new_ext);
        new_page_owner->order = old_page_owner->order;
        new_page_owner->gfp_mask = old_page_owner->gfp_mask;
        new_page_owner->last_migrate_reason =
                old_page_owner->last_migrate_reason;
        new_page_owner->handle = old_page_owner->handle;

        /*
         * We don't clear the bit on the oldpage as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the oldpage to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
        __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
}

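/*
 * Count pages whose allocation migratetype differs from the migratetype of
 * their pageblock ("mixed" blocks); the result is printed as part of the
 * /proc/pagetypeinfo output.
 */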
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                       pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        unsigned long count[MIGRATE_TYPES] = { 0, };
        int pageblock_mt, page_mt;
        int i;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                page = pfn_to_online_page(pfn);
                if (!page) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                pageblock_mt = get_pageblock_migratetype(page);

                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;

                        /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        if (PageBuddy(page)) {
                                unsigned long freepage_order;

                                freepage_order = page_order_unsafe(page);
                                if (freepage_order < MAX_ORDER)
                                        pfn += (1UL << freepage_order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                                continue;

                        page_owner = get_page_owner(page_ext);
                        page_mt = gfpflags_to_migratetype(
                                        page_owner->gfp_mask);
                        if (pageblock_mt != page_mt) {
                                if (is_migrate_cma(pageblock_mt))
                                        count[MIGRATE_MOVABLE]++;
                                else
                                        count[pageblock_mt]++;

                                pfn = block_end_pfn;
                                break;
                        }
                        pfn += (1UL << page_owner->order) - 1;
                }
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (i = 0; i < MIGRATE_TYPES; i++)
                seq_printf(m, "%12lu ", count[i]);
        seq_putc(m, '\n');
}

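/*
 * Format one page_owner record into a kernel buffer and copy it to the
 * user buffer supplied by read_page_owner().
 */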
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
{
        int ret, pageblock_mt, page_mt;
        unsigned long *entries;
        unsigned int nr_entries;
        char *kbuf;

        count = min_t(size_t, count, PAGE_SIZE);
        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = snprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg)\n",
                        page_owner->order, page_owner->gfp_mask,
                        &page_owner->gfp_mask);

        if (ret >= count)
                goto err;

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
        ret += snprintf(kbuf + ret, count - ret,
                        "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        page->flags, &page->flags);

        if (ret >= count)
                goto err;

        nr_entries = stack_depot_fetch(handle, &entries);
        ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
        if (ret >= count)
                goto err;

        if (page_owner->last_migrate_reason != -1) {
                ret += snprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
                if (ret >= count)
                        goto err;
        }

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}

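/* Dump the owner info of one page to the kernel log (used by dump_page()). */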
void __dump_page_owner(struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;
        depot_stack_handle_t handle;
        unsigned long *entries;
        unsigned int nr_entries;
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }

        page_owner = get_page_owner(page_ext);
        gfp_mask = page_owner->gfp_mask;
        mt = gfpflags_to_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not present (never set?)\n");
                return;
        }

        if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                pr_alert("page_owner tracks the page as allocated\n");
        else
                pr_alert("page_owner tracks the page as freed\n");

        pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);

        handle = READ_ONCE(page_owner->handle);
        if (!handle) {
                pr_alert("page_owner allocation stack trace missing\n");
        } else {
                nr_entries = stack_depot_fetch(handle, &entries);
                stack_trace_print(entries, nr_entries, 0);
        }

        handle = READ_ONCE(page_owner->free_handle);
        if (!handle) {
                pr_alert("page_owner free stack trace missing\n");
        } else {
                nr_entries = stack_depot_fetch(handle, &entries);
                pr_alert("page last free stack trace:\n");
                stack_trace_print(entries, nr_entries, 0);
        }

        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
}

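/*
 * debugfs read handler. The file offset (*ppos) is interpreted as a PFN
 * offset from min_low_pfn rather than a byte offset: each read returns the
 * record of the next allocated page that has an owner handle.
 */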
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        depot_stack_handle_t handle;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        pfn = min_low_pfn + *ppos;

        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        drain_all_pages(NULL);

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                /* Check for holes within a MAX_ORDER area */
                if (!pfn_valid_within(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = page_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = lookup_page_ext(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;

                /*
                 * Although we do have the info about past allocation of free
                 * pages, it's not relevant for current memory usage.
                 */
                if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                        continue;

                page_owner = get_page_owner(page_ext);

                /*
                 * Don't print "tail" pages of high-order allocations as that
                 * would inflate the stats.
                 */
                if (!IS_ALIGNED(pfn, 1 << page_owner->order))
                        continue;

                /*
                 * Access to page_owner->handle isn't synchronized, so read
                 * it once and skip the page if no handle has been set yet.
                 */
                handle = READ_ONCE(page_owner->handle);
                if (!handle)
                        continue;

                /* Record the next PFN to read in the file offset */
                *ppos = (pfn - min_low_pfn) + 1;

                return print_page_owner(buf, count, pfn, page,
                                page_owner, handle);
        }

        return 0;
}

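/*
 * Mark pages that were already allocated when page_owner was initialized
 * with the pre-registered early_handle, so they show up with at least a
 * placeholder stack trace.
 */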
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        unsigned long pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count = 0;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This
         * does not matter as the count of early allocated pages is only
         * informational.
         */
        for (; pfn < end_pfn; ) {
                unsigned long block_end_pfn;

                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        struct page *page;
                        struct page_ext *page_ext;

                        if (!pfn_valid_within(pfn))
                                continue;

                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * To avoid having to grab zone->lock, be a little
                         * careful when reading buddy page order. The only
                         * danger is that we skip too much and potentially miss
                         * some early allocated pages, which is better than
                         * heavy lock contention.
                         */
                        if (PageBuddy(page)) {
                                unsigned long order = page_order_unsafe(page);

                                if (order > 0 && order < MAX_ORDER)
                                        pfn += (1UL << order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        /* Found early allocated page */
                        __set_page_owner_handle(page, page_ext, early_handle,
                                                0, 0);
                        count++;
                }
                cond_resched();
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                init_pages_in_zone(pgdat, zone);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
        .read           = read_page_owner,
};

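/* Create the read-only (mode 0400) debugfs file that exposes the records. */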
static int __init pageowner_init(void)
{
        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        debugfs_create_file("page_owner", 0400, NULL, NULL,
                            &proc_page_owner_operations);

        return 0;
}
late_initcall(pageowner_init)