mm/page_owner: print memcg information
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

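/*
 * Per-page metadata kept in the page_ext area. For a high-order
 * allocation, each of the 1 << order subpages carries its own copy.
 */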
struct page_owner {
        unsigned short order;
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
        depot_stack_handle_t free_handle;
        u64 ts_nsec;
        u64 free_ts_nsec;
        pid_t pid;
};

static bool page_owner_enabled = false;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

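/*
 * Sentinel stack handles: "dummy" stands in when saving a stack would
 * recurse, "failure" when stack_depot_save() itself fails, and "early"
 * marks pages allocated before page_owner was initialized.
 */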
static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

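/* Parse the "page_owner=on" kernel boot parameter. */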
static int __init early_page_owner_param(char *buf)
{
        return kstrtobool(buf, &page_owner_enabled);
}
early_param("page_owner", early_page_owner_param);

static __init bool need_page_owner(void)
{
        return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
        unsigned long entries[4];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
        dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
        failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
        early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
        if (!page_owner_enabled)
                return;

        stack_depot_init();

        register_dummy_stack();
        register_failure_stack();
        register_early_stack();
        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .size = sizeof(struct page_owner),
        .need = need_page_owner,
        .init = init_page_owner,
};

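/* The page_owner record lives at page_owner_ops.offset within page_ext. */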
static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
        return (void *)page_ext + page_owner_ops.offset;
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        depot_stack_handle_t handle;
        unsigned int nr_entries;

        /*
         * Avoid recursion.
         *
         * Sometimes tracking a page's metadata allocation itself requires
         * more memory to be allocated:
         * - when a new stack trace is saved to the stack depot
         * - when the backtrace itself is calculated (ia64)
         */
        if (current->in_page_owner)
                return dummy_handle;
        current->in_page_owner = 1;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;

        current->in_page_owner = 0;
        return handle;
}

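/*
 * Free path: record the freeing stack and timestamp for every subpage
 * and clear the "allocated" bit so readers can tell the page is free.
 */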
void __reset_page_owner(struct page *page, unsigned short order)
{
        int i;
        struct page_ext *page_ext;
        depot_stack_handle_t handle;
        struct page_owner *page_owner;
        u64 free_ts_nsec = local_clock();

        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                return;

        handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
        for (i = 0; i < (1 << order); i++) {
                __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
                page_owner = get_page_owner(page_ext);
                page_owner->free_handle = handle;
                page_owner->free_ts_nsec = free_ts_nsec;
                page_ext = page_ext_next(page_ext);
        }
}

static inline void __set_page_owner_handle(struct page_ext *page_ext,
                                        depot_stack_handle_t handle,
                                        unsigned short order, gfp_t gfp_mask)
{
        struct page_owner *page_owner;
        int i;

        for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->handle = handle;
                page_owner->order = order;
                page_owner->gfp_mask = gfp_mask;
                page_owner->last_migrate_reason = -1;
                page_owner->pid = current->pid;
                page_owner->ts_nsec = local_clock();
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
                __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

                page_ext = page_ext_next(page_ext);
        }
}

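/* Allocation path: save the current stack and stamp every subpage. */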
noinline void __set_page_owner(struct page *page, unsigned short order,
                                        gfp_t gfp_mask)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        depot_stack_handle_t handle;

        if (unlikely(!page_ext))
                return;

        handle = save_stack(gfp_mask);
        __set_page_owner_handle(page_ext, handle, order, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->last_migrate_reason = reason;
}

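/* A high-order page was split: each subpage becomes its own order-0 record. */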
void __split_page_owner(struct page *page, unsigned int nr)
{
        int i;
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        for (i = 0; i < nr; i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
                page_ext = page_ext_next(page_ext);
        }
}

void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
        struct page_ext *old_ext = lookup_page_ext(&old->page);
        struct page_ext *new_ext = lookup_page_ext(&newfolio->page);
        struct page_owner *old_page_owner, *new_page_owner;

        if (unlikely(!old_ext || !new_ext))
                return;

        old_page_owner = get_page_owner(old_ext);
        new_page_owner = get_page_owner(new_ext);
        new_page_owner->order = old_page_owner->order;
        new_page_owner->gfp_mask = old_page_owner->gfp_mask;
        new_page_owner->last_migrate_reason =
                old_page_owner->last_migrate_reason;
        new_page_owner->handle = old_page_owner->handle;
        new_page_owner->pid = old_page_owner->pid;
        new_page_owner->ts_nsec = old_page_owner->ts_nsec;
        new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;

        /*
         * We don't clear the bit on the old folio as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the old folio to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
        __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
}

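/*
 * Count pages whose migratetype does not match their pageblock's, i.e.
 * "mixed" blocks that undermine grouping by mobility; the result feeds
 * the /proc/pagetypeinfo output.
 */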
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                       pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        unsigned long pfn, block_end_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count[MIGRATE_TYPES] = { 0, };
        int pageblock_mt, page_mt;
        int i;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                page = pfn_to_online_page(pfn);
                if (!page) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                pageblock_mt = get_pageblock_migratetype(page);

                for (; pfn < block_end_pfn; pfn++) {
                        /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        if (PageBuddy(page)) {
                                unsigned long freepage_order;

                                freepage_order = buddy_order_unsafe(page);
                                if (freepage_order < MAX_ORDER)
                                        pfn += (1UL << freepage_order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                                continue;

                        page_owner = get_page_owner(page_ext);
                        page_mt = gfp_migratetype(page_owner->gfp_mask);
                        if (pageblock_mt != page_mt) {
                                if (is_migrate_cma(pageblock_mt))
                                        count[MIGRATE_MOVABLE]++;
                                else
                                        count[pageblock_mt]++;

                                pfn = block_end_pfn;
                                break;
                        }
                        pfn += (1UL << page_owner->order) - 1;
                }
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (i = 0; i < MIGRATE_TYPES; i++)
                seq_printf(m, "%12lu ", count[i]);
        seq_putc(m, '\n');
}

/*
 * Look up the memcg this page is charged to, if any, and append that
 * information to the output buffer.
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
                                         struct page *page)
{
#ifdef CONFIG_MEMCG
        unsigned long memcg_data;
        struct mem_cgroup *memcg;
        bool online;
        char name[80];

        rcu_read_lock();
        memcg_data = READ_ONCE(page->memcg_data);
        if (!memcg_data)
                goto out_unlock;

        if (memcg_data & MEMCG_DATA_OBJCGS)
                ret += scnprintf(kbuf + ret, count - ret,
                                "Slab cache page\n");

        memcg = page_memcg_check(page);
        if (!memcg)
                goto out_unlock;

        online = (memcg->css.flags & CSS_ONLINE);
        cgroup_name(memcg->css.cgroup, name, sizeof(name));
        ret += scnprintf(kbuf + ret, count - ret,
                        "Charged %sto %smemcg %s\n",
                        PageMemcgKmem(page) ? "(via objcg) " : "",
                        online ? "" : "offline ",
                        name);
out_unlock:
        rcu_read_unlock();
#endif /* CONFIG_MEMCG */

        return ret;
}

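/*
 * Format one page_owner record into a kernel buffer and copy it to
 * userspace. Returns the number of bytes written, -EFAULT on a failed
 * copy, or -ENOMEM if the record did not fit in the buffer.
 */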
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
{
        int ret, pageblock_mt, page_mt;
        char *kbuf;

        count = min_t(size_t, count, PAGE_SIZE);
        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = scnprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns, free_ts %llu ns\n",
                        page_owner->order, page_owner->gfp_mask,
                        &page_owner->gfp_mask, page_owner->pid,
                        page_owner->ts_nsec, page_owner->free_ts_nsec);

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt = gfp_migratetype(page_owner->gfp_mask);
        ret += scnprintf(kbuf + ret, count - ret,
                        "PFN %lu type %s Block %lu type %s Flags %pGp\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        &page->flags);

        ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
        if (ret >= count)
                goto err;

        if (page_owner->last_migrate_reason != -1) {
                ret += scnprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
        }

        ret = print_page_owner_memcg(kbuf, count, ret, page);

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}

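/*
 * Dump the page_owner record for a single page to the kernel log;
 * reached via dump_page() when something looks wrong with the page.
 */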
void __dump_page_owner(const struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;
        depot_stack_handle_t handle;
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }

        page_owner = get_page_owner(page_ext);
        gfp_mask = page_owner->gfp_mask;
        mt = gfp_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not present (never set?)\n");
                return;
        }

        if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                pr_alert("page_owner tracks the page as allocated\n");
        else
                pr_alert("page_owner tracks the page as freed\n");

        pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu, free_ts %llu\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
                 page_owner->pid, page_owner->ts_nsec, page_owner->free_ts_nsec);

        handle = READ_ONCE(page_owner->handle);
        if (!handle)
                pr_alert("page_owner allocation stack trace missing\n");
        else
                stack_depot_print(handle);

        handle = READ_ONCE(page_owner->free_handle);
        if (!handle) {
                pr_alert("page_owner free stack trace missing\n");
        } else {
                pr_alert("page last free stack trace:\n");
                stack_depot_print(handle);
        }

        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
}

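/*
 * debugfs read handler: *ppos is used as a PFN cursor rather than a
 * byte offset, so each read() returns the record for the next allocated
 * page and advances the cursor past it.
 */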
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        depot_stack_handle_t handle;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        pfn = min_low_pfn + *ppos;

        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        drain_all_pages(NULL);

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = buddy_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = lookup_page_ext(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;

                /*
                 * Although we do have the info about past allocation of free
                 * pages, it's not relevant for current memory usage.
                 */
                if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                        continue;

                page_owner = get_page_owner(page_ext);

                /*
                 * Don't print "tail" pages of high-order allocations as that
                 * would inflate the stats.
                 */
                if (!IS_ALIGNED(pfn, 1 << page_owner->order))
                        continue;

                /*
                 * Access to page_owner->handle isn't synchronized, so take
                 * care when reading it.
                 */
                handle = READ_ONCE(page_owner->handle);
                if (!handle)
                        continue;

                /* Record the next PFN to read in the file offset */
                *ppos = (pfn - min_low_pfn) + 1;

                return print_page_owner(buf, count, pfn, page,
                                page_owner, handle);
        }

        return 0;
}

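/*
 * Pages allocated before page_owner came up have no record; walk every
 * zone once at init time and tag them with the "early" handle so they
 * at least show up in the stats.
 */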
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        unsigned long pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count = 0;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                unsigned long block_end_pfn;

                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        struct page *page = pfn_to_page(pfn);
                        struct page_ext *page_ext;

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * To avoid having to grab zone->lock, be a little
                         * careful when reading buddy page order. The only
                         * danger is that we skip too much and potentially miss
                         * some early allocated pages, which is better than
                         * heavy lock contention.
                         */
                        if (PageBuddy(page)) {
                                unsigned long order = buddy_order_unsafe(page);

                                if (order > 0 && order < MAX_ORDER)
                                        pfn += (1UL << order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        /* Found early allocated page */
                        __set_page_owner_handle(page_ext, early_handle,
                                                0, 0);
                        count++;
                }
                cond_resched();
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                init_pages_in_zone(pgdat, zone);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

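/*
 * With debugfs mounted at its conventional /sys/kernel/debug, the records
 * are exposed read-only to root (mode 0400) at /sys/kernel/debug/page_owner.
 */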
static const struct file_operations proc_page_owner_operations = {
        .read           = read_page_owner,
};

static int __init pageowner_init(void)
{
        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        debugfs_create_file("page_owner", 0400, NULL, NULL,
                            &proc_page_owner_operations);

        return 0;
}
late_initcall(pageowner_init)
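
/*
 * Example usage from userspace (illustrative, not part of this file):
 * boot with "page_owner=on", then dump all records with
 *
 *   cat /sys/kernel/debug/page_owner > page_owner_full.txt
 *
 * With the memcg change above, a record for a charged page gains a line
 * of the form "Charged [(via objcg) ]to [offline ]memcg <name>", per the
 * scnprintf() format in print_page_owner_memcg().
 */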