// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
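
/*
 * Illustration only (abridged, not the real header contents): given a
 * list such as
 *
 *	#define MIGRATE_REASON				\
 *		EM(MR_COMPACTION,	"compaction")	\
 *		EMe(MR_MEMORY_FAILURE,	"memory_failure")
 *
 * the definitions above expand migrate_reason_names[] to
 * { "compaction", "memory_failure" }, one string per MR_* value.
 */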

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
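
/*
 * These tables back the %pGp/%pGg/%pGv printk extensions so that flag
 * words can be printed symbolically. A minimal illustrative use:
 *
 *	pr_warn("flags: %pGp\n", &page->flags);
 *
 * decodes page->flags against pageflag_names[] above.
 */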

static void __dump_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		folio = (struct folio *)page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount(): page->_mapcount space in
	 * struct page is used by sl[aou]b pages to encode their own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
				head, compound_order(head),
				folio_entire_mapcount(folio),
				head_compound_pincount(head));
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
		page_cma ? " CMA" : "");
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), head,
			sizeof(struct page), false);
}

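/*
 * Entry point used by VM_BUG_ON_PAGE() and friends. An illustrative
 * direct call:
 *
 *	dump_page(page, "unexpected refcount");
 *
 * The reason string is optional and may be NULL.
 */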
void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

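/*
 * Used by VM_BUG_ON_VMA() to report the offending VMA. Pointers are
 * deliberately printed with %px (unhashed) since this output exists
 * purely for debugging.
 */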
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

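/*
 * Note that the format string and the argument list below are assembled
 * under the same #ifdef conditions and must stay in matching order.
 */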
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}

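/*
 * Boot-time control, as parsed by setup_vm_debug() below (illustrative
 * command lines):
 *
 *	vm_debug	enable every option we control (currently only
 *			page struct poisoning)
 *	vm_debug=-	disable all options
 *	vm_debug=P	enable page struct poisoning explicitly
 */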
static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments requests that every
	 * debugging option we can control be enabled.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

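/*
 * Fill not-yet-initialized struct pages with PAGE_POISON_PATTERN so that
 * premature use trips the PagePoisoned() check in dump_page() above.
 * Callers pass a byte count, e.g. (illustrative):
 *
 *	page_init_poison(page, sizeof(struct page) * nr_pages);
 */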
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
#endif		/* CONFIG_DEBUG_VM */