// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

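/*
 * Human-readable labels for the migration reasons reported via tracing.
 * These are indexed by enum migrate_reason (see include/linux/migrate.h),
 * so the order below is assumed to mirror that enum, with "cma"
 * corresponding to MR_CONTIG_RANGE.
 */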
const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
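
/*
 * Note (assumed from how these tables are used below): the %pGp and %pGv
 * printk specifiers used by __dump_page(), dump_vma() and dump_mm() decode
 * page and vma flags against these NULL-terminated tables; gfpflag_names
 * serves the corresponding %pGg specifier in the same way.
 */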

void __dump_page(struct page *page, const char *reason)
{
	struct page *head = compound_head(page);
	struct address_space *mapping;
	bool page_poisoned = PagePoisoned(page);
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	/*
	 * If struct page is poisoned, don't access Page*() functions, as that
	 * leads to a recursive loop. Page*() checks for poisoned pages and
	 * calls dump_page() when one is detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode their own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		if (hpage_pincount_available(page)) {
			pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head),
					head_compound_pincount(head));
		} else {
			pr_warn("head:%p order:%u compound_mapcount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head));
		}
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		struct inode *host;
		const struct address_space_operations *a_ops;
		struct hlist_node *dentry_first;
		struct dentry *dentry_ptr;
		struct dentry dentry;
		unsigned long ino;

		/*
		 * mapping can be an invalid pointer and we don't want to crash
		 * accessing it, so probe everything that depends on it carefully.
		 */
		if (get_kernel_nofault(host, &mapping->host) ||
		    get_kernel_nofault(a_ops, &mapping->a_ops)) {
			pr_warn("failed to read mapping contents, not a valid kernel address?\n");
			goto out_mapping;
		}

		if (!host) {
			pr_warn("aops:%ps\n", a_ops);
			goto out_mapping;
		}

		if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
		    get_kernel_nofault(ino, &host->i_ino)) {
			pr_warn("aops:%ps with invalid host inode %px\n",
					a_ops, host);
			goto out_mapping;
		}

		if (!dentry_first) {
			pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
			goto out_mapping;
		}

		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
		if (get_kernel_nofault(dentry, dentry_ptr)) {
			pr_warn("aops:%ps ino:%lx with invalid dentry %px\n",
					a_ops, ino, dentry_ptr);
		} else {
			/*
			 * if dentry is corrupted, the %pd handler may still
			 * crash, but it's unlikely that we reach here with a
			 * corrupted struct page
			 */
			pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n",
					a_ops, ino, &dentry);
		}
	}
out_mapping:
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
		page_cma ? " CMA" : "");

hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), head,
			sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);
}

void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
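
/*
 * Illustrative usage (not part of this file): dump_page() is usually reached
 * through the CONFIG_DEBUG_VM assertion helpers in <linux/mmdebug.h>, e.g.
 *
 *	VM_BUG_ON_PAGE(PageTail(page), page);
 *
 * which dump the offending page before BUG()ing, or it can be called
 * directly with a human-readable reason:
 *
 *	dump_page(page, "unexpected page state");
 */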

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
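
/*
 * Usage note (cross-reference): dump_vma() is typically reached via the
 * VM_BUG_ON_VMA() assertion in <linux/mmdebug.h>, which dumps the VMA
 * before BUG()ing when CONFIG_DEBUG_VM is enabled.
 */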

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
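
/*
 * Usage note (cross-reference): dump_mm() similarly backs the
 * VM_BUG_ON_MM() assertion in <linux/mmdebug.h>.
 */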

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
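
/*
 * Example command lines (derived from the parser above): booting with
 * "vm_debug" or "vm_debug=p" keeps struct page poisoning enabled, while
 * "vm_debug=-" disables every option this parameter controls.
 */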

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
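
/*
 * Note (cross-reference): filling struct pages with PAGE_POISON_PATTERN is
 * what lets the PagePoisoned() check in __dump_page() above flag pages that
 * are used before they have been initialized.
 */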
#endif		/* CONFIG_DEBUG_VM */