// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 * Copyright (C) 2014 Sasha Levin <sasha.levin@oracle.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>
/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
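/*
 * These tables back the %pGp (page flags), %pGg (gfp flags) and %pGv
 * (vma flags) printk format specifiers; lib/vsprintf.c walks them to
 * turn raw flag bits into symbolic names in the dumps below.
 */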
static void __dump_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";
	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		folio = (struct folio *)page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}
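	/*
	 * In the corrupt case above, the low PAGE_MAPPING_FLAGS bits of
	 * page->mapping encode the anon/movable/KSM type; masking them
	 * off recovers the address_space pointer for pagecache pages.
	 */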
	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);
93 pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
94 page, page_ref_count(head), mapcount, mapping,
95 page_to_pgoff(page), page_to_pfn(page));
97 pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
98 head, compound_order(head),
99 folio_entire_mapcount(folio),
100 folio_nr_pages_mapped(folio),
101 atomic_read(&folio->_pincount));
#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
		page_cma ? " CMA" : "");
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);
}
void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
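/*
 * VM_BUG_ON_PAGE() reports through dump_page() before crashing; a
 * hypothetical direct call from debug code could look like:
 *
 *	if (WARN_ON(page_count(page) <= 0))
 *		dump_page(page, "unexpected refcount");
 */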
#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
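/*
 * VM_BUG_ON_VMA() uses dump_vma() the same way VM_BUG_ON_PAGE() uses
 * dump_page(); a hypothetical direct call:
 *
 *	if (WARN_ON(vma->vm_start >= vma->vm_end))
 *		dump_vma(vma);
 */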
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);
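/*
 * VM_BUG_ON_MM() is the corresponding assertion helper: it calls
 * dump_mm() on the failing mm_struct before BUG()ing.
 */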
static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
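/*
 * Command line semantics implemented by setup_vm_debug() above:
 *
 *	vm_debug	enable all debug options this knob controls
 *	vm_debug=-	disable all of them
 *	vm_debug=p	enable struct page init poisoning only
 */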
/*
 * Fill a range of struct pages with PAGE_POISON_PATTERN so that any
 * premature use trips the PagePoisoned() check handled in dump_page()
 * above.
 */
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
#endif		/* CONFIG_DEBUG_VM */