/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>

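/*
 * Human-readable strings for the MR_* migrate reasons, indexed by
 * enum migrate_reason; used when reporting why a page was migrated.
 */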
char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};
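
/*
 * Flag-name tables consumed by the %pGp, %pGg and %pGv printk format
 * extensions; each must end with a {0, NULL} terminator.
 */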
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
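
/*
 * Print the state of one struct page: refcount, mapcount, mapping,
 * index, decoded flags, and a raw hex dump of the descriptor.
 */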
void __dump_page(struct page *page, const char *reason)
{
	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	int mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
		  page, page_ref_count(page), mapcount,
		  page->mapping, page_to_pgoff(page));
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
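	/*
	 * Build-time check that pageflag_names describes every page flag,
	 * plus its {0, NULL} terminator; otherwise %pGp output would be
	 * incomplete.
	 */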
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);

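	/* Dump the raw words of struct page so corrupt fields are still visible. */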
	print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);

	if (reason)
		pr_alert("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (page->mem_cgroup)
		pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
#endif
}
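
/*
 * Public entry point: print the page state via __dump_page() and, when
 * page_owner is enabled, the stack that allocated the page.
 */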
void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
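
/*
 * Most callers reach dump_page() through VM_BUG_ON_PAGE(), which with
 * CONFIG_DEBUG_VM dumps the offending page before BUG()ing, e.g.:
 *
 *	VM_BUG_ON_PAGE(PageTail(page), page);
 */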

#ifdef CONFIG_DEBUG_VM
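
/* Print the interesting fields of a VMA, with vm_flags decoded via %pGv. */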
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %p start %p end %p\n"
		"next %p prev %p mm %p\n"
		"prot %lx anon_vma %p vm_ops %p\n"
		"pgoff %lx file %p private_data %p\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
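
/*
 * Print the whole mm_struct. The format string and the argument list
 * below must stay in sync: every #ifdef block in the format has a
 * matching #ifdef block in the arguments.
 */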
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %p\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %p flags %lx core_state %p\n"
#ifdef CONFIG_AIO
		"ioctx_table %p\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %p "
#endif
		"exe_file %p\n"
#ifdef CONFIG_MMU_NOTIFIER
		"mmu_notifier_mm %p\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		"tlb_flush_pending %d\n"
#endif
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		atomic_long_read((atomic_long_t *)&mm->nr_ptes),
		mm_nr_pmds((struct mm_struct *)mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		mm->tlb_flush_pending,
#endif
		mm->def_flags, &mm->def_flags
	);
}

#endif /* CONFIG_DEBUG_VM */