1 // SPDX-License-Identifier: GPL-2.0-only
/*
3  * Copyright (C) 2019 SiFive
 */
7 #include <linux/init.h>
8 #include <linux/debugfs.h>
9 #include <linux/seq_file.h>
10 #include <linux/ptdump.h>
12 #include <asm/ptdump.h>
13 #include <linux/pgtable.h>
14 #include <asm/kasan.h>
/*
 * Output helpers used throughout the dump.  NOTE(review): this extract is
 * missing lines (each line carries its original file line number), so the
 * macro bodies are incomplete here; presumably they guard on a non-NULL
 * seq_file so a walk can run silently — confirm against the full file.
 */
16 #define pt_dump_seq_printf(m, fmt, args...) \
19 seq_printf(m, fmt, ##args); \
22 #define pt_dump_seq_puts(m, fmt) \
/*
29  * The page dumper groups page table entries of the same type into a single
30  * description. It uses pg_state to track the range information while
31  * iterating over the pte entries. When the continuity is broken it then
32  * dumps out a description of the range.
 */
/*
 * State/configuration structs (fragments — the struct headers and closing
 * braces fall on lines missing from this extract).
 *
 * pg_state: per-walk state — embedded ptdump core state, the current
 * address marker, the start VA/PA of the range being coalesced, and a
 * counter of W+X pages found (accumulated in note_prot_wx()).
 */
35 struct ptdump_state ptdump;
37 const struct addr_marker *marker;
38 unsigned long start_address;
/* Physical address at the start of the current coalesced range. */
39 unsigned long start_pa;
40 unsigned long last_pa;
/* Pages found mapped both writable and executable. */
44 unsigned long wx_pages;
/* addr_marker: labelled VA boundary (see address_markers[] below). */
49 unsigned long start_address;
53 /* Private information for debugfs */
/* ptd_mm_info: per-debugfs-file walk configuration (markers + VA range). */
56 const struct addr_marker *markers;
57 unsigned long base_addr;
/*
 * Indices into address_markers[].  Entries are conditional on kernel
 * config (e.g. CONFIG_SPARSEMEM_VMEMMAP), so the ordering here must
 * match the array initializer below.  Most enumerators are on lines
 * missing from this extract.
 */
61 enum address_markers_idx {
66 #ifdef CONFIG_SPARSEMEM_VMEMMAP
74 KASAN_SHADOW_START_NR,
/*
 * Labelled kernel VA-space boundaries, printed as "---[ name ]---"
 * section headers by note_page().  Addresses left 0 here are patched in
 * at runtime by ptdump_init().  (fragment — some entries are on lines
 * missing from this extract)
 */
84 static struct addr_marker address_markers[] = {
89 #ifdef CONFIG_SPARSEMEM_VMEMMAP
93 {0, "vmalloc() area"},
95 {0, "Linear mapping"},
97 {0, "Kasan shadow start"},
98 {0, "Kasan shadow end"},
101 {0, "Modules/BPF mapping"},
102 {0, "Kernel mapping"},
/* Walk configuration behind debugfs "kernel_page_tables" (see ptdump_init). */
107 static struct ptd_mm_info kernel_ptd_info = {
109 .markers = address_markers,
/*
 * Markers for the UEFI runtime walk; a fixed two-entry list covering
 * [0, SZ_1G) — presumably the UEFI runtime mapping window, confirm
 * against the full file.
 */
115 static struct addr_marker efi_addr_markers[] = {
116 { 0, "UEFI runtime start" },
117 { SZ_1G, "UEFI runtime end" },
/* Walk configuration behind debugfs "efi_page_tables" (see ptdump_init). */
121 static struct ptd_mm_info efi_ptd_info = {
123 .markers = efi_addr_markers,
129 /* Page Table Entry */
/*
 * Decode table for PTE attribute bits; dump_prot() walks this and prints
 * one column per entry.  Most entries (and the set/clear strings) are on
 * lines missing from this extract.
 */
137 static const struct prot_bits pte_bits[] = {
149 .mask = _PAGE_ACCESSED,
150 .val = _PAGE_ACCESSED,
154 .mask = _PAGE_GLOBAL,
179 .mask = _PAGE_PRESENT,
180 .val = _PAGE_PRESENT,
/*
 * Per-level display names.  Upper levels fold onto PGD when the kernel
 * is built with fewer page-table levels, hence the CONFIG_PGTABLE_LEVELS
 * conditionals.  The per-level bit mask is computed in ptdump_init().
 */
192 static struct pg_level pg_level[] = {
196 .name = (CONFIG_PGTABLE_LEVELS > 4) ? "P4D" : "PGD",
198 .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
200 .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
/*
 * Print the attribute string for the current range: for each pte_bits[]
 * entry emit either its set marker or its clear placeholder.
 * (fragment — some body lines missing from this extract)
 */
206 static void dump_prot(struct pg_state *st)
210 for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
213 if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val)
/* Bit not set -> print the "clear" placeholder; the matching
 * set-string branch is on a line missing from this extract. */
216 s = pte_bits[i].clear;
219 pt_dump_seq_printf(st->seq, " %s", s);
/*
 * Address width for the dump: 16 hex digits vs 8.  NOTE(review): the
 * surrounding #if/#else (presumably on CONFIG_64BIT) is on lines missing
 * from this extract — confirm against the full file.
 */
224 #define ADDR_FORMAT "0x%016lx"
226 #define ADDR_FORMAT "0x%08lx"
/*
 * Print one coalesced range: VA span, start PA, human-scaled size with a
 * K/M/G/T/P/E suffix, and the page-table level name.
 * (fragment — some body lines missing from this extract)
 */
228 static void dump_addr(struct pg_state *st, unsigned long addr)
230 static const char units[] = "KMGTPE";
231 const char *unit = units;
234 pt_dump_seq_printf(st->seq, ADDR_FORMAT "-" ADDR_FORMAT " ",
235 st->start_address, addr);
237 pt_dump_seq_printf(st->seq, " " ADDR_FORMAT " ", st->start_pa);
/* Size starts in KiB; scale up while evenly divisible by 1024 and a
 * larger unit remains. */
238 delta = (addr - st->start_address) >> 10;
240 while (!(delta & 1023) && unit[1]) {
245 pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
246 pg_level[st->level].name);
/*
 * Account for insecure W+X mappings in the range ending at @addr.
 * Ranges that are not both writable and executable are ignored;
 * offenders trigger a one-time WARN and are added to st->wx_pages.
 * (fragment — some body lines missing from this extract)
 */
249 static void note_prot_wx(struct pg_state *st, unsigned long addr)
254 if ((st->current_prot & (_PAGE_WRITE | _PAGE_EXEC)) !=
255 (_PAGE_WRITE | _PAGE_EXEC))
258 WARN_ONCE(1, "riscv/mm: Found insecure W+X mapping at address %p/%pS\n",
259 (void *)st->start_address, (void *)st->start_address);
261 st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
/*
 * ptdump core callback, invoked for each entry during the walk.
 * Coalesces consecutive entries with identical level and protection
 * bits into one range, and emits the finished range (plus headers for
 * any address markers crossed) when continuity breaks.  st->level == -1
 * marks the very first entry.  (fragment — several body lines are
 * missing from this extract)
 */
264 static void note_page(struct ptdump_state *pt_st, unsigned long addr,
267 struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
268 u64 pa = PFN_PHYS(pte_pfn(__pte(val)));
/* Only bits meaningful at this level participate in the comparison. */
272 prot = val & pg_level[level].mask;
274 if (st->level == -1) {
276 st->current_prot = prot;
277 st->start_address = addr;
280 pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
/* Continuity broken: prot or level changed, or a marker was crossed. */
281 } else if (prot != st->current_prot ||
282 level != st->level || addr >= st->marker[1].start_address) {
283 if (st->current_prot) {
284 note_prot_wx(st, addr);
287 pt_dump_seq_puts(st->seq, "\n");
/* Emit a header for every marker boundary we just passed. */
290 while (addr >= st->marker[1].start_address) {
292 pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
/* Begin a new range at this entry. */
296 st->start_address = addr;
299 st->current_prot = prot;
/*
 * Walk one mm's page tables using the marker list and VA range from
 * @pinfo, printing the dump into seq_file @s.
 * (fragment — some initializer lines missing from this extract)
 */
306 static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo)
308 struct pg_state st = {
310 .marker = pinfo->markers,
313 .note_page = note_page,
314 .range = (struct ptdump_range[]) {
315 {pinfo->base_addr, pinfo->end},
321 ptdump_walk_pgd(&st.ptdump, pinfo->mm, NULL);
/*
 * Walk init_mm over the kernel VA space and report whether any W+X
 * pages exist (pr_warn on failure, pr_info on pass).  Presumably the
 * seq_file pointer is left NULL so the walk itself prints nothing —
 * the initializer lines are missing from this extract, confirm.
 */
324 void ptdump_check_wx(void)
326 struct pg_state st = {
328 .marker = (struct addr_marker[]) {
335 .note_page = note_page,
336 .range = (struct ptdump_range[]) {
337 {KERN_VIRT_START, ULONG_MAX},
343 ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
346 pr_warn("Checked W+X mappings: failed, %lu W+X pages found\n",
349 pr_info("Checked W+X mappings: passed, no W+X pages found\n");
/*
 * debugfs seq_file show callback: the file's private data is the
 * struct ptd_mm_info passed to debugfs_create_file() in ptdump_init().
 */
352 static int ptdump_show(struct seq_file *m, void *v)
354 ptdump_walk(m, m->private);
/* Generates ptdump_fops for the debugfs_create_file() calls below. */
359 DEFINE_SHOW_ATTRIBUTE(ptdump);
/*
 * Late init: patch runtime-known addresses into address_markers[],
 * compute the per-level significant-bit masks for pg_level[], and
 * create the debugfs files ("kernel_page_tables", plus
 * "efi_page_tables" when EFI runtime services are enabled).
 * (fragment — several lines missing from this extract)
 */
361 static int __init ptdump_init(void)
365 address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
366 address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
367 address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
368 address_markers[PCI_IO_END_NR].start_address = PCI_IO_END;
369 #ifdef CONFIG_SPARSEMEM_VMEMMAP
370 address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
371 address_markers[VMEMMAP_END_NR].start_address = VMEMMAP_END;
373 address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
374 address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
375 address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
377 address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
378 address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
381 address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
382 address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
385 kernel_ptd_info.base_addr = KERN_VIRT_START;
/* pg_level[i].mask = union of every decoded bit at that level. */
387 for (i = 0; i < ARRAY_SIZE(pg_level); i++)
388 for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
389 pg_level[i].mask |= pte_bits[j].mask;
/* 0400: root-readable only — the dump leaks kernel layout/PAs. */
391 debugfs_create_file("kernel_page_tables", 0400, NULL, &kernel_ptd_info,
394 if (efi_enabled(EFI_RUNTIME_SERVICES))
395 debugfs_create_file("efi_page_tables", 0400, NULL, &efi_ptd_info,
402 device_initcall(ptdump_init);