x86/mm/dump_pagetables: Define INIT_PGD
arch/x86/mm/dump_pagetables.c (platform/kernel/linux-rpi.git)
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
        int level;
        pgprot_t current_prot;
        pgprotval_t effective_prot;
        unsigned long start_address;
        unsigned long current_address;
        const struct addr_marker *marker;
        unsigned long lines;
        bool to_dmesg;
        bool check_wx;
        unsigned long wx_pages;
};

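/* A named boundary in the address space; max_lines == 0 means "no limit". */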
struct addr_marker {
        unsigned long start_address;
        const char *name;
        unsigned long max_lines;
};

/* Address space marker hints */

#ifdef CONFIG_X86_64

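/*
 * Indices into address_markers[], in ascending address order.  The LDT
 * remap region sits at a different point in the layout depending on
 * CONFIG_X86_5LEVEL, hence the two conditional positions for LDT_NR.
 */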
enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        LOW_KERNEL_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
        LDT_NR,
#endif
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
        KASAN_SHADOW_START_NR,
        KASAN_SHADOW_END_NR,
#endif
        CPU_ENTRY_AREA_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
        LDT_NR,
#endif
#ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
        EFI_END_NR,
#endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { (1UL << 63),        "Kernel Space" },
        [LOW_KERNEL_NR]         = { 0UL,                "Low Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMEMMAP_START_NR]      = { 0UL,                "Vmemmap" },
#ifdef CONFIG_KASAN
        /*
         * These fields get initialized with the (dynamic)
         * KASAN_SHADOW_{START,END} values in pt_dump_init().
         */
        [KASAN_SHADOW_START_NR] = { 0UL,                "KASAN shadow" },
        [KASAN_SHADOW_END_NR]   = { 0UL,                "KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,                "LDT remap" },
#endif
        [CPU_ENTRY_AREA_NR]     = { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
        [ESPFIX_START_NR]       = { ESPFIX_BASE_ADDR,   "ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
        [EFI_END_NR]            = { EFI_VA_END,         "EFI Runtime Services" },
#endif
        [HIGH_KERNEL_NR]        = { __START_KERNEL_map, "High Kernel Mapping" },
        [MODULES_VADDR_NR]      = { MODULES_VADDR,      "Modules" },
        [MODULES_END_NR]        = { MODULES_END,        "End Modules" },
        [FIXADDR_START_NR]      = { FIXADDR_START,      "Fixmap Area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};

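/*
 * Root of the kernel's own page tables; used by the dumper whenever the
 * caller does not supply an explicit pgd.
 */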
#define INIT_PGD        ((pgd_t *) &init_top_pgt)

#else /* CONFIG_X86_64 */

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        VMALLOC_START_NR,
        VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
#endif
        CPU_ENTRY_AREA_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { PAGE_OFFSET,        "Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMALLOC_END_NR]        = { 0UL,                "vmalloc() End" },
#ifdef CONFIG_HIGHMEM
        [PKMAP_BASE_NR]         = { 0UL,                "Persistent kmap() Area" },
#endif
        [CPU_ENTRY_AREA_NR]     = { 0UL,                "CPU entry area" },
        [FIXADDR_START_NR]      = { 0UL,                "Fixmap area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};

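/* On 32-bit the kernel's page-table root is swapper_pg_dir. */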
#define INIT_PGD        (swapper_pg_dir)

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
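/*
 * Example, x86-64 with 4K pages: PTE_LEVEL_MULT = 4 KiB,
 * PMD_LEVEL_MULT = 2 MiB, PUD_LEVEL_MULT = 1 GiB,
 * P4D_LEVEL_MULT = 512 GiB; with 4-level paging PTRS_PER_P4D == 1,
 * so PGD_LEVEL_MULT is also 512 GiB.
 */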

#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)           \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_INFO fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)          \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_CONT fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
        pgprotval_t pr = pgprot_val(prot);
        static const char * const level_name[] =
                { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

        if (!(pr & _PAGE_PRESENT)) {
                /* Not present */
                pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
                if (pr & _PAGE_USER)
                        pt_dump_cont_printf(m, dmsg, "USR ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_RW)
                        pt_dump_cont_printf(m, dmsg, "RW ");
                else
                        pt_dump_cont_printf(m, dmsg, "ro ");
                if (pr & _PAGE_PWT)
                        pt_dump_cont_printf(m, dmsg, "PWT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_PCD)
                        pt_dump_cont_printf(m, dmsg, "PCD ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");

                /* Bit 7 has a different meaning on level 3 vs 4 */
                if (level <= 4 && pr & _PAGE_PSE)
                        pt_dump_cont_printf(m, dmsg, "PSE ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if ((level == 5 && pr & _PAGE_PAT) ||
                    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
                        pt_dump_cont_printf(m, dmsg, "PAT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_GLOBAL)
                        pt_dump_cont_printf(m, dmsg, "GLB ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_NX)
                        pt_dump_cont_printf(m, dmsg, "NX ");
                else
                        pt_dump_cont_printf(m, dmsg, "x  ");
        }
        pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}

/*
 * On 64-bit, sign-extend the (__VIRTUAL_MASK_SHIFT + 1)-bit virtual
 * address to its canonical 64-bit form.
 */
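/*
 * E.g. with 4-level paging (__VIRTUAL_MASK_SHIFT == 47),
 * 0x0000800000000000 normalizes to 0xffff800000000000.
 */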
static unsigned long normalize_addr(unsigned long u)
{
        int shift;

        if (!IS_ENABLED(CONFIG_X86_64))
                return u;

        shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
        return (signed long)(u << shift) >> shift;
}

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
                      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
        pgprotval_t prot, cur, eff;
        static const char units[] = "BKMGTPE";

        /*
         * If we have a "break" in the series, we need to flush the state that
         * we have now. "break" is either changing perms, levels or
         * address space marker.
         */
        prot = pgprot_val(new_prot);
        cur = pgprot_val(st->current_prot);
        eff = st->effective_prot;

        if (!st->level) {
                /* First entry */
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
                st->marker = address_markers;
                st->lines = 0;
                pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                   st->marker->name);
        } else if (prot != cur || new_eff != eff || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;
                int width = sizeof(unsigned long) * 2;

                if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) {
                        WARN_ONCE(1,
                                  "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
                                  (void *)st->start_address,
                                  (void *)st->start_address);
                        st->wx_pages += (st->current_address -
                                         st->start_address) / PAGE_SIZE;
                }

                /*
                 * Now print the actual finished series
                 */
                if (!st->marker->max_lines ||
                    st->lines < st->marker->max_lines) {
                        pt_dump_seq_printf(m, st->to_dmesg,
                                           "0x%0*lx-0x%0*lx   ",
                                           width, st->start_address,
                                           width, st->current_address);

                        delta = st->current_address - st->start_address;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
                                            delta, *unit);
                        printk_prot(m, st->current_prot, st->level,
                                    st->to_dmesg);
                }
                st->lines++;

                /*
                 * We print markers for special areas of address space,
                 * such as the start of vmalloc space etc.
                 * This helps in the interpretation.
                 */
                if (st->current_address >= st->marker[1].start_address) {
                        if (st->marker->max_lines &&
                            st->lines > st->marker->max_lines) {
                                unsigned long nskip =
                                        st->lines - st->marker->max_lines;
                                pt_dump_seq_printf(m, st->to_dmesg,
                                                   "... %lu entr%s skipped ... \n",
                                                   nskip,
                                                   nskip == 1 ? "y" : "ies");
                        }
                        st->marker++;
                        st->lines = 0;
                        pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                           st->marker->name);
                }

                st->start_address = st->current_address;
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
        }
}

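/*
 * Combine permissions across levels: USER and RW must be set at every
 * level to take effect (AND), while NX at any level makes the whole
 * range non-executable (OR).
 */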
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
        return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
               ((prot1 | prot2) & _PAGE_NX);
}

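/* Dump every PTE under one PMD entry. */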
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pte_t *pte;
        pgprotval_t prot, eff;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
                pte = pte_offset_map(&addr, st->current_address);
                prot = pte_flags(*pte);
                eff = effective_prot(eff_in, prot);
                note_page(m, st, __pgprot(prot), eff, 5);
                pte_unmap(pte);
        }
}

#ifdef CONFIG_KASAN

/*
 * This is an optimization for the KASAN=y case. Since all KASAN page
 * tables eventually point to the kasan_zero_page, we can call
 * note_page() right away without walking through the lower-level page
 * tables. This saves dozens of seconds (minutes for a 5-level config)
 * when checking for W+X mappings or reading the kernel_page_tables
 * debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                void *pt)
{
        if (__pa(pt) == __pa(kasan_zero_pmd) ||
            (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
            __pa(pt) == __pa(kasan_zero_pud)) {
                pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
                note_page(m, st, __pgprot(prot), 0, 5);
                return true;
        }
        return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                void *pt)
{
        return false;
}
#endif

#if PTRS_PER_PMD > 1

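/* Walk the PMDs below one PUD entry, recursing into PTE tables. */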
static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pmd_t *start, *pmd_start;
        pgprotval_t prot, eff;

        pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
                if (!pmd_none(*start)) {
                        prot = pmd_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pmd_large(*start) || !pmd_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 4);
                        } else if (!kasan_page_table(m, st, pmd_start)) {
                                walk_pte_level(m, st, *start, eff,
                                               P + i * PMD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 4);
                start++;
        }
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif

#if PTRS_PER_PUD > 1

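/* Walk the PUDs below one P4D entry, recursing into PMD tables. */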
static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pud_t *start, *pud_start;
        pgprotval_t prot, eff;
        pud_t *prev_pud = NULL;

        pud_start = start = (pud_t *)p4d_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_PUD; i++) {
                st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
                if (!pud_none(*start)) {
                        prot = pud_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pud_large(*start) || !pud_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 3);
                        } else if (!kasan_page_table(m, st, pud_start)) {
                                walk_pmd_level(m, st, *start, eff,
                                               P + i * PUD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 3);

                prev_pud = start;
                start++;
        }
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif

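/*
 * Walk the P4Ds below one PGD entry.  With 4-level paging the p4d
 * level is folded into the pgd, so PTRS_PER_P4D == 1 and the entry is
 * handed straight down to the PUD walker.
 */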
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        p4d_t *start, *p4d_start;
        pgprotval_t prot, eff;

        if (PTRS_PER_P4D == 1)
                return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

        p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_P4D; i++) {
                st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
                if (!p4d_none(*start)) {
                        prot = p4d_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (p4d_large(*start) || !p4d_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 2);
                        } else if (!kasan_page_table(m, st, p4d_start)) {
                                walk_pud_level(m, st, *start, eff,
                                               P + i * P4D_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 2);

                start++;
        }
}

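/*
 * When 5-level paging is compiled in but disabled at runtime, a pgd
 * entry is really a p4d entry, so delegate to the p4d helpers.  The
 * self-reference is safe: the preprocessor does not expand a macro
 * recursively, so the inner pgd_large()/pgd_none() resolve to the
 * generic implementations.
 */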
#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))

static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
        /*
         * ffff800000000000 - ffff87ffffffffff is reserved for
         * the hypervisor.
         */
        return  (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
                (idx <  pgd_index(__PAGE_OFFSET));
#else
        return false;
#endif
}

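/*
 * Core of the dumper: walk every PGD slot of the given page table (or
 * of INIT_PGD when pgd is NULL), merging ranges via note_page().  With
 * checkwx set, pages that are both writable and executable are counted
 * and reported.  dmesg output is only honoured when an explicit pgd
 * was passed in.
 */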
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                                       bool checkwx, bool dmesg)
{
        pgd_t *start = INIT_PGD;
        pgprotval_t prot, eff;
        int i;
        struct pg_state st = {};

        if (pgd) {
                start = pgd;
                st.to_dmesg = dmesg;
        }

        st.check_wx = checkwx;
        if (checkwx)
                st.wx_pages = 0;

        for (i = 0; i < PTRS_PER_PGD; i++) {
                st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
                if (!pgd_none(*start) && !is_hypervisor_range(i)) {
                        prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
                        eff = _PAGE_USER | _PAGE_RW;
#else
                        eff = prot;
#endif
                        if (pgd_large(*start) || !pgd_present(*start)) {
                                note_page(m, &st, __pgprot(prot), eff, 1);
                        } else {
                                walk_p4d_level(m, &st, *start, eff,
                                               i * PGD_LEVEL_MULT);
                        }
                } else
                        note_page(m, &st, __pgprot(0), 0, 1);

                cond_resched();
                start++;
        }

        /* Flush out the last page */
        st.current_address = normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT);
        note_page(m, &st, __pgprot(0), 0, 0);
        if (!checkwx)
                return;
        if (st.wx_pages)
                pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
                        st.wx_pages);
        else
                pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
        ptdump_walk_pgd_level_core(m, pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        if (user && static_cpu_has(X86_FEATURE_PTI))
                pgd = kernel_to_user_pgdp(pgd);
#endif
        ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);

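/*
 * With page-table isolation (PTI) enabled, every page-table root has a
 * second, user-mode copy; verify that copy for W+X mappings as well.
 */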
static void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        pgd_t *pgd = INIT_PGD;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        pr_info("x86/mm: Checking user space page tables\n");
        pgd = kernel_to_user_pgdp(pgd);
        ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
        ptdump_walk_pgd_level_core(NULL, NULL, true, false);
        ptdump_walk_user_pgd_level_checkwx();
}

static int __init pt_dump_init(void)
{
        /*
         * Various markers are not compile-time constants, so assign them
         * here.
         */
#ifdef CONFIG_X86_64
        address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
        address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
        address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
        address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
#endif
        return 0;
}
__initcall(pt_dump_init);
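
/*
 * Typical usage (a sketch; the exact debugfs path is version-dependent,
 * assuming CONFIG_X86_PTDUMP=y and debugfs mounted):
 *
 *   # cat /sys/kernel/debug/page_tables/kernel
 *
 * Older kernels expose a single /sys/kernel/debug/kernel_page_tables
 * file instead.  The W+X check runs automatically at boot when
 * CONFIG_DEBUG_WX=y.
 */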