// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif

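/*
 * Illustrative note (not in the original file): PTI_CLONE_PMD shares
 * whole PMD-sized (2MB with 4k base pages) last-level page-table pages
 * between the kernel and user copies, while PTI_CLONE_PTE clones
 * individual 4kB PTEs. 32 bit uses the finer PTE granularity because
 * the kernel image is not guaranteed to be PMD-aligned there.
 */
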
static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}

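/*
 * Example command-line handling above (sketch of the resulting modes):
 *
 *	pti=off / nopti / mitigations=off -> PTI_FORCE_OFF
 *	pti=on                            -> PTI_FORCE_ON, feature forced
 *	pti=auto (or nothing)             -> enabled iff the CPU has
 *	                                     X86_BUG_CPU_MELTDOWN
 */
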
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

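/*
 * Illustrative usage (assumption, not from this file): this helper is
 * reached through the pti_set_user_pgtbl()/set_pgd() wrappers whenever
 * a PGD entry is written while PTI is enabled, e.g.:
 *
 *	set_pgd(pgdp, __pgd(_PAGE_TABLE | __pa(p4d_page)));
 *
 * For a userspace-mapping pgdp this mirrors the entry into the user
 * copy and returns an NX-hardened value for the kernel copy.
 */
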
/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);

		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);

		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);

		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down. Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables. It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);

		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

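/*
 * Sketch of how the three walk helpers above chain together (this
 * snippet is illustrative, not original code): cloning one 4k kernel
 * mapping at 'va' into the user page-tables looks like
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(va, &level);
 *	pte_t *target_pte = pti_user_pagetable_walk_pte(va);
 *
 *	if (pte && target_pte)
 *		*target_pte = *pte;
 *
 * which is essentially what pti_setup_vsyscall() below does.
 */
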
#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs. This ensures only setting
			 * _PAGE_GLOBAL on present PMDs. This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables. It is effectively
			 * global, so set it as global in both copies. Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported. The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD. That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;
			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			*target_pte = *pte;
			addr += PAGE_SIZE;
		} else {
			BUG();
		}
	}
}

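/*
 * Example invocation (taken from pti_clone_entry_text() below): clone
 * the entry/irqentry text at PMD granularity:
 *
 *	pti_clone_pgtable((unsigned long) __entry_text_start,
 *			  (unsigned long) __irqentry_text_end,
 *			  PTI_CLONE_PMD);
 */
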
#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs to be able to find the
		 * thread stack and needs one word of scratch space in which
		 * to spill a register. All of this lives in the TSS, in
		 * the sp1 and sp2 slots.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */
		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry and irqentry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __irqentry_text_end,
			  PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto. Do the most
	 * secure thing (not global) if pti=on specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages. Do the safe thing (disable
	 * global kernel image). This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures. Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

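/*
 * Summary of the checks above (illustrative): the kernel image is made
 * global only when every condition holds, i.e. no PCID, pti=auto, not
 * a K8, and RANDSTRUCT disabled; any single failure keeps it
 * non-global.
 */
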
/*
 * For some configurations, map all of kernel text into the user page
 * tables. This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web. But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel. We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of real kernel text.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX. These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

	debug_checkwx_user();
}
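
/*
 * Illustrative note (assumption, not stated in this file): pti_finalize()
 * is expected to run late in boot, once the final RO/NX protections have
 * been applied to the kernel image, so the re-clone above captures them.
 * debug_checkwx_user() then reports any W+X mappings left in the user
 * copy of the page-tables.
 */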