powerpc/64e: KASAN Full support for BOOK3E/64
authorChristophe Leroy <christophe.leroy@csgroup.eu>
Tue, 28 Jun 2022 14:48:59 +0000 (16:48 +0200)
committerMichael Ellerman <mpe@ellerman.id.au>
Wed, 29 Jun 2022 07:04:15 +0000 (17:04 +1000)
We now have memory organised in a way that allows
implementing KASAN.

Unlike book3s/64, book3e always has translation active so the only
thing needed to use KASAN is to setup an early zero shadow mapping
just after setting a stack pointer and before calling early_setup().

The memory layout is now as follows:

   +------------------------+  Kernel virtual map end (0xc000200000000000)
   |                        |
   |    16TB of KASAN map   |
   |                        |
   +------------------------+  Kernel KASAN shadow map start
   |                        |
   |    16TB of IO map      |
   |                        |
   +------------------------+  Kernel IO map start
   |                        |
   |    16TB of vmemmap     |
   |                        |
   +------------------------+  Kernel vmemmap start
   |                        |
   |    16TB of vmap        |
   |                        |
   +------------------------+  Kernel virt start (0xc000100000000000)
   |                        |
   |    64TB of linear mem  |
   |                        |
   +------------------------+  Kernel linear (0xc.....)

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/0bef8beda27baf71e3b9e8b13e620fba6e19499b.1656427701.git.christophe.leroy@csgroup.eu
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/include/asm/kasan.h
arch/powerpc/kernel/head_64.S
arch/powerpc/mm/kasan/Makefile
arch/powerpc/mm/kasan/init_book3e_64.c [new file with mode: 0644]
arch/powerpc/mm/kasan/init_book3s_64.c
arch/powerpc/platforms/Kconfig.cputype

index c2ce2e6..92e0cad 100644 (file)
@@ -193,6 +193,7 @@ config PPC
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN                  if PPC32 && PPC_PAGE_SHIFT <= 14
        select HAVE_ARCH_KASAN                  if PPC_RADIX_MMU
+       select HAVE_ARCH_KASAN                  if PPC_BOOK3E_64
        select HAVE_ARCH_KASAN_VMALLOC          if HAVE_ARCH_KASAN
        select HAVE_ARCH_KFENCE                 if PPC_BOOK3S_32 || PPC_8xx || 40x
        select HAVE_ARCH_KGDB
@@ -254,6 +255,7 @@ config PPC
        select IOMMU_HELPER                     if PPC64
        select IRQ_DOMAIN
        select IRQ_FORCED_THREADING
+       select KASAN_VMALLOC                    if KASAN && MODULES
        select MMU_GATHER_PAGE_SIZE
        select MMU_GATHER_RCU_TABLE_FREE
        select MODULES_USE_ELF_RELA
index 9f363c1..6a8855d 100644 (file)
@@ -375,4 +375,5 @@ config KASAN_SHADOW_OFFSET
        hex
        depends on KASAN
        default 0xe0000000 if PPC32
-       default 0xa80e000000000000 if PPC64
+       default 0xa80e000000000000 if PPC_BOOK3S_64
+       default 0xa8001c0000000000 if PPC_BOOK3E_64
index a6be402..92a9682 100644 (file)
@@ -19,7 +19,7 @@
 
 #define KASAN_SHADOW_SCALE_SHIFT       3
 
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) && defined(CONFIG_PPC32)
 #define KASAN_KERN_START       ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
 #else
 #define KASAN_KERN_START       PAGE_OFFSET
  * c00e000000000000 << 3 + a80e000000000000 = c00fc00000000000
  */
 #define KASAN_SHADOW_END 0xc00fc00000000000UL
+
+#else
+
+/*
+ * The shadow ends before the highest accessible address
+ * because we don't need a shadow for the shadow.
+ * But having a shadow for the shadow doesn't hurt, and
+ * keeping the shadow end aligned eases things.
+ */
+#define KASAN_SHADOW_END 0xc000200000000000UL
+
 #endif
 
 #ifdef CONFIG_KASAN
index d3eea63..cf2c089 100644 (file)
@@ -965,6 +965,9 @@ start_here_multiplatform:
         * and SLB setup before we turn on relocation.
         */
 
+#ifdef CONFIG_KASAN
+       bl      kasan_early_init
+#endif
        /* Restore parameters passed from prom_init/kexec */
        mr      r3,r31
        LOAD_REG_ADDR(r12, DOTSYM(early_setup))
index 4999aad..699eeff 100644 (file)
@@ -6,3 +6,4 @@ obj-$(CONFIG_PPC32)             += init_32.o
 obj-$(CONFIG_PPC_8xx)          += 8xx.o
 obj-$(CONFIG_PPC_BOOK3S_32)    += book3s_32.o
 obj-$(CONFIG_PPC_BOOK3S_64)    += init_book3s_64.o
+obj-$(CONFIG_PPC_BOOK3E_64)    += init_book3e_64.o
diff --git a/arch/powerpc/mm/kasan/init_book3e_64.c b/arch/powerpc/mm/kasan/init_book3e_64.c
new file mode 100644 (file)
index 0000000..11519e8
--- /dev/null
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KASAN for 64-bit Book3e powerpc
+ *
+ * Copyright 2022, Christophe Leroy, CS GROUP France
+ */
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/printk.h>
+#include <linux/memblock.h>
+#include <linux/set_memory.h>
+
+#include <asm/pgalloc.h>
+
+/* True when the P4D entry still points at the shared early-shadow PUD table */
+static inline bool kasan_pud_table(p4d_t p4d)
+{
+       return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
+}
+
+/* True when the PUD entry still points at the shared early-shadow PMD table */
+static inline bool kasan_pmd_table(pud_t pud)
+{
+       return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
+}
+
+/* True when the PMD entry still points at the shared early-shadow PTE table */
+static inline bool kasan_pte_table(pmd_t pmd)
+{
+       return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
+}
+
+/*
+ * Map one shadow page: effective address @ea -> physical address @pa
+ * with protection @prot.
+ *
+ * Any page-table level still shared with the early zero shadow is
+ * first replaced by a private copy, so the new PTE affects only @ea.
+ * Always returns 0.
+ * NOTE(review): memblock_alloc() results are not checked — presumably
+ * assumed to succeed this early in boot; confirm that is intended.
+ */
+static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
+{
+       pgd_t *pgdp;
+       p4d_t *p4dp;
+       pud_t *pudp;
+       pmd_t *pmdp;
+       pte_t *ptep;
+
+       pgdp = pgd_offset_k(ea);
+       p4dp = p4d_offset(pgdp, ea);
+       /* Still the shared early PUD? Give this P4D entry its own copy. */
+       if (kasan_pud_table(*p4dp)) {
+               pudp = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
+               memcpy(pudp, kasan_early_shadow_pud, PUD_TABLE_SIZE);
+               p4d_populate(&init_mm, p4dp, pudp);
+       }
+       pudp = pud_offset(p4dp, ea);
+       /* Same for the PMD level. */
+       if (kasan_pmd_table(*pudp)) {
+               pmdp = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
+               memcpy(pmdp, kasan_early_shadow_pmd, PMD_TABLE_SIZE);
+               pud_populate(&init_mm, pudp, pmdp);
+       }
+       pmdp = pmd_offset(pudp, ea);
+       /* And the PTE level. */
+       if (kasan_pte_table(*pmdp)) {
+               ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
+               memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE);
+               pmd_populate_kernel(&init_mm, pmdp, ptep);
+       }
+       ptep = pte_offset_kernel(pmdp, ea);
+
+       __set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
+
+       return 0;
+}
+
+/*
+ * Allocate and map real, writable shadow memory covering the memblock
+ * range [start, end), page by page, replacing the early zero shadow.
+ */
+static void __init kasan_init_phys_region(void *start, void *end)
+{
+       unsigned long k_start, k_end, k_cur;
+       void *va;
+
+       if (start >= end)
+               return;
+
+       /* Page-aligned shadow bounds for [start, end) */
+       k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
+       k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
+
+       /* One allocation backs the whole shadow range; mapped a page at a time */
+       va = memblock_alloc(k_end - k_start, PAGE_SIZE);
+       for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
+               kasan_map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
+}
+
+/*
+ * Called from head_64.S right before early_setup(): point the entire
+ * KASAN shadow region at the shared zero shadow, so every shadow
+ * access resolves to the single writable kasan_early_shadow_page.
+ */
+void __init kasan_early_init(void)
+{
+       int i;
+       unsigned long addr;
+       pgd_t *pgd = pgd_offset_k(KASAN_SHADOW_START);
+       pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);
+
+       BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+       BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+
+       /* Every early-shadow PTE maps the same zero page (writable for now) */
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+                            &kasan_early_shadow_pte[i], zero_pte, 0);
+
+       /* Chain the shared tables: PMD -> PTE, PUD -> PMD */
+       for (i = 0; i < PTRS_PER_PMD; i++)
+               pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
+                                   kasan_early_shadow_pte);
+
+       for (i = 0; i < PTRS_PER_PUD; i++)
+               pud_populate(&init_mm, &kasan_early_shadow_pud[i],
+                            kasan_early_shadow_pmd);
+
+       /* Point every P4D slot of the shadow region at the shared PUD */
+       for (addr = KASAN_SHADOW_START; addr != KASAN_SHADOW_END; addr += PGDIR_SIZE)
+               p4d_populate(&init_mm, p4d_offset(pgd++, addr), kasan_early_shadow_pud);
+}
+
+/*
+ * Finish KASAN setup: give every memblock range a real shadow, drop
+ * the zero shadow over vmalloc space when KASAN_VMALLOC handles it,
+ * and lock down the remaining early shadow page read-only.
+ */
+void __init kasan_init(void)
+{
+       phys_addr_t start, end;
+       u64 i;
+       pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
+
+       for_each_mem_range(i, &start, &end)
+               kasan_init_phys_region((void *)start, (void *)end);
+
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+               kasan_remove_zero_shadow((void *)VMALLOC_START, VMALLOC_SIZE);
+
+       /* Remap the shared zero page read-only from now on */
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+                            &kasan_early_shadow_pte[i], zero_pte, 0);
+
+       flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+       /* Clear anything written to the page while it was still writable */
+       memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+
+       /* Enable error messages */
+       init_task.kasan_depth = 0;
+       pr_info("KASAN init done\n");
+}
+
+/* Nothing left to do late: the shadow is fully built by kasan_init() */
+void __init kasan_late_init(void) { }
index 0da5566..9300d64 100644 (file)
@@ -99,4 +99,6 @@ void __init kasan_init(void)
        pr_info("KASAN init done\n");
 }
 
+/* Stub: head_64.S now calls kasan_early_init() whenever KASAN is on; book3s/64 does all setup in kasan_init() */
+void __init kasan_early_init(void) { }
+
 void __init kasan_late_init(void) { }
index 9e2df4b..383ed4f 100644 (file)
@@ -2,7 +2,6 @@
 config PPC32
        bool
        default y if !PPC64
-       select KASAN_VMALLOC if KASAN && MODULES
 
 config PPC64
        bool "64-bit kernel"