ARM: Support KFENCE for ARM 63/281563/1
authorKefeng Wang <wangkefeng.wang@huawei.com>
Mon, 15 Nov 2021 13:48:48 +0000 (21:48 +0800)
committerSeung-Woo Kim <sw0312.kim@samsung.com>
Tue, 20 Sep 2022 02:56:50 +0000 (11:56 +0900)
Add architecture specific implementation details for KFENCE and enable
KFENCE on ARM. In particular, this implements the required interface in
<asm/kfence.h>.

KFENCE requires that attributes for pages from its memory pool can
individually be set. Therefore, force the kfence pool to be mapped
at page granularity.

Tested this patch using the testcases in kfence_test.c; all of them
passed, both with and without ARM_LPAE.

Acked-by: Marco Elver <elver@google.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Link: https://lore.kernel.org/linux-arm-kernel/20211115134848.171098-4-wangkefeng.wang@huawei.com/
[port for kfence feature to rpi-5.10.95]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Seung-Woo Kim <sw0312.kim@samsung.com>
Change-Id: Ib2afb1c98fb5bf437f68766fe391ac9bc118f6ad

arch/arm/Kconfig
arch/arm/include/asm/kfence.h [new file with mode: 0644]
arch/arm/mm/fault.c

index a87c79a..99e02d1 100644 (file)
@@ -68,6 +68,7 @@ config ARM
        select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT
+       select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_MMAP_RND_BITS if MMU
        select HAVE_ARCH_SECCOMP
diff --git a/arch/arm/include/asm/kfence.h b/arch/arm/include/asm/kfence.h
new file mode 100644 (file)
index 0000000..7980d0f
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_ARM_KFENCE_H
+#define __ASM_ARM_KFENCE_H
+
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/set_memory.h>
+
+/*
+ * Split a PMD-level section mapping covering @addr into a full PTE-level
+ * page table, so that attributes can later be changed per 4K page.
+ * @addr must be PMD-aligned (callers pass addr & PMD_MASK).
+ *
+ * Returns 0 on success, or -ENOMEM if the PTE table allocation fails.
+ */
+static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
+{
+       int i;
+       unsigned long pfn = PFN_DOWN(__pa(addr));
+       pte_t *pte = pte_alloc_one_kernel(&init_mm);
+
+       if (!pte)
+               return -ENOMEM;
+
+       /* Recreate the old linear section mapping one PTE at a time. */
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
+       pmd_populate_kernel(&init_mm, pmd, pte);
+
+       /* Discard any cached translation of the old section mapping. */
+       flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+       return 0;
+}
+
+/*
+ * Ensure the entire kfence pool is mapped at page granularity: walk the
+ * pool page by page and split any PMD section (block) mapping found, so
+ * kfence_protect_page() can toggle the validity of individual pages.
+ *
+ * Returns true on success, false if splitting a section mapping failed.
+ */
+static inline bool arch_kfence_init_pool(void)
+{
+       unsigned long addr;
+       pmd_t *pmd;
+
+       for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+            addr += PAGE_SIZE) {
+               pmd = pmd_off_k(addr);
+
+               if (pmd_leaf(*pmd)) {
+                       /* Section mapping: break it down to PTEs. */
+                       if (split_pmd_page(pmd, addr & PMD_MASK))
+                               return false;
+               }
+       }
+
+       return true;
+}
+
+/*
+ * Protect (@protect == true) or unprotect a single kfence pool page by
+ * marking its mapping invalid/valid; an access to a protected page then
+ * faults and is reported by kfence_handle_page_fault().
+ */
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+       set_memory_valid(addr, 1, !protect);
+
+       return true;
+}
+
+#endif /* __ASM_ARM_KFENCE_H */
index d050021..54952fc 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/sched/debug.h>
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
+#include <linux/kfence.h>
 
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
@@ -137,10 +138,14 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
        /*
         * No handler, we'll have to terminate things with extreme prejudice.
         */
-       if (addr < PAGE_SIZE)
+       if (addr < PAGE_SIZE) {
                msg = "NULL pointer dereference";
-       else
+       } else {
+               if (kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
+                       return;
+
                msg = "paging request";
+       }
 
        die_kernel_fault(msg, mm, addr, fsr, regs);
 }