x86, kfence: enable KFENCE for x86
author    Alexander Potapenko <glider@google.com>
          Tue, 3 Nov 2020 17:58:34 +0000 (18:58 +0100)
committer Seung-Woo Kim <sw0312.kim@samsung.com>
          Tue, 20 Sep 2022 02:41:17 +0000 (11:41 +0900)
Add architecture-specific implementation details for KFENCE and enable
KFENCE for the x86 architecture. In particular, this implements the
required interface in <asm/kfence.h> for setting up the pool and
providing helper functions for protecting and unprotecting pages.

For x86, we need to ensure that the pool uses 4K pages, which is done
using the set_memory_4k() helper function.
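
For reference, the arch interface implemented below boils down to two
hooks and one macro (a sketch; the exact contract is defined by the
generic KFENCE code via <linux/kfence.h>, and the names are the ones
this patch adds):

    /* Called once at pool setup; returning false disables KFENCE. */
    static inline bool arch_kfence_init_pool(void);

    /* Toggle the present bit of one 4K pool page; must avoid IPIs. */
    static inline bool kfence_protect_page(unsigned long addr, bool protect);

    /* Fault-handler entry function at which report stack traces are cut. */
    #define KFENCE_SKIP_ARCH_FAULT_HANDLER "asm_exc_page_fault"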

Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Co-developed-by: Marco Elver <elver@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Jann Horn <jannh@google.com>
[port kfence feature to rpi-5.10.95]
Signed-off-by: Sung-hun Kim <sfoon.kim@samsung.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Seung-Woo Kim <sw0312.kim@samsung.com>
Change-Id: Idfebe6e03d9bda5de4bd6d0084f65bd0feda8d8a

arch/x86/Kconfig
arch/x86/include/asm/kfence.h [new file with mode: 0644]
arch/x86/mm/fault.c

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 38583e9..ce70abb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -148,6 +148,7 @@ config X86
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN                  if X86_64
        select HAVE_ARCH_KASAN_VMALLOC          if X86_64
+       select HAVE_ARCH_KFENCE
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS          if MMU
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if MMU && COMPAT
diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
new file mode 100644
index 0000000..beeac10
--- /dev/null
+++ b/arch/x86/include/asm/kfence.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_X86_KFENCE_H
+#define _ASM_X86_KFENCE_H
+
+#include <linux/bug.h>
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+/*
+ * The page fault handler entry function, up to which the stack trace is
+ * truncated in reports.
+ */
+#define KFENCE_SKIP_ARCH_FAULT_HANDLER "asm_exc_page_fault"
+
+/* Force 4K pages for __kfence_pool. */
+static inline bool arch_kfence_init_pool(void)
+{
+       unsigned long addr;
+
+       for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+            addr += PAGE_SIZE) {
+               unsigned int level;
+
+               if (!lookup_address(addr, &level))
+                       return false;
+
+               if (level != PG_LEVEL_4K)
+                       set_memory_4k(addr, 1);
+       }
+
+       return true;
+}
+
+/* Protect the given page and flush TLB. */
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+       unsigned int level;
+       pte_t *pte = lookup_address(addr, &level);
+
+       if (WARN_ON(!pte || level != PG_LEVEL_4K))
+               return false;
+
+       /*
+        * We need to avoid IPIs, as we may get KFENCE allocations or faults
+        * with interrupts disabled. Therefore, the below is best-effort, and
+        * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
+        * lazy fault handling takes care of faults after the page is PRESENT.
+        */
+
+       if (protect)
+               set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+       else
+               set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+       /* Flush this CPU's TLB. */
+       flush_tlb_one_kernel(addr);
+       return true;
+}
+
+#endif /* _ASM_X86_KFENCE_H */
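
As a usage sketch (assuming the generic KFENCE core of this kernel
generation; the real helpers live in mm/kfence/core.c), the core drives
kfence_protect_page() one guard page at a time:

    /* Hedged sketch of the generic-side callers, not the literal code. */
    static bool kfence_protect(unsigned long addr)
    {
            return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
    }

    static bool kfence_unprotect(unsigned long addr)
    {
            return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
    }
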
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9c1545c..c64e44f 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -9,6 +9,7 @@
 #include <linux/kdebug.h>              /* oops_begin/end, ...          */
 #include <linux/extable.h>             /* search_exception_tables      */
 #include <linux/memblock.h>            /* max_low_pfn                  */
+#include <linux/kfence.h>              /* kfence_handle_page_fault     */
 #include <linux/kprobes.h>             /* NOKPROBE_SYMBOL, ...         */
 #include <linux/mmiotrace.h>           /* kmmio_handler, ...           */
 #include <linux/perf_event.h>          /* perf_sw_event                */
@@ -732,6 +733,10 @@ no_context(struct pt_regs *regs, unsigned long error_code,
        if (IS_ENABLED(CONFIG_EFI))
                efi_recover_from_page_fault(address);
 
+       /* Only not-present faults should be handled by KFENCE. */
+       if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address))
+               return;
+
 oops:
        /*
         * Oops. The kernel tried to access some bad page. We'll have to
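
The X86_PF_PROT check above encodes why only not-present faults are
routed to KFENCE: guard pages are made non-present by
kfence_protect_page(), while a set X86_PF_PROT bit means a permission
violation on a present page, which cannot be a guard-page hit. The
contract the caller relies on (a sketch; the implementation is in the
generic KFENCE core):

    /*
     * Returns true iff the faulting address lies in the KFENCE pool; the
     * error is then reported and the page unprotected so the kernel can
     * continue running.
     */
    bool kfence_handle_page_fault(unsigned long addr);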