select HAVE_ARCH_HASH
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_KFENCE
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_REGS_AND_STACK_ACCESS_API
config PARISC_PAGE_SIZE_16KB
bool "16KB"
- depends on PA8X00 && BROKEN
+ depends on PA8X00 && BROKEN && !KFENCE
config PARISC_PAGE_SIZE_64KB
bool "64KB"
- depends on PA8X00 && BROKEN
+ depends on PA8X00 && BROKEN && !KFENCE
endchoice
--- /dev/null
+++ b/arch/parisc/include/asm/kfence.h
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PA-RISC KFENCE support.
+ *
+ * Copyright (C) 2021, Helge Deller <deller@gmx.de>
+ */
+
+#ifndef _ASM_PARISC_KFENCE_H
+#define _ASM_PARISC_KFENCE_H
+
+#include <linux/kfence.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
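+/*
+ * Nothing to set up: the pool is usable as-is, and pages are protected
+ * lazily, one at a time, via kfence_protect_page() below.
+ */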
+static inline bool arch_kfence_init_pool(void)
+{
+ return true;
+}
+
+/* Protect or unprotect the given page, and flush the TLB. */
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *pte = virt_to_kpte(addr);
+
+ if (WARN_ON(!pte))
+ return false;
+
+ /*
+ * We need to avoid IPIs, as we may get KFENCE allocations or faults
+ * with interrupts disabled.
+ */
+
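+	/* Toggle _PAGE_PRESENT: a non-present page faults on any access. */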
+ if (protect)
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ else
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ return true;
+}
+
+#endif /* _ASM_PARISC_KFENCE_H */
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
+#include <linux/kfence.h>
#include <asm/assembly.h>
#include <asm/io.h>
/* Clean up and return if in exception table. */
if (fixup_exception(regs))
return;
+ /* Clean up and return if handled by kfence. */
+ if (kfence_handle_page_fault(fault_address,
+ parisc_acctyp(code, regs->iir) == VM_WRITE, regs))
+ return;
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
}