From: Amit Daniel Kachhap
Date: Fri, 13 Mar 2020 09:04:58 +0000 (+0530)
Subject: arm64: mask PAC bits of __builtin_return_address
X-Git-Tag: v5.10.7~2834^2~6^2~7
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=689eae42afd7a916634146edca38463769969184;p=platform%2Fkernel%2Flinux-rpi.git

arm64: mask PAC bits of __builtin_return_address

Functions like vmap() record how much memory has been allocated by their
callers, and callers are identified using __builtin_return_address(). Once
the kernel is using pointer-auth, the return address will be signed. This
means it will not match any kernel symbol, and will vary between threads
even for the same caller.

The output of /proc/vmallocinfo in this case may look like:

0x(____ptrval____)-0x(____ptrval____)   20480 0x86e28000100e7c60 pages=4 vmalloc N0=4
0x(____ptrval____)-0x(____ptrval____)   20480 0x86e28000100e7c60 pages=4 vmalloc N0=4
0x(____ptrval____)-0x(____ptrval____)   20480 0xc5c78000100e7c60 pages=4 vmalloc N0=4

The above three 64-bit values should all resolve to the same symbol name,
not to three different signed LR values.

Use the pre-processor to add logic that clears the PAC from the value seen
by __builtin_return_address() callers. This patch adds a new file,
asm/compiler.h, which is transitively included via include/compiler_types.h
on the compiler command line, so it is guaranteed to be loaded and users of
this macro will not pick up a wrong version.

Helper macros ptrauth_kernel_pac_mask/ptrauth_clear_pac are created for
this purpose and added in this file. The existing macro
ptrauth_user_pac_mask is moved here from asm/pointer_auth.h. (A standalone
sketch of the masking logic follows the patch.)

Signed-off-by: Amit Daniel Kachhap
Reviewed-by: James Morse
Signed-off-by: Catalin Marinas
---

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 87e2cbb..115ceea 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -118,6 +118,7 @@ config ARM64
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
+	select HAVE_ARCH_COMPILER_H
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
new file mode 100644
index 0000000..eece20d
--- /dev/null
+++ b/arch/arm64/include/asm/compiler.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_COMPILER_H
+#define __ASM_COMPILER_H
+
+#if defined(CONFIG_ARM64_PTR_AUTH)
+
+/*
+ * The EL0/EL1 pointer bits used by a pointer authentication code.
+ * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
+ */
+#define ptrauth_user_pac_mask()		GENMASK_ULL(54, vabits_actual)
+#define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
+
+/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
+#define ptrauth_clear_pac(ptr)						\
+	((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) :	\
+	 (ptr & ~ptrauth_user_pac_mask()))
+
+#define __builtin_return_address(val)					\
+	(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#endif /* __ASM_COMPILER_H */
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index 833d3f9..70c4715 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -68,16 +68,9 @@ static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kerne
 
 extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
 
-/*
- * The EL0 pointer bits used by a pointer authentication code.
- * This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
- */
-#define ptrauth_user_pac_mask()	GENMASK(54, vabits_actual)
-
-/* Only valid for EL0 TTBR0 instruction pointers */
 static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 {
-	return ptr & ~ptrauth_user_pac_mask();
+	return ptrauth_clear_pac(ptr);
 }
 
 #define ptrauth_thread_init_user(tsk)	\
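
Below is a minimal userspace sketch of the ptrauth_clear_pac() logic
introduced above, for experimenting with the masking outside the kernel.
It is not part of the patch: vabits_actual is assumed to be a fixed 48-bit
VA size, and BIT_ULL()/GENMASK_ULL() are re-implemented locally since the
kernel headers are unavailable in userspace. Bit 55 selects the
translation regime: kernel (TTBR1) pointers have it set, so the PAC field
is filled with ones to restore a canonical kernel address; user (TTBR0)
pointers have it clear, so the PAC field is zeroed.

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)		(1ULL << (n))
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Assumption for this demo only: a 48-bit virtual address space. */
#define VABITS_ACTUAL		48

#define ptrauth_user_pac_mask()		GENMASK_ULL(54, VABITS_ACTUAL)
#define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, VABITS_ACTUAL)

/* Bit 55 picks the regime: set => TTBR1 (kernel), clear => TTBR0 (user). */
static uint64_t ptrauth_clear_pac(uint64_t ptr)
{
	return (ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask())
				   : (ptr & ~ptrauth_user_pac_mask());
}

int main(void)
{
	/* The two distinct signed LRs from the vmallocinfo output above. */
	uint64_t lr[] = { 0x86e28000100e7c60ULL, 0xc5c78000100e7c60ULL };

	for (int i = 0; i < 2; i++)
		printf("0x%016llx -> 0x%016llx\n",
		       (unsigned long long)lr[i],
		       (unsigned long long)ptrauth_clear_pac(lr[i]));
	return 0;
}

Run under these assumptions, both signed LRs collapse to the same
canonical address (0xffff8000100e7c60), which is why the three
/proc/vmallocinfo records can once again be attributed to a single caller
symbol.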