Merge branch 'for-next/scs' into for-next/core
author: Will Deacon <will@kernel.org>
Thu, 28 May 2020 17:03:40 +0000 (18:03 +0100)
committer: Will Deacon <will@kernel.org>
Thu, 28 May 2020 17:03:40 +0000 (18:03 +0100)
Support for Clang's Shadow Call Stack in the kernel
(Sami Tolvanen and Will Deacon)
* for-next/scs:
  arm64: entry-ftrace.S: Update comment to indicate that x18 is live
  scs: Move DEFINE_SCS macro into core code
  scs: Remove references to asm/scs.h from core code
  scs: Move scs_overflow_check() out of architecture code
  arm64: scs: Use 'scs_sp' register alias for x18
  scs: Move accounting into alloc/free functions
  arm64: scs: Store absolute SCS stack pointer value in thread_info
  efi/libstub: Disable Shadow Call Stack
  arm64: scs: Add shadow stacks for SDEI
  arm64: Implement Shadow Call Stack
  arm64: Disable SCS for hypervisor code
  arm64: vdso: Disable Shadow Call Stack
  arm64: efi: Restore register x18 if it was corrupted
  arm64: Preserve register x18 when CPU is suspended
  arm64: Reserve register x18 from general allocation with SCS
  scs: Disable when function graph tracing is enabled
  scs: Add support for stack usage debugging
  scs: Add page accounting for shadow call stack allocations
  scs: Add support for Clang's Shadow Call Stack (SCS)

1  2 
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/efi-rt-wrapper.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/vdso/Makefile
arch/arm64/mm/proc.S

@@@ -63,11 -61,10 +63,12 @@@ config ARM64
        select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
        select ARCH_KEEP_MEMBLOCK
        select ARCH_USE_CMPXCHG_LOCKREF
 +      select ARCH_USE_GNU_PROPERTY
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS
 +      select ARCH_USE_SYM_ANNOTATIONS
        select ARCH_SUPPORTS_MEMORY_FAILURE
+       select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
        select ARCH_SUPPORTS_NUMA_BALANCING
Simple merge
  #include <linux/compiler.h>
  #include <linux/kvm_host.h>
  #include <asm/alternative.h>
 -#include <asm/kvm_mmu.h>
  #include <asm/sysreg.h>
  
- #define __hyp_text __section(.hyp.text) notrace
+ #define __hyp_text __section(.hyp.text) notrace __noscs
  
  #define read_sysreg_elx(r,nvh,vh)                                     \
        ({                                                              \
Simple merge
@@@ -34,5 -34,14 +34,14 @@@ SYM_FUNC_START(__efi_rt_asm_wrapper)
        ldp     x29, x30, [sp], #32
        b.ne    0f
        ret
- 0:    b       efi_handle_corrupted_x18        // tail call
+ 0:
+       /*
+        * With CONFIG_SHADOW_CALL_STACK, the kernel uses x18 to store a
+        * shadow stack pointer, which we need to restore before returning to
+        * potentially instrumented code. This is safe because the wrapper is
+        * called with preemption disabled and a separate shadow stack is used
+        * for interrupts.
+        */
+       mov     x18, x2
+       b       efi_handle_corrupted_x18        // tail call
 -ENDPROC(__efi_rt_asm_wrapper)
 +SYM_FUNC_END(__efi_rt_asm_wrapper)
@@@ -178,7 -179,9 +179,9 @@@ alternative_cb_end
  
        apply_ssbd 1, x22, x23
  
 -      ptrauth_keys_install_kernel tsk, 1, x20, x22, x23
 +      ptrauth_keys_install_kernel tsk, x20, x22, x23
+       scs_load tsk, x20
        .else
        add     x21, sp, #S_FRAME_SIZE
        get_current_task tsk
@@@ -901,7 -918,9 +919,9 @@@ SYM_FUNC_START(cpu_switch_to
        ldr     lr, [x8]
        mov     sp, x9
        msr     sp_el0, x1
 -      ptrauth_keys_install_kernel x1, 1, x8, x9, x10
 +      ptrauth_keys_install_kernel x1, x8, x9, x10
+       scs_save x0, x8
+       scs_load x1, x8
        ret
  SYM_FUNC_END(cpu_switch_to)
  NOKPROBE(cpu_switch_to)
@@@ -745,13 -742,9 +750,14 @@@ SYM_FUNC_START_LOCAL(__secondary_switched)
        ldr     x2, [x0, #CPU_BOOT_TASK]
        cbz     x2, __secondary_too_slow
        msr     sp_el0, x2
+       scs_load x2, x3
        mov     x29, #0
        mov     x30, #0
 +
 +#ifdef CONFIG_ARM64_PTR_AUTH
 +      ptrauth_keys_init_cpu x2, x3, x4, x5
 +#endif
 +
        b       secondary_start_kernel
  SYM_FUNC_END(__secondary_switched)
  
@@@ -29,7 -23,9 +29,7 @@@ ldflags-y := -shared -nostdlib -soname=
  ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
  ccflags-y += -DDISABLE_BRANCH_PROFILING
  
- CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
 -VDSO_LDFLAGS := -Bsymbolic
 -
+ CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS)
  KBUILD_CFLAGS                 += $(DISABLE_LTO)
  KASAN_SANITIZE                        := n
  UBSAN_SANITIZE                        := n
Simple merge