Merge branch 'for-next/bti' into for-next/core
author      Will Deacon <will@kernel.org>
            Thu, 28 May 2020 17:00:51 +0000 (18:00 +0100)
committer   Will Deacon <will@kernel.org>
            Thu, 28 May 2020 17:00:51 +0000 (18:00 +0100)
Support for Branch Target Identification (BTI) in user and kernel
(Mark Brown and others)
* for-next/bti: (39 commits)
  arm64: vdso: Fix CFI directives in sigreturn trampoline
  arm64: vdso: Don't prefix sigreturn trampoline with a BTI C instruction
  arm64: bti: Fix support for userspace only BTI
  arm64: kconfig: Update and comment GCC version check for kernel BTI
  arm64: vdso: Map the vDSO text with guarded pages when built for BTI
  arm64: vdso: Force the vDSO to be linked as BTI when built for BTI
  arm64: vdso: Annotate for BTI
  arm64: asm: Provide a mechanism for generating ELF note for BTI
  arm64: bti: Provide Kconfig for kernel mode BTI
  arm64: mm: Mark executable text as guarded pages
  arm64: bpf: Annotate JITed code for BTI
  arm64: Set GP bit in kernel page tables to enable BTI for the kernel
  arm64: asm: Override SYM_FUNC_START when building the kernel with BTI
  arm64: bti: Support building kernel C code using BTI
  arm64: Document why we enable PAC support for leaf functions
  arm64: insn: Report PAC and BTI instructions as skippable
  arm64: insn: Don't assume unrecognized HINTs are skippable
  arm64: insn: Provide a better name for aarch64_insn_is_nop()
  arm64: insn: Add constants for new HINT instruction decode
  arm64: Disable old style assembly annotations
  ...
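[Orientation note, not part of the merge itself: once this series is in place, userspace can detect BTI support through the ELF auxiliary vector. A minimal sketch, assuming the HWCAP2_BTI hwcap (and the PROT_BTI mprotect() flag used by the dynamic loader for BTI-annotated executables) added by this series; everything else is standard libc.]

/* Sketch: detect BTI support from userspace via the auxiliary vector. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_BTI
#define HWCAP2_BTI	(1 << 17)	/* value assumed from the series' uapi hwcap header */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("BTI %ssupported by this CPU/kernel\n",
	       (hwcap2 & HWCAP2_BTI) ? "" : "not ");
	return 0;
}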

18 files changed:
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/insn.c
arch/arm64/kernel/sleep.S
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/net/bpf_jit.h
arch/arm64/net/bpf_jit_comp.c

Simple merge
Simple merge
@@@ -61,8 -61,8 +61,9 @@@
  #define ARM64_HAS_AMU_EXTN                    51
  #define ARM64_HAS_ADDRESS_AUTH                        52
  #define ARM64_HAS_GENERIC_AUTH                        53
 -#define ARM64_BTI                             54
 +#define ARM64_HAS_32BIT_EL1                   54
++#define ARM64_BTI                             55
  
--#define ARM64_NCAPS                           55
++#define ARM64_NCAPS                           56
  
  #endif /* __ASM_CPUCAPS_H */
Simple merge
Simple merge
Simple merge
@@@ -238,9 -181,9 +238,11 @@@ static const struct arm64_ftr_bits ftr_
  };
  
  static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 +      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
+                                   FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
        ARM64_FTR_END,
  };
  
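[Note: the FTR_VISIBLE_IF_IS_ENABLED() wrapper on the new BT field keeps the field hidden from userspace ID register emulation unless the kernel is configured with CONFIG_ARM64_BTI. A minimal sketch of what such a helper amounts to; the actual macro lives in cpufeature.c and its exact definition is an assumption here.]

/* Visibility of an ID register field gated on a Kconfig option:
 * exposed to userspace MRS emulation only if the config is enabled.
 */
#define FTR_VISIBLE_IF_IS_ENABLED(config)	\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)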
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -95,11 -95,12 +95,11 @@@ SYM_FUNC_START(__cpu_suspend_enter
        ldp     x29, lr, [sp], #16
        mov     x0, #1
        ret
- ENDPROC(__cpu_suspend_enter)
+ SYM_FUNC_END(__cpu_suspend_enter)
  
        .pushsection ".idmap.text", "awx"
- ENTRY(cpu_resume)
+ SYM_CODE_START(cpu_resume)
        bl      el2_setup               // if in EL2 drop to EL1 cleanly
 -      mov     x0, #ARM64_CPU_RUNTIME
        bl      __cpu_setup
        /* enable the MMU early - so we can access sleep_save_stash by va */
        adrp    x1, swapper_pg_dir
Simple merge
@@@ -136,9 -142,10 +136,10 @@@ static int __setup_additional_pages(enu
                                    int uses_interp)
  {
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+       unsigned long gp_flags = 0;
        void *ret;
  
 -      vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
 +      vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + PAGE_SIZE;
  
        vdso_base += PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-                                      VM_READ|VM_EXEC|
+                                      VM_READ|VM_EXEC|gp_flags|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 -                                     vdso_lookup[arch_index].cm);
 +                                     vdso_info[abi].cm);
        if (IS_ERR(ret))
                goto up_fail;
  
@@@ -17,12 -17,10 +17,14 @@@ obj-vdso := vgettimeofday.o note.o sigr
  targets := $(obj-vdso) vdso.so vdso.so.dbg
  obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
  
+ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
 +# -Bsymbolic has been added for consistency with arm, the compat vDSO and
 +# potential future proofing if we end up with internal calls to the exported
 +# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
 +# preparation in build-time C")).
  ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
-               -Bsymbolic --eh-frame-hdr --build-id -n -T
 -              --build-id -n $(btildflags-y) -T
++              -Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T
  
  ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
  ccflags-y += -DDISABLE_BRANCH_PROFILING
  /* Rn & Rm; set condition flags */
  #define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
  
 +/* Logical (immediate) */
 +#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
 +      u64 imm64 = (sf) ? (u64)imm : (u64)(u32)imm; \
 +      aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_##type, \
 +              A64_VARIANT(sf), Rn, Rd, imm64); \
 +})
 +/* Rd = Rn OP imm */
 +#define A64_AND_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND)
 +#define A64_ORR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, ORR)
 +#define A64_EOR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, EOR)
 +#define A64_ANDS_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND_SETFLAGS)
 +/* Rn & imm; set condition flags */
 +#define A64_TST_I(sf, Rn, imm) A64_ANDS_I(sf, A64_ZR, Rn, imm)
 +
+ /* HINTs */
+ #define A64_HINT(x) aarch64_insn_gen_hint(x)
+ /* BTI */
+ #define A64_BTI_C  A64_HINT(AARCH64_INSN_HINT_BTIC)
+ #define A64_BTI_J  A64_HINT(AARCH64_INSN_HINT_BTIJ)
+ #define A64_BTI_JC A64_HINT(AARCH64_INSN_HINT_BTIJC)
  #endif /* _BPF_JIT_H */
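[Note: with JITed BPF images now annotated for BTI (see "arm64: bpf: Annotate JITed code for BTI" above), the new A64_BTI_C encoding gives the JIT a landing pad to emit in its prologue. A rough sketch of the intended use, assuming the JIT's existing emit() helper and jit_ctx; the exact call site in bpf_jit_comp.c is an assumption.]

	/* Sketch: start JITed code with a BTI C landing pad so indirect
	 * calls into the guarded JIT image are permitted.
	 */
	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		emit(A64_BTI_C, ctx);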
Simple merge