From: Will Deacon
Date: Thu, 28 May 2020 17:00:51 +0000 (+0100)
Subject: Merge branch 'for-next/bti' into for-next/core
X-Git-Tag: v5.10.7~2348^2~9
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d27865279f12035c730818aa1a0280fada866a37;p=platform%2Fkernel%2Flinux-rpi.git

Merge branch 'for-next/bti' into for-next/core

Support for Branch Target Identification (BTI) in user and kernel
(Mark Brown and others)

* for-next/bti: (39 commits)
  arm64: vdso: Fix CFI directives in sigreturn trampoline
  arm64: vdso: Don't prefix sigreturn trampoline with a BTI C instruction
  arm64: bti: Fix support for userspace only BTI
  arm64: kconfig: Update and comment GCC version check for kernel BTI
  arm64: vdso: Map the vDSO text with guarded pages when built for BTI
  arm64: vdso: Force the vDSO to be linked as BTI when built for BTI
  arm64: vdso: Annotate for BTI
  arm64: asm: Provide a mechanism for generating ELF note for BTI
  arm64: bti: Provide Kconfig for kernel mode BTI
  arm64: mm: Mark executable text as guarded pages
  arm64: bpf: Annotate JITed code for BTI
  arm64: Set GP bit in kernel page tables to enable BTI for the kernel
  arm64: asm: Override SYM_FUNC_START when building the kernel with BTI
  arm64: bti: Support building kernel C code using BTI
  arm64: Document why we enable PAC support for leaf functions
  arm64: insn: Report PAC and BTI instructions as skippable
  arm64: insn: Don't assume unrecognized HINTs are skippable
  arm64: insn: Provide a better name for aarch64_insn_is_nop()
  arm64: insn: Add constants for new HINT instruction decode
  arm64: Disable old style assembly annotations
  ...
---

d27865279f12035c730818aa1a0280fada866a37
diff --cc arch/arm64/include/asm/cpucaps.h
index c54c674,7b60514..eacd0c7
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@@ -61,8 -61,8 +61,9 @@@
  #define ARM64_HAS_AMU_EXTN		51
  #define ARM64_HAS_ADDRESS_AUTH		52
  #define ARM64_HAS_GENERIC_AUTH		53
 -#define ARM64_BTI			54
 +#define ARM64_HAS_32BIT_EL1		54
++#define ARM64_BTI			55
  
--#define ARM64_NCAPS			55
++#define ARM64_NCAPS			56
  
  #endif /* __ASM_CPUCAPS_H */
diff --cc arch/arm64/kernel/cpufeature.c
index b3202a9,d3cc247..4ae4167
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@@ -238,9 -181,9 +238,11 @@@ static const struct arm64_ftr_bits ftr_
  };
  
  static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 +	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
 +	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+ 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
+ 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
  	ARM64_FTR_END,
  };
  
diff --cc arch/arm64/kernel/sleep.S
index 56b1fe9,c1bf43c..ba40d57
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@@ -95,11 -95,12 +95,11 @@@ SYM_FUNC_START(__cpu_suspend_enter
  	ldp	x29, lr, [sp], #16
  	mov	x0, #1
  	ret
- ENDPROC(__cpu_suspend_enter)
+ SYM_FUNC_END(__cpu_suspend_enter)
  
  	.pushsection ".idmap.text", "awx"
- ENTRY(cpu_resume)
+ SYM_CODE_START(cpu_resume)
  	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 -	mov	x0, #ARM64_CPU_RUNTIME
  	bl	__cpu_setup
  	/* enable the MMU early - so we can access sleep_save_stash by va */
  	adrp	x1, swapper_pg_dir
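For context, the cpucaps and cpufeature hunks above are what let the rest of
the kernel query BTI support at run time. Below is a minimal sketch of how
such a capability is consumed from kernel C code; the series adds a
system_supports_bti() helper along these lines to
arch/arm64/include/asm/cpufeature.h, but treat the exact body here as an
approximation rather than the merged code.

/*
 * Sketch: consuming the new ARM64_BTI capability from kernel C code.
 * Approximates the system_supports_bti() helper this series adds to
 * arch/arm64/include/asm/cpufeature.h.
 */
#include <linux/kconfig.h>
#include <asm/cpufeature.h>

static inline bool system_supports_bti(void)
{
	/* Compile-time gate on CONFIG_ARM64_BTI, run-time gate on the cap. */
	return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
}
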
diff --cc arch/arm64/kernel/vdso.c
index f3eea5e2,3b0289d..d51a898
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@@ -136,9 -142,10 +136,10 @@@ static int __setup_additional_pages(enu
  				      int uses_interp)
  {
  	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+ 	unsigned long gp_flags = 0;
  	void *ret;
  
 -	vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
 +	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
  	/* Be sure to map the data page */
  	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
  
@@@ -157,9 -167,9 +161,9 @@@
  	vdso_base += PAGE_SIZE;
  	mm->context.vdso = (void *)vdso_base;
  	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
- 				       VM_READ|VM_EXEC|
+ 				       VM_READ|VM_EXEC|gp_flags|
  				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 -				       vdso_lookup[arch_index].cm);
 +				       vdso_info[abi].cm);
  	if (IS_ERR(ret))
  		goto up_fail;
  
diff --cc arch/arm64/kernel/vdso/Makefile
index 95e9e44,51ad1cc..fccd67d
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@@ -17,12 -17,10 +17,14 @@@ obj-vdso := vgettimeofday.o note.o sigr
  targets := $(obj-vdso) vdso.so vdso.so.dbg
  obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
  
+ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
+ 
 +# -Bsymbolic has been added for consistency with arm, the compat vDSO and
 +# potential future proofing if we end up with internal calls to the exported
 +# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
 +# preparation in build-time C")).
  ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
- 	     -Bsymbolic --eh-frame-hdr --build-id -n -T
 -	     --build-id -n $(btildflags-y) -T
++	     -Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T
  
  ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
  ccflags-y += -DDISABLE_BRANCH_PROFILING
diff --cc arch/arm64/net/bpf_jit.h
index 923ae7f,05b4777..cc0cf0f
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@@ -197,18 -189,12 +197,26 @@@
  /* Rn & Rm; set condition flags */
  #define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
  
 +/* Logical (immediate) */
 +#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
 +	u64 imm64 = (sf) ? (u64)imm : (u64)(u32)imm; \
 +	aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_##type, \
 +					   A64_VARIANT(sf), Rn, Rd, imm64); \
 +})
 +/* Rd = Rn OP imm */
 +#define A64_AND_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND)
 +#define A64_ORR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, ORR)
 +#define A64_EOR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, EOR)
 +#define A64_ANDS_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND_SETFLAGS)
 +/* Rn & imm; set condition flags */
 +#define A64_TST_I(sf, Rn, imm) A64_ANDS_I(sf, A64_ZR, Rn, imm)
 +
+ /* HINTs */
+ #define A64_HINT(x) aarch64_insn_gen_hint(x)
+ 
+ /* BTI */
+ #define A64_BTI_C  A64_HINT(AARCH64_INSN_HINT_BTIC)
+ #define A64_BTI_J  A64_HINT(AARCH64_INSN_HINT_BTIJ)
+ #define A64_BTI_JC A64_HINT(AARCH64_INSN_HINT_BTIJC)
+ 
  #endif /* _BPF_JIT_H */
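
For the userspace half of the series, the new capability is advertised
through AT_HWCAP2, so a program can probe it before opting in. A minimal,
self-contained sketch follows, assuming a native arm64 toolchain; the
fallback HWCAP2_BTI value matches arch/arm64/include/uapi/asm/hwcap.h at the
time of this merge.

/*
 * Sketch: detect the BTI support advertised by the kernel via AT_HWCAP2.
 * Build natively on arm64, e.g. "gcc -o bti-check bti-check.c".
 */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_BTI
#define HWCAP2_BTI	(1 << 17)	/* arch/arm64/include/uapi/asm/hwcap.h */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("BTI: %s\n", (hwcap2 & HWCAP2_BTI) ? "supported" : "not supported");
	return 0;
}

Note that userspace BTI (CONFIG_ARM64_BTI) and kernel-mode BTI
(CONFIG_ARM64_BTI_KERNEL) are gated separately, which is why the shortlog
carries both a "userspace only BTI" fix and a distinct Kconfig entry for
kernel mode.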