From: Mauro (mdrjr) Ribeiro
Date: Mon, 6 Apr 2020 23:04:56 +0000 (-0300)
Subject: Merge tag 'v4.9.114' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux...
X-Git-Tag: hardkernel-4.9.236-104~210
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3835849be0b587ef8d18ef25db3f2f560afb1972;p=platform%2Fkernel%2Flinux-amlogic.git

Merge tag 'v4.9.114' of git://git./linux/kernel/git/stable/linux-stable into odroidg12-4.9.y

This is the 4.9.114 stable release
---

3835849be0b587ef8d18ef25db3f2f560afb1972
diff --cc arch/arm64/include/asm/cpufeature.h
index 9a8bb54,15868ec..1dc16f5
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@@ -221,12 -221,28 +221,34 @@@ static inline bool system_supports_mixe
          return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
  }
  
 +static inline bool system_uses_ttbr0_pan(void)
 +{
 +        return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
 +                !cpus_have_cap(ARM64_HAS_PAN);
 +}
 +
+ #define ARM64_SSBD_UNKNOWN              -1
+ #define ARM64_SSBD_FORCE_DISABLE        0
+ #define ARM64_SSBD_KERNEL               1
+ #define ARM64_SSBD_FORCE_ENABLE         2
+ #define ARM64_SSBD_MITIGATED            3
+ 
+ static inline int arm64_get_ssbd_state(void)
+ {
+ #ifdef CONFIG_ARM64_SSBD
+         extern int ssbd_state;
+         return ssbd_state;
+ #else
+         return ARM64_SSBD_UNKNOWN;
+ #endif
+ }
+ 
+ #ifdef CONFIG_ARM64_SSBD
+ void arm64_set_ssbd_mitigation(bool state);
+ #else
+ static inline void arm64_set_ssbd_mitigation(bool state) {}
+ #endif
+ 
  #endif /* __ASSEMBLY__ */
  
  #endif
diff --cc arch/arm64/include/asm/percpu.h
index d7a3c62,0d55157..a2f6bd2
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@@ -16,7 -16,7 +16,8 @@@
  #ifndef __ASM_PERCPU_H
  #define __ASM_PERCPU_H
  
 +#include
+ #include
  
  static inline void set_my_cpu_offset(unsigned long off)
  {
diff --cc arch/arm64/kernel/cpu_errata.c
index 2de62aa,1db97ad..539330f
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@@ -20,6 -20,6 +20,8 @@@
  #include
  #include
  #include
++#include
++#include
  
  static bool __maybe_unused
  is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
diff --cc arch/arm64/kernel/entry.S
index dc3f26d,ca978d7..b172ea9
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@@ -97,6 -96,25 +98,25 @@@ alternative_else_nop_endi
          add     \dst, \dst, #(\sym - .entry.tramp.text)
          .endm
  
+         // This macro corrupts x0-x3. It is the caller's duty
+         // to save/restore them if required.
+         .macro  apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
+ alternative_cb  arm64_enable_wa2_handling
+         b       \targ
+ alternative_cb_end
+         ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1
+         cbz     \tmp2, \targ
 -        ldr     \tmp2, [tsk, #TI_FLAGS]
++        ldr     \tmp2, [tsk, #TSK_TI_FLAGS]
+         tbnz    \tmp2, #TIF_SSBD, \targ
+         mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+         mov     w1, #\state
+ alternative_cb  arm64_update_smccc_conduit
+         nop                                     // Patched to SMC/HVC #0
+ alternative_cb_end
+ #endif
+         .endm
+ 
          .macro  kernel_entry, el, regsize = 64
          .if     \regsize == 32
          mov     w0, w0                          // zero upper 32 bits of x0
@@@ -119,10 -137,19 +139,18 @@@
  
          .if     \el == 0
          mrs     x21, sp_el0
 -        mov     tsk, sp
 -        and     tsk, tsk, #~(THREAD_SIZE - 1)   // Ensure MDSCR_EL1.SS is clear,
 -        ldr     x19, [tsk, #TI_FLAGS]           // since we can unmask debug
 +        ldr_this_cpu    tsk, __entry_task, x20  // Ensure MDSCR_EL1.SS is clear,
 +        ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
          disable_step_tsk x19, x20               // exceptions when scheduling.
  
+         apply_ssbd 1, 1f, x22, x23
+ 
+ #ifdef CONFIG_ARM64_SSBD
+         ldp     x0, x1, [sp, #16 * 0]
+         ldp     x2, x3, [sp, #16 * 1]
+ #endif
+ 1:
+ 
          mov     x29, xzr                        // fp pointed to user-space
          .else
          add     x21, sp, #S_FRAME_SIZE
@@@ -255,8 -218,9 +279,10 @@@ alternative_if ARM64_WORKAROUND_84571
  alternative_else_nop_endif
  #endif
  3:
+         apply_ssbd 0, 5f, x0, x1
+ 5:
          .endif
 +
          msr     elr_el1, x21                    // set up the return data
          msr     spsr_el1, x22
          ldp     x0, x1, [sp, #16 * 0]
diff --cc include/linux/compiler-gcc.h
index a6d1bf2,8e82e33..700ee16
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@@ -74,16 -86,19 +86,18 @@@
   */
  #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
      !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
 -#define inline \
 -        inline __attribute__((always_inline, unused)) notrace __gnu_inline
 +#define inline inline __attribute__((always_inline,unused)) notrace
 +#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
 +#define __inline __inline __attribute__((always_inline,unused)) notrace
  #else
 -#define inline inline __attribute__((unused)) notrace __gnu_inline
 +/* A lot of inline functions can cause havoc with function tracing */
 +#define inline inline __attribute__((unused)) notrace
 +#define __inline__ __inline__ __attribute__((unused)) notrace
 +#define __inline __inline __attribute__((unused)) notrace
  #endif
  
 -#define __inline__ inline
 -#define __inline inline
++//#define __inline__ inline
++//#define __inline inline
  #define __always_inline inline __attribute__((always_inline))
  #define noinline __attribute__((noinline))
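
For reference, the cpufeature.h hunk above exports the SSBD mitigation state to C code through arm64_get_ssbd_state() and the ARM64_SSBD_* constants. A minimal sketch of how a caller could branch on that state is shown below; the report_ssbd_status() helper and its messages are hypothetical illustrations, not code from either merged branch.

/*
 * Illustrative only: a hypothetical helper showing how the ARM64_SSBD_*
 * state values added by this merge can be consumed. Not part of the diff.
 */
#include <linux/kernel.h>
#include <asm/cpufeature.h>

static void report_ssbd_status(void)
{
        switch (arm64_get_ssbd_state()) {
        case ARM64_SSBD_FORCE_DISABLE:
                pr_info("SSBD: mitigation force-disabled on the command line\n");
                break;
        case ARM64_SSBD_KERNEL:
                pr_info("SSBD: dynamic mitigation, toggled per task via TIF_SSBD\n");
                break;
        case ARM64_SSBD_FORCE_ENABLE:
                pr_info("SSBD: mitigation force-enabled on the command line\n");
                break;
        case ARM64_SSBD_MITIGATED:
                pr_info("SSBD: CPU not affected or already mitigated by firmware\n");
                break;
        default: /* ARM64_SSBD_UNKNOWN: CONFIG_ARM64_SSBD is disabled */
                pr_info("SSBD: state unknown\n");
                break;
        }
}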