Merge tag 'v4.9.114' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux...
author     Mauro (mdrjr) Ribeiro <mauro.ribeiro@hardkernel.com>
           Mon, 6 Apr 2020 23:04:56 +0000 (20:04 -0300)
committer  Mauro (mdrjr) Ribeiro <mauro.ribeiro@hardkernel.com>
           Mon, 6 Apr 2020 23:04:56 +0000 (20:04 -0300)
This is the 4.9.114 stable release

18 files changed:
Documentation/kernel-parameters.txt
Makefile
arch/arm64/Kconfig
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/percpu.h
arch/arm64/include/asm/thread_info.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/suspend.c
include/linux/arm-smccc.h
include/linux/compiler-gcc.h
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c

Simple merge
diff --cc Makefile
Simple merge
Simple merge
Simple merge
@@@ -221,12 -221,28 +221,34 @@@ static inline bool system_supports_mixe
        return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
  }
  
 +static inline bool system_uses_ttbr0_pan(void)
 +{
 +      return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
 +              !cpus_have_cap(ARM64_HAS_PAN);
 +}
 +
+ #define ARM64_SSBD_UNKNOWN            -1
+ #define ARM64_SSBD_FORCE_DISABLE      0
+ #define ARM64_SSBD_KERNEL             1
+ #define ARM64_SSBD_FORCE_ENABLE               2
+ #define ARM64_SSBD_MITIGATED          3
+ static inline int arm64_get_ssbd_state(void)
+ {
+ #ifdef CONFIG_ARM64_SSBD
+       extern int ssbd_state;
+       return ssbd_state;
+ #else
+       return ARM64_SSBD_UNKNOWN;
+ #endif
+ }
+ #ifdef CONFIG_ARM64_SSBD
+ void arm64_set_ssbd_mitigation(bool state);
+ #else
+ static inline void arm64_set_ssbd_mitigation(bool state) {}
+ #endif
  #endif /* __ASSEMBLY__ */
  
  #endif
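
For orientation only: a minimal sketch of how a caller might consume the SSBD state accessor introduced in the hunk above. The ARM64_SSBD_* values and arm64_get_ssbd_state() come from the hunk itself; the helper below is hypothetical and not part of this merge.

/* Hypothetical helper, built only on the definitions added above. */
static inline bool ssbd_needs_runtime_toggle(void)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_KERNEL:
		return true;	/* per-task mitigation, toggled on kernel entry/exit */
	case ARM64_SSBD_FORCE_ENABLE:
	case ARM64_SSBD_FORCE_DISABLE:
	case ARM64_SSBD_MITIGATED:
	case ARM64_SSBD_UNKNOWN:
	default:
		return false;	/* fixed policy, already safe, or state unknown */
	}
}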
Simple merge
@@@ -16,7 -16,7 +16,8 @@@
  #ifndef __ASM_PERCPU_H
  #define __ASM_PERCPU_H
  
 +#include <asm/stack_pointer.h>
+ #include <asm/alternative.h>
  
  static inline void set_my_cpu_offset(unsigned long off)
  {
Simple merge
Simple merge
@@@ -20,6 -20,6 +20,8 @@@
  #include <asm/cpu.h>
  #include <asm/cputype.h>
  #include <asm/cpufeature.h>
++#include <linux/psci.h>
++#include <linux/arm-smccc.h>
  
  static bool __maybe_unused
  is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
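
The context line above shows is_affected_midr_range(), which decides whether the running CPU falls inside an errata-affected MIDR window. As a stand-alone illustration of that idea (field layout per MIDR_EL1; the helper and its mask are simplified and hypothetical, not the kernel's implementation):

#include <stdint.h>
#include <stdbool.h>

/*
 * MIDR_EL1 layout: [31:24] implementer, [23:20] variant, [19:16] architecture,
 * [15:4] part number, [3:0] revision.
 */
static bool midr_in_range(uint32_t midr, uint32_t model,
			  uint32_t var_rev_min, uint32_t var_rev_max)
{
	/* Pack variant:revision into one comparable value. */
	uint32_t var_rev = (((midr >> 20) & 0xf) << 4) | (midr & 0xf);

	/* Same implementer and part number, revision window inclusive. */
	return (midr & 0xff00fff0u) == (model & 0xff00fff0u) &&
	       var_rev >= var_rev_min && var_rev <= var_rev_max;
}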
Simple merge
@@@ -97,6 -96,25 +98,25 @@@ alternative_else_nop_endi
        add     \dst, \dst, #(\sym - .entry.tramp.text)
        .endm
  
 -      ldr     \tmp2, [tsk, #TI_FLAGS]
+       // This macro corrupts x0-x3. It is the caller's duty
+       // to save/restore them if required.
+       .macro  apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
+ alternative_cb        arm64_enable_wa2_handling
+       b       \targ
+ alternative_cb_end
+       ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1
+       cbz     \tmp2, \targ
++      ldr     \tmp2, [tsk, #TSK_TI_FLAGS]
+       tbnz    \tmp2, #TIF_SSBD, \targ
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+       mov     w1, #\state
+ alternative_cb        arm64_update_smccc_conduit
+       nop                                     // Patched to SMC/HVC #0
+ alternative_cb_end
+ #endif
+       .endm
        .macro  kernel_entry, el, regsize = 64
        .if     \regsize == 32
        mov     w0, w0                          // zero upper 32 bits of x0
  
        .if     \el == 0
        mrs     x21, sp_el0
 -      mov     tsk, sp
 -      and     tsk, tsk, #~(THREAD_SIZE - 1)   // Ensure MDSCR_EL1.SS is clear,
 -      ldr     x19, [tsk, #TI_FLAGS]           // since we can unmask debug
 +      ldr_this_cpu    tsk, __entry_task, x20  // Ensure MDSCR_EL1.SS is clear,
 +      ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
        disable_step_tsk x19, x20               // exceptions when scheduling.
  
+       apply_ssbd 1, 1f, x22, x23
+ #ifdef CONFIG_ARM64_SSBD
+       ldp     x0, x1, [sp, #16 * 0]
+       ldp     x2, x3, [sp, #16 * 1]
+ #endif
+ 1:
        mov     x29, xzr                        // fp pointed to user-space
        .else
        add     x21, sp, #S_FRAME_SIZE
@@@ -255,8 -218,9 +279,10 @@@ alternative_if ARM64_WORKAROUND_84571
  alternative_else_nop_endif
  #endif
  3:
+       apply_ssbd 0, 5f, x0, x1
+ 5:
        .endif
 +
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        ldp     x0, x1, [sp, #16 * 0]
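
For reference, the nop patched by arm64_update_smccc_conduit above becomes an SMC or HVC issuing ARM_SMCCC_ARCH_WORKAROUND_2. Below is a C-level sketch of the same firmware call, assuming the SMCCC 1.1 helpers and the psci_ops conduit tracking this backport relies on; the function name is illustrative and is not the body of arm64_set_ssbd_mitigation() from this merge.

#include <linux/arm-smccc.h>
#include <linux/psci.h>

/* Illustrative only: flip the SSBD firmware workaround via the PSCI conduit. */
static void ssbd_firmware_toggle(bool enable)
{
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, enable, NULL);
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, enable, NULL);
		break;
	default:
		break;	/* no SMCCC conduit available: nothing to do */
	}
}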
Simple merge
Simple merge
Simple merge
   */
  #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||              \
      !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
 -#define inline \
 -      inline __attribute__((always_inline, unused)) notrace __gnu_inline
 +#define inline inline         __attribute__((always_inline,unused)) notrace
 +#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
 +#define __inline __inline     __attribute__((always_inline,unused)) notrace
  #else
 -#define inline inline         __attribute__((unused)) notrace __gnu_inline
 +/* A lot of inline functions can cause havoc with function tracing */
 +#define inline inline         __attribute__((unused)) notrace
 +#define __inline__ __inline__ __attribute__((unused)) notrace
 +#define __inline __inline     __attribute__((unused)) notrace
  #endif
  
 -#define __inline__ inline
 -#define __inline inline
++//#define __inline__ inline
++//#define __inline inline
  #define __always_inline       inline __attribute__((always_inline))
  #define  noinline     __attribute__((noinline))
  
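To make the resolved compiler-gcc.h hunk concrete: redefining the inline keywords this way silently attaches always_inline (and, in the kernel, notrace) to every existing inline function. A small stand-alone sketch of the same keyword-macro trick in plain user-space C, with notrace omitted since it is kernel-specific:

#include <stdio.h>

/* Same trick as above: every later use of 'inline' picks up the attributes. */
#define inline inline __attribute__((always_inline, unused))

static inline int add_one(int x)
{
	return x + 1;		/* forced inline, even without optimization */
}

int main(void)
{
	printf("%d\n", add_one(41));	/* prints 42 */
	return 0;
}
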
Simple merge
Simple merge