return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
}
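+
+/*
+ * CONFIG_ARM64_SW_TTBR0_PAN emulates PAN by pointing TTBR0_EL1 at a
+ * reserved page on kernel entry; it is only in effect when the CPUs
+ * do not implement hardware PAN.
+ */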
+static inline bool system_uses_ttbr0_pan(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+ !cpus_have_cap(ARM64_HAS_PAN);
+}
+
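+ /*
+  * Boot-time SSBD (Speculative Store Bypass Disable) mitigation state:
+  * unknown (no firmware support, or not yet probed), force-disabled,
+  * dynamically managed by the kernel, force-enabled, or mitigated
+  * (the CPUs are not affected and nothing needs to be done).
+  */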
+ #define ARM64_SSBD_UNKNOWN -1
+ #define ARM64_SSBD_FORCE_DISABLE 0
+ #define ARM64_SSBD_KERNEL 1
+ #define ARM64_SSBD_FORCE_ENABLE 2
+ #define ARM64_SSBD_MITIGATED 3
+
+ static inline int arm64_get_ssbd_state(void)
+ {
+ #ifdef CONFIG_ARM64_SSBD
+ extern int ssbd_state;
+ return ssbd_state;
+ #else
+ return ARM64_SSBD_UNKNOWN;
+ #endif
+ }
+
+ #ifdef CONFIG_ARM64_SSBD
+ void arm64_set_ssbd_mitigation(bool state);
+ #else
+ static inline void arm64_set_ssbd_mitigation(bool state) {}
+ #endif
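+
+ /*
+  * Illustrative sketch only, not part of this patch: a hypothetical
+  * caller could force the mitigation on while it is dynamically
+  * managed with
+  *
+  *	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
+  *		arm64_set_ssbd_mitigation(true);
+  */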
+
#endif /* __ASSEMBLY__ */
#endif
add \dst, \dst, #(\sym - .entry.tramp.text)
.endm
+ // This macro corrupts x0-x3. It is the caller's duty
+ // to save/restore them if required.
+ .macro apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
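+ // At boot, the arm64_enable_wa2_handling callback patches the branch
+ // below into a NOP when dynamic WA2 handling is required; otherwise
+ // the whole sequence is skipped.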
+ alternative_cb arm64_enable_wa2_handling
+ b \targ
+ alternative_cb_end
+ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
+ cbz \tmp2, \targ // no mitigation required on this CPU
+ ldr \tmp2, [tsk, #TSK_TI_FLAGS]
+ tbnz \tmp2, #TIF_SSBD, \targ // mitigation state is fixed for this task
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+ alternative_cb arm64_update_smccc_conduit
+ nop // Patched to SMC/HVC #0
+ alternative_cb_end
+ #endif
+ .endm
+
.macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
.if \el == 0
mrs x21, sp_el0
- mov tsk, sp
- and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear,
- ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
+ ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
+ apply_ssbd 1, 1f, x22, x23
+
+ #ifdef CONFIG_ARM64_SSBD
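+ // apply_ssbd may have clobbered x0-x3; reload them from the pt_regs
+ // frame saved at the start of kernel_entry.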
+ ldp x0, x1, [sp, #16 * 0]
+ ldp x2, x3, [sp, #16 * 1]
+ #endif
+ 1:
+
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
alternative_else_nop_endif
#endif
3:
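+ // Disable the mitigation when returning to EL0. x0/x1 are safe to
+ // use as scratch here: they are reloaded from the stack below.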
+ apply_ssbd 0, 5f, x0, x1
+ 5:
.endif
+
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22
ldp x0, x1, [sp, #16 * 0]
*/
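+/*
+ * If the architecture/config cannot use optimized inlining, or GCC is
+ * too old to honour it, force every inline function to be inlined;
+ * otherwise only mark them unused and keep them out of the tracer.
+ */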
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline \
- inline __attribute__((always_inline, unused)) notrace __gnu_inline
+#define inline inline __attribute__((always_inline,unused)) notrace
+#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
+#define __inline __inline __attribute__((always_inline,unused)) notrace
#else
-#define inline inline __attribute__((unused)) notrace __gnu_inline
+/* A lot of inline functions can cause havoc with function tracing */
+#define inline inline __attribute__((unused)) notrace
+#define __inline__ __inline__ __attribute__((unused)) notrace
+#define __inline __inline __attribute__((unused)) notrace
#endif
-#define __inline__ inline
-#define __inline inline
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))