/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/frame.h>
.section .text..__x86.indirect_thunk

	ANNOTATE_INTRA_FUNCTION_CALL

	.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)

	ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
		      __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)
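/*
 * For illustration only: the RETPOLINE \reg default above expands to
 * (roughly) the classic retpoline sequence; the label names here are
 * made up, not the ones the macro generates:
 *
 *	call	.Ldo_rop
 * .Lspec_trap:
 *	pause
 *	lfence
 *	jmp	.Lspec_trap
 * .Ldo_rop:
 *	mov	%\reg, (%rsp)
 *	ret
 *
 * Speculation past the CALL is caught in the PAUSE/LFENCE loop, while
 * the architectural path overwrites the return address on the stack
 * and RETs to *%\reg without an indirect branch the BTB could have
 * poisoned.
 */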
/*
 * Despite being an assembler file we can't just use .irp here
 * because __KSYM_DEPS__ only uses the C preprocessor and would
 * only see one instance of "__x86_indirect_thunk_\reg" rather
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 *
 * Worse, you can only have a single EXPORT_SYMBOL per line,
 * and CPP can't insert newlines, so we have to repeat everything
 * at least twice.
 */

#define __EXPORT_THUNK(sym)	_ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_thunk_array)

#define GEN(reg) THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN
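/*
 * <asm/GEN-for-each-reg.h> invokes GEN(reg) once per general purpose
 * register except the stack pointer, so on 64-bit the include above
 * emits THUNK rax, THUNK rcx, ..., THUNK r15: one
 * RETPOLINE_THUNK_SIZE-aligned thunk per register.
 */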
	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
#ifdef CONFIG_CALL_DEPTH_TRACKING

	.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL)

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_call_thunk_array)

#define GEN(reg) CALL_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_call_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_call_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
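/*
 * A call thunk differs from the plain indirect thunk in that, on top
 * of the retpoline, it also accounts the call in the per-CPU call
 * depth counter (roughly a sarq $5 on pcpu_hot + X86_call_depth; see
 * the __x86_return_skl slowpath at the end of this file), which the
 * return thunk later unwinds.
 */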
.macro JUMP_THUNK reg
	.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_jump_thunk_array)

#define GEN(reg) JUMP_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_jump_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_jump_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
/*
 * This function name is magical and is used by -mfunction-return=thunk-extern
 * for the compiler to generate JMPs to it.
 */
#ifdef CONFIG_RETHUNK
/*
 * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
 * special addresses:
 *
 * - srso_alias_untrain_ret() is 2M aligned
 * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
 *   and 20 in its virtual address are set (while those bits in the
 *   srso_alias_untrain_ret() function are cleared).
 *
 * This guarantees that those two addresses will alias in the branch
 * target buffer of Zen3/4 generations, causing any potentially
 * poisoned entries at that BTB slot to be evicted.
 *
 * As a result, srso_alias_safe_ret() becomes a safe return.
 */
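/*
 * For reference, the aliasing is what the kernel linker script sets up
 * when placing the two sections below; roughly:
 *
 *	. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
 *	*(.text..__x86.rethunk_safe)
 *
 * i.e. srso_alias_safe_ret() lands at offset 0x104104 from the start
 * of the 2M page holding srso_alias_untrain_ret().
 */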
#ifdef CONFIG_CPU_SRSO
.section .text..__x86.rethunk_untrain

SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
.section .text..__x86.rethunk_safe
#else
/* dummy definition for alternatives */
SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
SYM_FUNC_END(srso_alias_untrain_ret)
#endif

SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
	lea 8(%_ASM_SP), %_ASM_SP
	ret
	int3
SYM_FUNC_END(srso_alias_safe_ret)
.section .text..__x86.return_thunk

SYM_CODE_START(srso_alias_return_thunk)
	call srso_alias_safe_ret
	ud2
SYM_CODE_END(srso_alias_return_thunk)
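/*
 * Roughly how this contains speculation: the CALL pushes a return
 * address pointing at the UD2, srso_alias_safe_ret() discards it again
 * with the LEA, and its RET architecturally returns to the original
 * return site. A mispredicted or poisoned prediction for that RET can
 * only steer speculation into the UD2, where it is trapped.
 */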
/*
 * Some generic notes on the untraining sequences:
 *
 * They are interchangeable when it comes to flushing potentially wrong
 * RET predictions from the BTB.
 *
 * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
 * Retbleed sequence because the return sequence done there
 * (srso_safe_ret()) is longer and the return sequence must fully nest
 * (end before) the untraining sequence. Therefore, the untraining
 * sequence must fully overlap the return sequence.
 *
 * Regarding alignment - the instructions which need to be untrained
 * must all start at a cacheline boundary for Zen1/2 generations. That
 * is, both the instruction sequence starting at srso_safe_ret() and
 * the one starting at retbleed_return_thunk() must begin at a
 * cacheline boundary.
 */
/*
 * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
 * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
 *    alignment within the BTB.
 * 2) The instruction at retbleed_untrain_ret must contain, and not
 *    end with, the 0xc3 byte of the RET.
 * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
 *    from re-poisoning the BTB prediction.
 */
	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	/*
	 * As executed from retbleed_untrain_ret, this is:
	 *
	 *   TEST $0xcc, %bl
	 *   LFENCE
	 *   JMP retbleed_return_thunk
	 *
	 * Executing the TEST instruction has a side effect of evicting any BTB
	 * prediction (potentially attacker controlled) attached to the RET, as
	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
	 */
	.byte	0xf6
	/*
	 * As executed from retbleed_return_thunk, this is a plain RET.
	 *
	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
	 *
	 * We subsequently jump backwards and architecturally execute the RET.
	 * This creates a correct BTB prediction (type=ret), but in the
	 * meantime we suffer Straight Line Speculation (because the type was
	 * no branch) which is halted by the INT3.
	 *
	 * With SMT enabled and STIBP active, a sibling thread cannot poison
	 * RET's prediction to a type of its choice, but can evict the
	 * prediction due to competitive sharing. If the prediction is
	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
	 * which will be contained safely by the INT3.
	 */
SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
	ret
	int3
SYM_CODE_END(retbleed_return_thunk)
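	/*
	 * Byte-level view, for illustration: the code bytes above are
	 *
	 *	f6 c3 cc
	 *
	 * Entered at retbleed_untrain_ret (the 0xf6) they decode as
	 * TEST $0xcc, %bl; entered at retbleed_return_thunk (the 0xc3)
	 * they decode as RET followed by INT3.
	 */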
	/*
	 * Ensure the TEST decoding / BTB invalidation is complete.
	 */
	lfence

	/*
	 * Jump back and execute the RET in the middle of the TEST instruction.
	 * INT3 is for SLS protection.
	 */
	jmp retbleed_return_thunk
	int3
SYM_FUNC_END(retbleed_untrain_ret)
__EXPORT_THUNK(retbleed_untrain_ret)
/*
 * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
 * above. On kernel entry, srso_untrain_ret() is executed which is a
 *
 * movabs $0xccccc30824648d48,%rax
 *
 * and when the return thunk executes the inner label srso_safe_ret()
 * later, it is a stack manipulation and a RET which is mispredicted and
 * thus a "safe" one to use.
 */
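/*
 * Decoding the immediate, for illustration: its bytes in memory are
 *
 *	48 8d 64 24 08 c3 cc cc
 *
 * so when execution enters at srso_safe_ret() (the 0x48) they run as
 *
 *	lea 0x8(%rsp), %rsp
 *	ret
 *	int3
 *	int3
 *
 * i.e. the "stack manipulation and a RET" mentioned above.
 */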
	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	.byte 0x48, 0xb8

	/*
	 * This forces the function return instruction to speculate into a trap
	 * (UD2 in srso_return_thunk() below). This RET will then mispredict
	 * and execution will continue at the return site read from the top of
	 * the stack.
	 */
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
	lea 8(%_ASM_SP), %_ASM_SP
	ret
	int3
	int3
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)
__EXPORT_THUNK(srso_untrain_ret)
SYM_CODE_START(srso_return_thunk)
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_return_thunk)
SYM_FUNC_START(entry_untrain_ret)
	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)
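/*
 * ALTERNATIVE_2 patching picks the last matching feature: the Retbleed
 * untrain is the build-time default, X86_FEATURE_SRSO selects the
 * Zen1/2 MOVABS sequence, and X86_FEATURE_SRSO_ALIAS selects the
 * Zen3/4 BTB-aliasing sequence.
 */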
SYM_CODE_START(__x86_return_thunk)
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)

#endif /* CONFIG_RETHUNK */
#ifdef CONFIG_CALL_DEPTH_TRACKING

SYM_FUNC_START(__x86_return_skl)
	/*
	 * Keep the hotpath in a 16-byte I-fetch for the non-debug
	 * case.
	 */
	CALL_THUNKS_DEBUG_INC_RETS
	shlq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth)
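	/*
	 * Rough sketch of the accounting, under the assumption that the
	 * authoritative macros live in <asm/nospec-branch.h>: each call
	 * thunk shifts the per-CPU counter right by 5 with sign
	 * replication (sarq $5), and each return shifts it left by 5
	 * (the shlq above). Once returns have outpaced calls by about
	 * 12 (64 / 5), the counter reaches zero, meaning the RSB may
	 * have run empty; the slowpath below then stuffs it with 16
	 * dummy calls before issuing the real RET.
	 */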
	CALL_THUNKS_DEBUG_INC_STUFFS

	ANNOTATE_INTRA_FUNCTION_CALL

SYM_FUNC_END(__x86_return_skl)

#endif /* CONFIG_CALL_DEPTH_TRACKING */