/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
773:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	774f;				\
775:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	add	$(BITS_PER_LONG/8) * 2, sp;	\
	dec	reg;				\
	jnz	771b;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
		      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	jmp	*%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
		      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	call	*%\reg
#endif
.endm
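
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * .S call site dispatches through a register like so; note the register
 * is named without the '%' prefix, the macros add it:
 *
 *	movq	my_handler(%rip), %r8	# hypothetical function pointer
 *	CALL_NOSPEC r8
 *
 * A tail call would use JMP_NOSPEC r8 instead.
 */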

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
#endif
.endm
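
/*
 * Illustrative sketch (not part of the original header): callers pass a
 * scratch register, the loop count defined above, and a feature bit that
 * gates the whole sequence, e.g. on context switch:
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * The feature flag shown is only an example; the real gate depends on
 * the call site.
 */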

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE				\
	"999:\n\t"					\
	".pushsection .discard.retpoline_safe\n\t"	\
	_ASM_PTR " 999b\n\t"				\
	".popsection\n\t"

#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
	extern asmlinkage void __x86_indirect_thunk_ ## reg (void);
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer
 * GCC; that is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC					\
	ALTERNATIVE_2(					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[thunk_target]\n",			\
	"call __x86_indirect_thunk_%V[thunk_target]\n",	\
	X86_FEATURE_RETPOLINE,				\
	"lfence;\n"					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[thunk_target]\n",			\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
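
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * C call site pairs CALL_NOSPEC with THUNK_TARGET() so the indirect call
 * is routed through the thunk when retpolines are enabled:
 *
 *	void (*fn)(void) = my_callback;		// hypothetical pointer
 *
 *	asm volatile(CALL_NOSPEC
 *		     : : THUNK_TARGET(fn)
 *		     : "memory");
 */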

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC					\
	ALTERNATIVE_2(					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[thunk_target]\n",			\
	"       jmp    904f;\n"				\
	"       .align 16\n"				\
	"901:	call   903f;\n"				\
	"902:	pause;\n"				\
	"	lfence;\n"				\
	"       jmp    902b;\n"				\
	"       .align 16\n"				\
	"903:	lea    4(%%esp), %%esp;\n"		\
	"       pushl  %[thunk_target];\n"		\
	"       ret;\n"					\
	"       .align 16\n"				\
	"904:	call   901b;\n",			\
	X86_FEATURE_RETPOLINE,				\
	"lfence;\n"					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[thunk_target]\n",			\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];
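
/*
 * Illustrative sketch (not part of the original header): these section
 * bounds let code test whether an address points into a retpoline thunk,
 * roughly:
 *
 *	static bool is_indirect_thunk(unsigned long addr)	// hypothetical helper
 *	{
 *		return addr >= (unsigned long)__indirect_thunk_start &&
 *		       addr < (unsigned long)__indirect_thunk_end;
 *	}
 *
 * The kprobes optimizer performs a check of this shape.
 */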

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}
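
/*
 * Illustrative sketch (not part of the original header): context-switch
 * code issues the barrier when the conditional-IBPB static key is on and
 * the incoming task wants protection, roughly:
 *
 *	if (static_branch_unlikely(&switch_mm_cond_ibpb))
 *		indirect_branch_prediction_barrier();
 *
 * The real condition also inspects per-mm state before flushing.
 */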

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()		\
do {								\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;		\
								\
	preempt_disable();					\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,		\
			      X86_FEATURE_USE_IBRS_FW);		\
} while (0)

#define firmware_restrict_branch_speculation_end()		\
do {								\
	u64 val = x86_spec_ctrl_base;				\
								\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,		\
			      X86_FEATURE_USE_IBRS_FW);		\
	preempt_enable();					\
} while (0)
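
/*
 * Illustrative sketch (not part of the original header): a firmware call
 * is bracketed by the pair, keeping IBRS set (and preemption disabled)
 * for the duration of the call:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_runtime_call(args);	// hypothetical firmware entry
 *	firmware_restrict_branch_speculation_end();
 */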

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}
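
/*
 * Illustrative sketch (not part of the original header): the idle variant
 * is intended to run right before the CPU goes to sleep, e.g. ahead of an
 * MWAIT-based idle sequence:
 *
 *	mds_idle_clear_cpu_buffers();	// flush before napping
 *	__monitor(&flag, 0, 0);		// hypothetical idle loop
 *	__mwait(eax, ecx);
 */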

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rcx,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq/ret
 *
 * Without retpolines configured:
 *
 *    jmp *%rcx for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE	17
#  define RETPOLINE_RCX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */	\
	EMIT1(0xC3);             /* retq */			\
} while (0)
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
	EMIT1(0xC3);             /* ret */			\
} while (0)
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE	2
#  define RETPOLINE_RCX_BPF_JIT()				\
	EMIT2(0xFF, 0xE1);       /* jmp *%rcx */
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
	EMIT2(0xFF, 0xE2)        /* jmp *%edx */
# endif
#endif
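
/*
 * Illustrative sketch (not part of the original header): the x86-64 BPF
 * JIT emits this sequence for the tail-call indirect jump once the target
 * is in %rcx, budgeting RETPOLINE_RCX_BPF_JIT_SIZE bytes for it, roughly:
 *
 *	// prog->bpf_func already loaded into %rcx
 *	RETPOLINE_RCX_BPF_JIT();
 */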

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */