KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
[platform/kernel/linux-rpi.git] arch/x86/include/asm/nospec-branch.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>

#define RETPOLINE_THUNK_SIZE    32

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS         32      /* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)       \
        mov     $(nr/2), reg;                   \
771:                                            \
        ANNOTATE_INTRA_FUNCTION_CALL;           \
        call    772f;                           \
773:    /* speculation trap */                  \
        UNWIND_HINT_EMPTY;                      \
        pause;                                  \
        lfence;                                 \
        jmp     773b;                           \
772:                                            \
        ANNOTATE_INTRA_FUNCTION_CALL;           \
        call    774f;                           \
775:    /* speculation trap */                  \
        UNWIND_HINT_EMPTY;                      \
        pause;                                  \
        lfence;                                 \
        jmp     775b;                           \
774:                                            \
        add     $(BITS_PER_LONG/8) * 2, sp;     \
        dec     reg;                            \
        jnz     771b;

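/*
 * For example, with nr == RSB_CLEAR_LOOPS == 32 the loop above runs
 * nr/2 == 16 iterations. Each iteration issues two CALLs, so 32 return
 * addresses are pushed into the RSB, every one of them pointing at a
 * pause;lfence;jmp speculation trap. The "add $(BITS_PER_LONG/8) * 2, sp"
 * (16 bytes on 64-bit, 8 on 32-bit) drops the two return addresses the
 * CALLs left on the real stack, so the stack pointer is unchanged once
 * the loop exits.
 */
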
#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
        .Lannotate_\@:
        .pushsection .discard.retpoline_safe
        _ASM_PTR .Lannotate_\@
        .popsection
.endm

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro ANNOTATE_UNRET_END
#ifdef CONFIG_DEBUG_ENTRY
        ANNOTATE_RETPOLINE_SAFE
        nop
#endif
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
                      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
                      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
        jmp     *%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
                      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
                      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
        call    *%\reg
#endif
.endm
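
/*
 * Illustrative use from a .S file (the register choice is arbitrary and
 * not taken from this header): to call through a function pointer held
 * in %rax, write
 *
 *	CALL_NOSPEC rax
 *
 * i.e. pass the bare register name; the macro supplies the '%' itself
 * and, for retpoline builds, redirects to __x86_indirect_thunk_rax.
 */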

/*
 * A simpler FILL_RETURN_BUFFER macro, so nobody has to use the CPP
 * monstrosity above by hand.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
        ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
        __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
.endm
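
/*
 * Illustrative use (register and feature bit are examples, not taken from
 * this header): stuff the whole RSB on a context-switch style path:
 *
 *	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * \reg is a scratch register (here, unlike CALL_NOSPEC, with the '%')
 * used as the loop counter, \nr the number of entries to stuff and \ftr
 * the X86_FEATURE_* bit that enables the stuffing alternative.
 */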

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * zen_untrain_ret() doesn't clobber anything, but it does need a stack;
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#ifdef CONFIG_RETPOLINE
        ANNOTATE_UNRET_END
        ALTERNATIVE_2 "",                                               \
                      "call zen_untrain_ret", X86_FEATURE_UNRET,        \
                      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
#endif
.endm
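
/*
 * Illustrative placement (sketch only, not taken from this header): in an
 * entry path, after the CR3 switch and once a stack exists, but before
 * any RET can be reached:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 *	...
 *	UNTRAIN_RET
 *
 * Callers must treat AX, CX and DX as clobbered because of the
 * entry_ibpb() alternative.
 */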

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE                                 \
        "999:\n\t"                                              \
        ".pushsection .discard.retpoline_safe\n\t"              \
        _ASM_PTR " 999b\n\t"                                    \
        ".popsection\n\t"

extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);
extern void entry_ibpb(void);

#ifdef CONFIG_RETPOLINE

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];

#define GEN(reg) \
        extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

extern retpoline_thunk_t __x86_indirect_thunk_array[];

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer GCC;
 * that is guaranteed when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC                                            \
        ALTERNATIVE_2(                                          \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "call __x86_indirect_thunk_%V[thunk_target]\n",         \
        X86_FEATURE_RETPOLINE,                                  \
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC                                            \
        ALTERNATIVE_2(                                          \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "       jmp    904f;\n"                                 \
        "       .align 16\n"                                    \
        "901:   call   903f;\n"                                 \
        "902:   pause;\n"                                       \
        "       lfence;\n"                                      \
        "       jmp    902b;\n"                                 \
        "       .align 16\n"                                    \
        "903:   lea    4(%%esp), %%esp;\n"                      \
        "       pushl  %[thunk_target];\n"                      \
        "       ret;\n"                                         \
        "       .align 16\n"                                    \
        "904:   call   901b;\n",                                \
        X86_FEATURE_RETPOLINE,                                  \
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
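
/*
 * Illustrative use from C ('fn' and 'ret' are made-up names, not part of
 * this header): an indirect call through a function pointer using the
 * retpoline-aware sequence selected at patch time:
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 */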

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
        SPECTRE_V2_NONE,
        SPECTRE_V2_RETPOLINE,
        SPECTRE_V2_LFENCE,
        SPECTRE_V2_EIBRS,
        SPECTRE_V2_EIBRS_RETPOLINE,
        SPECTRE_V2_EIBRS_LFENCE,
        SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
        SPECTRE_V2_USER_NONE,
        SPECTRE_V2_USER_STRICT,
        SPECTRE_V2_USER_STRICT_PREFERRED,
        SPECTRE_V2_USER_PRCTL,
        SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
        SPEC_STORE_BYPASS_NONE,
        SPEC_STORE_BYPASS_DISABLE,
        SPEC_STORE_BYPASS_PRCTL,
        SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
        asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
                : : "c" (msr),
                    "a" ((u32)val),
                    "d" ((u32)(val >> 32)),
                    [feature] "i" (feature)
                : "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
        u64 val = PRED_CMD_IBPB;

        alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
extern u64 x86_spec_ctrl_current;
extern void write_spec_ctrl_current(u64 val, bool force);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()                    \
do {                                                                    \
        preempt_disable();                                              \
        alternative_msr_write(MSR_IA32_SPEC_CTRL,                       \
                              spec_ctrl_current() | SPEC_CTRL_IBRS,     \
                              X86_FEATURE_USE_IBRS_FW);                 \
} while (0)

#define firmware_restrict_branch_speculation_end()                      \
do {                                                                    \
        alternative_msr_write(MSR_IA32_SPEC_CTRL,                       \
                              spec_ctrl_current(),                      \
                              X86_FEATURE_USE_IBRS_FW);                 \
        preempt_enable();                                               \
} while (0)
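
/*
 * Illustrative use ('some_firmware_call' is a stand-in, not part of this
 * header): bracket a call into EFI/APM/BIOS code so IBRS is set for its
 * duration:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = some_firmware_call(args);
 *	firmware_restrict_branch_speculation_end();
 *
 * Preemption stays disabled between the two calls, so the firmware call
 * must not sleep.
 */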

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
        static const u16 ds = __KERNEL_DS;

        /*
         * Has to be the memory-operand variant because only that
         * guarantees the CPU buffer flush functionality according to
         * documentation. The register-operand variant does not.
         * Works with any segment selector, but a valid writable
         * data segment is the fastest variant.
         *
         * "cc" clobber is required because VERW modifies ZF.
         */
        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
        if (static_branch_likely(&mds_user_clear))
                mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
        if (static_branch_likely(&mds_idle_clear))
                mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */