x86/bugs: Add retbleed=ibpb
arch/x86/include/asm/nospec-branch.h (platform/kernel/linux-rpi.git)
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>

#define RETPOLINE_THUNK_SIZE    32

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS         32      /* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)       \
        mov     $(nr/2), reg;                   \
771:                                            \
        ANNOTATE_INTRA_FUNCTION_CALL;           \
        call    772f;                           \
773:    /* speculation trap */                  \
        UNWIND_HINT_EMPTY;                      \
        pause;                                  \
        lfence;                                 \
        jmp     773b;                           \
772:                                            \
        ANNOTATE_INTRA_FUNCTION_CALL;           \
        call    774f;                           \
775:    /* speculation trap */                  \
        UNWIND_HINT_EMPTY;                      \
        pause;                                  \
        lfence;                                 \
        jmp     775b;                           \
774:                                            \
        add     $(BITS_PER_LONG/8) * 2, sp;     \
        dec     reg;                            \
        jnz     771b;
#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
        .Lannotate_\@:
        .pushsection .discard.retpoline_safe
        _ASM_PTR .Lannotate_\@
        .popsection
.endm

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE
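
/*
 * Illustrative use only (a sketch, not taken from this header): a RET
 * that is deliberately left bare, e.g. inside a thunk, is annotated so
 * objtool's RETBleed validation does not flag it:
 *
 *	ANNOTATE_UNRET_SAFE
 *	ret
 */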

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
                      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
                      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
        jmp     *%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
                      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
                      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
        call    *%\reg
#endif
.endm
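
/*
 * Illustrative use only: with the call target in %rdi, an indirect
 * call is written as
 *
 *	CALL_NOSPEC rdi
 *
 * which becomes a thunk call, an LFENCE-guarded indirect call, or a
 * plain (annotated) indirect call depending on the enabled mitigation.
 */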

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
        __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
#endif
.endm
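
/*
 * Illustrative use only, modelled on the context-switch path; the
 * register and feature flag shown are examples, not a prescription:
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * When \ftr is not set, the ALTERNATIVE leaves the "jmp .Lskip_rsb_\@"
 * in place and the fill is skipped entirely.
 */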

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While zen_untrain_ret() doesn't clobber anything but does require a stack,
 * entry_ibpb() will clobber AX, CX and DX.
 *
 * Therefore, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 "",                                               \
                      "call zen_untrain_ret", X86_FEATURE_UNRET,        \
                      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
#endif
.endm
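
/*
 * Illustrative placement only (a sketch of an entry path, not code
 * from this file): after the CR3 switch, once a stack exists, and
 * before the first RET:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 *	...
 *	UNTRAIN_RET
 */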

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE                                 \
        "999:\n\t"                                              \
        ".pushsection .discard.retpoline_safe\n\t"              \
        _ASM_PTR " 999b\n\t"                                    \
        ".popsection\n\t"

extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);
extern void entry_ibpb(void);

#ifdef CONFIG_RETPOLINE

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];

#define GEN(reg) \
        extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

extern retpoline_thunk_t __x86_indirect_thunk_array[];

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer
 * GCC; a new enough compiler is guaranteed when CONFIG_RETPOLINE is
 * enabled.
 */
# define CALL_NOSPEC                                            \
        ALTERNATIVE_2(                                          \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "call __x86_indirect_thunk_%V[thunk_target]\n",         \
        X86_FEATURE_RETPOLINE,                                  \
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC                                            \
        ALTERNATIVE_2(                                          \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "       jmp    904f;\n"                                 \
        "       .align 16\n"                                    \
        "901:   call   903f;\n"                                 \
        "902:   pause;\n"                                       \
        "       lfence;\n"                                      \
        "       jmp    902b;\n"                                 \
        "       .align 16\n"                                    \
        "903:   lea    4(%%esp), %%esp;\n"                      \
        "       pushl  %[thunk_target];\n"                      \
        "       ret;\n"                                         \
        "       .align 16\n"                                    \
        "904:   call   901b;\n",                                \
        X86_FEATURE_RETPOLINE,                                  \
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
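
/*
 * Illustrative use only ('fn' and 'ret' are hypothetical): from C, an
 * indirect call goes through CALL_NOSPEC with the target bound via
 * THUNK_TARGET, so the %V vs. "r"/"rm" constraint differences stay
 * contained in this header:
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 */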

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
        SPECTRE_V2_NONE,
        SPECTRE_V2_RETPOLINE,
        SPECTRE_V2_LFENCE,
        SPECTRE_V2_EIBRS,
        SPECTRE_V2_EIBRS_RETPOLINE,
        SPECTRE_V2_EIBRS_LFENCE,
        SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
        SPECTRE_V2_USER_NONE,
        SPECTRE_V2_USER_STRICT,
        SPECTRE_V2_USER_STRICT_PREFERRED,
        SPECTRE_V2_USER_PRCTL,
        SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
        SPEC_STORE_BYPASS_NONE,
        SPEC_STORE_BYPASS_DISABLE,
        SPEC_STORE_BYPASS_PRCTL,
        SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
        asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
                : : "c" (msr),
                    "a" ((u32)val),
                    "d" ((u32)(val >> 32)),
                    [feature] "i" (feature)
                : "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
        u64 val = PRED_CMD_IBPB;

        alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
extern void write_spec_ctrl_current(u64 val, bool force);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()                    \
do {                                                                    \
        u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;                  \
                                                                        \
        preempt_disable();                                              \
        alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
} while (0)

#define firmware_restrict_branch_speculation_end()                      \
do {                                                                    \
        u64 val = x86_spec_ctrl_base;                                   \
                                                                        \
        alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
} while (0)
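
/*
 * Illustrative use only (efi_call_foo() is hypothetical): bracket the
 * firmware call so IBRS is enabled across it and preemption stays
 * disabled in between:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_call_foo(...);
 *	firmware_restrict_branch_speculation_end();
 */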

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
        static const u16 ds = __KERNEL_DS;

        /*
         * Has to be the memory-operand variant because only that
         * guarantees the CPU buffer flush functionality according to
         * documentation. The register-operand variant does not.
         * Works with any segment selector, but a valid writable
         * data segment is the fastest variant.
         *
         * "cc" clobber is required because VERW modifies ZF.
         */
        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
        if (static_branch_likely(&mds_user_clear))
                mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
        if (static_branch_likely(&mds_idle_clear))
                mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */