x86/srso: Fix return thunks in generated code
[platform/kernel/linux-starfive.git] / arch/x86/kernel/cpu/bugs.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1994  Linus Torvalds
4  *
5  *  Cyrix stuff, June 1998 by:
6  *      - Rafael R. Reilova (moved everything from head.S),
7  *        <rreilova@ececs.uc.edu>
8  *      - Channing Corn (tests & fixes),
9  *      - Andrew D. Balsa (code cleanup).
10  */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19
20 #include <asm/spec-ctrl.h>
21 #include <asm/cmdline.h>
22 #include <asm/bugs.h>
23 #include <asm/processor.h>
24 #include <asm/processor-flags.h>
25 #include <asm/fpu/api.h>
26 #include <asm/msr.h>
27 #include <asm/vmx.h>
28 #include <asm/paravirt.h>
29 #include <asm/intel-family.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33
34 #include "cpu.h"
35
36 static void __init spectre_v1_select_mitigation(void);
37 static void __init spectre_v2_select_mitigation(void);
38 static void __init retbleed_select_mitigation(void);
39 static void __init spectre_v2_user_select_mitigation(void);
40 static void __init ssb_select_mitigation(void);
41 static void __init l1tf_select_mitigation(void);
42 static void __init mds_select_mitigation(void);
43 static void __init md_clear_update_mitigation(void);
44 static void __init md_clear_select_mitigation(void);
45 static void __init taa_select_mitigation(void);
46 static void __init mmio_select_mitigation(void);
47 static void __init srbds_select_mitigation(void);
48 static void __init l1d_flush_select_mitigation(void);
49 static void __init gds_select_mitigation(void);
50 static void __init srso_select_mitigation(void);
51
52 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
53 u64 x86_spec_ctrl_base;
54 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
55
56 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
57 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
58 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
59
60 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
61 EXPORT_SYMBOL_GPL(x86_pred_cmd);
62
63 static DEFINE_MUTEX(spec_ctrl_mutex);
64
65 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
66 static void update_spec_ctrl(u64 val)
67 {
68         this_cpu_write(x86_spec_ctrl_current, val);
69         wrmsrl(MSR_IA32_SPEC_CTRL, val);
70 }
71
72 /*
73  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
74  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
75  */
76 void update_spec_ctrl_cond(u64 val)
77 {
78         if (this_cpu_read(x86_spec_ctrl_current) == val)
79                 return;
80
81         this_cpu_write(x86_spec_ctrl_current, val);
82
83         /*
84          * When KERNEL_IBRS is in use this MSR is written on return-to-user;
85          * unless forced, the update can be delayed until that time.
86          */
87         if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
88                 wrmsrl(MSR_IA32_SPEC_CTRL, val);
89 }
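/*
 * Example: switching from a task with STIBP enabled to one without it only
 * changes the SPEC_CTRL_STIBP bit in 'val'.  With X86_FEATURE_KERNEL_IBRS the
 * WRMSR is deferred to the return-to-user path, so only the per-CPU cache is
 * refreshed here; otherwise the MSR is written immediately.
 */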
90
91 u64 spec_ctrl_current(void)
92 {
93         return this_cpu_read(x86_spec_ctrl_current);
94 }
95 EXPORT_SYMBOL_GPL(spec_ctrl_current);
96
97 /*
98  * AMD specific MSR info for Speculative Store Bypass control.
99  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
100  */
101 u64 __ro_after_init x86_amd_ls_cfg_base;
102 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
103
104 /* Control conditional STIBP in switch_to() */
105 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
106 /* Control conditional IBPB in switch_mm() */
107 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
108 /* Control unconditional IBPB in switch_mm() */
109 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
110
111 /* Control MDS CPU buffer clear before returning to user space */
112 DEFINE_STATIC_KEY_FALSE(mds_user_clear);
113 EXPORT_SYMBOL_GPL(mds_user_clear);
114 /* Control MDS CPU buffer clear before idling (halt, mwait) */
115 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
116 EXPORT_SYMBOL_GPL(mds_idle_clear);
117
118 /*
119  * Controls whether l1d flush based mitigations are enabled,
120  * based on hw features and admin setting via boot parameter;
121  * defaults to false.
122  */
123 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
124
125 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
126 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
127 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
128
129 void __init cpu_select_mitigations(void)
130 {
131         /*
132          * Read the SPEC_CTRL MSR to account for reserved bits which may
133          * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
134          * init code as it is not enumerated and depends on the family.
135          */
136         if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
137                 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
138
139                 /*
140                  * Previously running kernel (kexec), may have some controls
141                  * turned ON. Clear them and let the mitigations setup below
142                  * rediscover them based on configuration.
143                  */
144                 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
145         }
146
147         /* Select the proper CPU mitigations before patching alternatives: */
148         spectre_v1_select_mitigation();
149         spectre_v2_select_mitigation();
150         /*
151          * retbleed_select_mitigation() relies on the state set by
152          * spectre_v2_select_mitigation(); specifically it wants to know about
153          * spectre_v2=ibrs.
154          */
155         retbleed_select_mitigation();
156         /*
157          * spectre_v2_user_select_mitigation() relies on the state set by
158          * retbleed_select_mitigation(); specifically the STIBP selection is
159          * forced for UNRET or IBPB.
160          */
161         spectre_v2_user_select_mitigation();
162         ssb_select_mitigation();
163         l1tf_select_mitigation();
164         md_clear_select_mitigation();
165         srbds_select_mitigation();
166         l1d_flush_select_mitigation();
167         gds_select_mitigation();
168         srso_select_mitigation();
169 }
170
171 /*
172  * NOTE: This function is *only* called for SVM, since Intel uses
173  * MSR_IA32_SPEC_CTRL for SSBD.
174  */
175 void
176 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
177 {
178         u64 guestval, hostval;
179         struct thread_info *ti = current_thread_info();
180
181         /*
182          * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
183          * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
184          */
185         if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
186             !static_cpu_has(X86_FEATURE_VIRT_SSBD))
187                 return;
188
189         /*
190          * If the host has SSBD mitigation enabled, force it in the host's
191          * virtual MSR value. If it's not permanently enabled, evaluate
192          * current's TIF_SSBD thread flag.
193          */
194         if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
195                 hostval = SPEC_CTRL_SSBD;
196         else
197                 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
198
199         /* Sanitize the guest value */
200         guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
201
202         if (hostval != guestval) {
203                 unsigned long tif;
204
205                 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
206                                  ssbd_spec_ctrl_to_tif(hostval);
207
208                 speculation_ctrl_update(tif);
209         }
210 }
211 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
212
213 static void x86_amd_ssb_disable(void)
214 {
215         u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
216
217         if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
218                 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
219         else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
220                 wrmsrl(MSR_AMD64_LS_CFG, msrval);
221 }
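/*
 * Illustrative: on CPUs with only X86_FEATURE_LS_CFG_SSBD the write above
 * effectively resolves to
 *
 *	MSR_AMD64_LS_CFG = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 *
 * where the mask holds the family-specific SSBD disable bit set up in the
 * early AMD init code.
 */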
222
223 #undef pr_fmt
224 #define pr_fmt(fmt)     "MDS: " fmt
225
226 /* Default mitigation for MDS-affected CPUs */
227 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
228 static bool mds_nosmt __ro_after_init = false;
229
230 static const char * const mds_strings[] = {
231         [MDS_MITIGATION_OFF]    = "Vulnerable",
232         [MDS_MITIGATION_FULL]   = "Mitigation: Clear CPU buffers",
233         [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
234 };
235
236 static void __init mds_select_mitigation(void)
237 {
238         if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
239                 mds_mitigation = MDS_MITIGATION_OFF;
240                 return;
241         }
242
243         if (mds_mitigation == MDS_MITIGATION_FULL) {
244                 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
245                         mds_mitigation = MDS_MITIGATION_VMWERV;
246
247                 static_branch_enable(&mds_user_clear);
248
249                 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
250                     (mds_nosmt || cpu_mitigations_auto_nosmt()))
251                         cpu_smt_disable(false);
252         }
253 }
254
255 static int __init mds_cmdline(char *str)
256 {
257         if (!boot_cpu_has_bug(X86_BUG_MDS))
258                 return 0;
259
260         if (!str)
261                 return -EINVAL;
262
263         if (!strcmp(str, "off"))
264                 mds_mitigation = MDS_MITIGATION_OFF;
265         else if (!strcmp(str, "full"))
266                 mds_mitigation = MDS_MITIGATION_FULL;
267         else if (!strcmp(str, "full,nosmt")) {
268                 mds_mitigation = MDS_MITIGATION_FULL;
269                 mds_nosmt = true;
270         }
271
272         return 0;
273 }
274 early_param("mds", mds_cmdline);
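/*
 * Command line sketch (see Documentation/admin-guide/kernel-parameters.txt):
 *
 *	mds=off		- disable the mitigation
 *	mds=full	- clear CPU buffers on return to user space (default)
 *	mds=full,nosmt	- additionally disable SMT
 */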
275
276 #undef pr_fmt
277 #define pr_fmt(fmt)     "TAA: " fmt
278
279 enum taa_mitigations {
280         TAA_MITIGATION_OFF,
281         TAA_MITIGATION_UCODE_NEEDED,
282         TAA_MITIGATION_VERW,
283         TAA_MITIGATION_TSX_DISABLED,
284 };
285
286 /* Default mitigation for TAA-affected CPUs */
287 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
288 static bool taa_nosmt __ro_after_init;
289
290 static const char * const taa_strings[] = {
291         [TAA_MITIGATION_OFF]            = "Vulnerable",
292         [TAA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
293         [TAA_MITIGATION_VERW]           = "Mitigation: Clear CPU buffers",
294         [TAA_MITIGATION_TSX_DISABLED]   = "Mitigation: TSX disabled",
295 };
296
297 static void __init taa_select_mitigation(void)
298 {
299         u64 ia32_cap;
300
301         if (!boot_cpu_has_bug(X86_BUG_TAA)) {
302                 taa_mitigation = TAA_MITIGATION_OFF;
303                 return;
304         }
305
306         /* TSX previously disabled by tsx=off */
307         if (!boot_cpu_has(X86_FEATURE_RTM)) {
308                 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
309                 return;
310         }
311
312         if (cpu_mitigations_off()) {
313                 taa_mitigation = TAA_MITIGATION_OFF;
314                 return;
315         }
316
317         /*
318          * TAA mitigation via VERW is turned off if both
319          * tsx_async_abort=off and mds=off are specified.
320          */
321         if (taa_mitigation == TAA_MITIGATION_OFF &&
322             mds_mitigation == MDS_MITIGATION_OFF)
323                 return;
324
325         if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
326                 taa_mitigation = TAA_MITIGATION_VERW;
327         else
328                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
329
330         /*
331          * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
332          * A microcode update fixes this behavior to clear CPU buffers. It also
333          * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
334          * ARCH_CAP_TSX_CTRL_MSR bit.
335          *
336          * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
337          * update is required.
338          */
339         ia32_cap = x86_read_arch_cap_msr();
340         if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
341             !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
342                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
343
344         /*
345          * TSX is enabled, select alternate mitigation for TAA which is
346          * the same as MDS. Enable MDS static branch to clear CPU buffers.
347          *
348          * For guests that can't determine whether the correct microcode is
349          * present on host, enable the mitigation for UCODE_NEEDED as well.
350          */
351         static_branch_enable(&mds_user_clear);
352
353         if (taa_nosmt || cpu_mitigations_auto_nosmt())
354                 cpu_smt_disable(false);
355 }
356
357 static int __init tsx_async_abort_parse_cmdline(char *str)
358 {
359         if (!boot_cpu_has_bug(X86_BUG_TAA))
360                 return 0;
361
362         if (!str)
363                 return -EINVAL;
364
365         if (!strcmp(str, "off")) {
366                 taa_mitigation = TAA_MITIGATION_OFF;
367         } else if (!strcmp(str, "full")) {
368                 taa_mitigation = TAA_MITIGATION_VERW;
369         } else if (!strcmp(str, "full,nosmt")) {
370                 taa_mitigation = TAA_MITIGATION_VERW;
371                 taa_nosmt = true;
372         }
373
374         return 0;
375 }
376 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
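/*
 * Example of the interaction noted in taa_select_mitigation(): specifying
 * "tsx_async_abort=off" on its own does not leave the buffers unmitigated
 * while the MDS mitigation stays active, since both rely on the same VERW
 * based clearing; only "tsx_async_abort=off mds=off" turns it off entirely.
 */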
377
378 #undef pr_fmt
379 #define pr_fmt(fmt)     "MMIO Stale Data: " fmt
380
381 enum mmio_mitigations {
382         MMIO_MITIGATION_OFF,
383         MMIO_MITIGATION_UCODE_NEEDED,
384         MMIO_MITIGATION_VERW,
385 };
386
387 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
388 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
389 static bool mmio_nosmt __ro_after_init = false;
390
391 static const char * const mmio_strings[] = {
392         [MMIO_MITIGATION_OFF]           = "Vulnerable",
393         [MMIO_MITIGATION_UCODE_NEEDED]  = "Vulnerable: Clear CPU buffers attempted, no microcode",
394         [MMIO_MITIGATION_VERW]          = "Mitigation: Clear CPU buffers",
395 };
396
397 static void __init mmio_select_mitigation(void)
398 {
399         u64 ia32_cap;
400
401         if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
402              boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
403              cpu_mitigations_off()) {
404                 mmio_mitigation = MMIO_MITIGATION_OFF;
405                 return;
406         }
407
408         if (mmio_mitigation == MMIO_MITIGATION_OFF)
409                 return;
410
411         ia32_cap = x86_read_arch_cap_msr();
412
413         /*
414          * Enable CPU buffer clear mitigation for host and VMM, if also affected
415          * by MDS or TAA. Otherwise, enable mitigation for VMM only.
416          */
417         if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
418                                               boot_cpu_has(X86_FEATURE_RTM)))
419                 static_branch_enable(&mds_user_clear);
420         else
421                 static_branch_enable(&mmio_stale_data_clear);
422
423         /*
424          * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
425          * be propagated to uncore buffers, clearing the Fill buffers on idle
426          * is required irrespective of SMT state.
427          */
428         if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
429                 static_branch_enable(&mds_idle_clear);
430
431         /*
432          * Check if the system has the right microcode.
433          *
434          * CPU Fill buffer clear mitigation is enumerated by either an explicit
435          * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
436          * affected systems.
437          */
438         if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
439             (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
440              boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
441              !(ia32_cap & ARCH_CAP_MDS_NO)))
442                 mmio_mitigation = MMIO_MITIGATION_VERW;
443         else
444                 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
445
446         if (mmio_nosmt || cpu_mitigations_auto_nosmt())
447                 cpu_smt_disable(false);
448 }
449
450 static int __init mmio_stale_data_parse_cmdline(char *str)
451 {
452         if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
453                 return 0;
454
455         if (!str)
456                 return -EINVAL;
457
458         if (!strcmp(str, "off")) {
459                 mmio_mitigation = MMIO_MITIGATION_OFF;
460         } else if (!strcmp(str, "full")) {
461                 mmio_mitigation = MMIO_MITIGATION_VERW;
462         } else if (!strcmp(str, "full,nosmt")) {
463                 mmio_mitigation = MMIO_MITIGATION_VERW;
464                 mmio_nosmt = true;
465         }
466
467         return 0;
468 }
469 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
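/*
 * Command line sketch: "mmio_stale_data=full,nosmt" selects the VERW based
 * clearing and disables SMT.  Note that "mmio_stale_data=off" can still be
 * superseded by md_clear_update_mitigation() when MDS or TAA keep the buffer
 * clearing enabled.
 */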
470
471 #undef pr_fmt
472 #define pr_fmt(fmt)     "" fmt
473
474 static void __init md_clear_update_mitigation(void)
475 {
476         if (cpu_mitigations_off())
477                 return;
478
479         if (!static_key_enabled(&mds_user_clear))
480                 goto out;
481
482         /*
483          * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
484          * mitigation, if necessary.
485          */
486         if (mds_mitigation == MDS_MITIGATION_OFF &&
487             boot_cpu_has_bug(X86_BUG_MDS)) {
488                 mds_mitigation = MDS_MITIGATION_FULL;
489                 mds_select_mitigation();
490         }
491         if (taa_mitigation == TAA_MITIGATION_OFF &&
492             boot_cpu_has_bug(X86_BUG_TAA)) {
493                 taa_mitigation = TAA_MITIGATION_VERW;
494                 taa_select_mitigation();
495         }
496         if (mmio_mitigation == MMIO_MITIGATION_OFF &&
497             boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
498                 mmio_mitigation = MMIO_MITIGATION_VERW;
499                 mmio_select_mitigation();
500         }
501 out:
502         if (boot_cpu_has_bug(X86_BUG_MDS))
503                 pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
504         if (boot_cpu_has_bug(X86_BUG_TAA))
505                 pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
506         if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
507                 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
508         else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
509                 pr_info("MMIO Stale Data: Unknown: No mitigations\n");
510 }
511
512 static void __init md_clear_select_mitigation(void)
513 {
514         mds_select_mitigation();
515         taa_select_mitigation();
516         mmio_select_mitigation();
517
518         /*
519          * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
520          * and print their mitigation after MDS, TAA and MMIO Stale Data
521          * mitigation selection is done.
522          */
523         md_clear_update_mitigation();
524 }
525
526 #undef pr_fmt
527 #define pr_fmt(fmt)     "SRBDS: " fmt
528
529 enum srbds_mitigations {
530         SRBDS_MITIGATION_OFF,
531         SRBDS_MITIGATION_UCODE_NEEDED,
532         SRBDS_MITIGATION_FULL,
533         SRBDS_MITIGATION_TSX_OFF,
534         SRBDS_MITIGATION_HYPERVISOR,
535 };
536
537 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
538
539 static const char * const srbds_strings[] = {
540         [SRBDS_MITIGATION_OFF]          = "Vulnerable",
541         [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
542         [SRBDS_MITIGATION_FULL]         = "Mitigation: Microcode",
543         [SRBDS_MITIGATION_TSX_OFF]      = "Mitigation: TSX disabled",
544         [SRBDS_MITIGATION_HYPERVISOR]   = "Unknown: Dependent on hypervisor status",
545 };
546
547 static bool srbds_off;
548
549 void update_srbds_msr(void)
550 {
551         u64 mcu_ctrl;
552
553         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
554                 return;
555
556         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
557                 return;
558
559         if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
560                 return;
561
562         /*
563          * An MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
564          * being disabled and which hasn't received the SRBDS MSR microcode.
565          */
566         if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
567                 return;
568
569         rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
570
571         switch (srbds_mitigation) {
572         case SRBDS_MITIGATION_OFF:
573         case SRBDS_MITIGATION_TSX_OFF:
574                 mcu_ctrl |= RNGDS_MITG_DIS;
575                 break;
576         case SRBDS_MITIGATION_FULL:
577                 mcu_ctrl &= ~RNGDS_MITG_DIS;
578                 break;
579         default:
580                 break;
581         }
582
583         wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
584 }
585
586 static void __init srbds_select_mitigation(void)
587 {
588         u64 ia32_cap;
589
590         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
591                 return;
592
593         /*
594          * Check to see if this is one of the MDS_NO systems supporting TSX that
595          * are only exposed to SRBDS when TSX is enabled or when CPU is affected
596          * by Processor MMIO Stale Data vulnerability.
597          */
598         ia32_cap = x86_read_arch_cap_msr();
599         if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
600             !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
601                 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
602         else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
603                 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
604         else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
605                 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
606         else if (cpu_mitigations_off() || srbds_off)
607                 srbds_mitigation = SRBDS_MITIGATION_OFF;
608
609         update_srbds_msr();
610         pr_info("%s\n", srbds_strings[srbds_mitigation]);
611 }
612
613 static int __init srbds_parse_cmdline(char *str)
614 {
615         if (!str)
616                 return -EINVAL;
617
618         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
619                 return 0;
620
621         srbds_off = !strcmp(str, "off");
622         return 0;
623 }
624 early_param("srbds", srbds_parse_cmdline);
625
626 #undef pr_fmt
627 #define pr_fmt(fmt)     "L1D Flush : " fmt
628
629 enum l1d_flush_mitigations {
630         L1D_FLUSH_OFF = 0,
631         L1D_FLUSH_ON,
632 };
633
634 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
635
636 static void __init l1d_flush_select_mitigation(void)
637 {
638         if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
639                 return;
640
641         static_branch_enable(&switch_mm_cond_l1d_flush);
642         pr_info("Conditional flush on switch_mm() enabled\n");
643 }
644
645 static int __init l1d_flush_parse_cmdline(char *str)
646 {
647         if (!strcmp(str, "on"))
648                 l1d_flush_mitigation = L1D_FLUSH_ON;
649
650         return 0;
651 }
652 early_param("l1d_flush", l1d_flush_parse_cmdline);
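/*
 * Userspace sketch (illustrative): with "l1d_flush=on" the flush is opt-in
 * per task through the speculation control prctl, roughly:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
 *	      PR_SPEC_ENABLE, 0, 0);
 *
 * See Documentation/userspace-api/spec_ctrl.rst.
 */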
653
654 #undef pr_fmt
655 #define pr_fmt(fmt)     "GDS: " fmt
656
657 enum gds_mitigations {
658         GDS_MITIGATION_OFF,
659         GDS_MITIGATION_UCODE_NEEDED,
660         GDS_MITIGATION_FORCE,
661         GDS_MITIGATION_FULL,
662         GDS_MITIGATION_FULL_LOCKED,
663         GDS_MITIGATION_HYPERVISOR,
664 };
665
666 #if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION)
667 static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
668 #else
669 static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
670 #endif
671
672 static const char * const gds_strings[] = {
673         [GDS_MITIGATION_OFF]            = "Vulnerable",
674         [GDS_MITIGATION_UCODE_NEEDED]   = "Vulnerable: No microcode",
675         [GDS_MITIGATION_FORCE]          = "Mitigation: AVX disabled, no microcode",
676         [GDS_MITIGATION_FULL]           = "Mitigation: Microcode",
677         [GDS_MITIGATION_FULL_LOCKED]    = "Mitigation: Microcode (locked)",
678         [GDS_MITIGATION_HYPERVISOR]     = "Unknown: Dependent on hypervisor status",
679 };
680
681 bool gds_ucode_mitigated(void)
682 {
683         return (gds_mitigation == GDS_MITIGATION_FULL ||
684                 gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
685 }
686 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
687
688 void update_gds_msr(void)
689 {
690         u64 mcu_ctrl_after;
691         u64 mcu_ctrl;
692
693         switch (gds_mitigation) {
694         case GDS_MITIGATION_OFF:
695                 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
696                 mcu_ctrl |= GDS_MITG_DIS;
697                 break;
698         case GDS_MITIGATION_FULL_LOCKED:
699                 /*
700                  * The LOCKED state comes from the boot CPU. APs might not have
701                  * the same state. Make sure the mitigation is enabled on all
702                  * CPUs.
703                  */
704         case GDS_MITIGATION_FULL:
705                 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
706                 mcu_ctrl &= ~GDS_MITG_DIS;
707                 break;
708         case GDS_MITIGATION_FORCE:
709         case GDS_MITIGATION_UCODE_NEEDED:
710         case GDS_MITIGATION_HYPERVISOR:
711                 return;
712         }
713
714         wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
715
716         /*
717          * Check to make sure that the WRMSR value was not ignored. Writes to
718          * GDS_MITG_DIS will be ignored if this processor is locked but the boot
719          * processor was not.
720          */
721         rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
722         WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
723 }
724
725 static void __init gds_select_mitigation(void)
726 {
727         u64 mcu_ctrl;
728
729         if (!boot_cpu_has_bug(X86_BUG_GDS))
730                 return;
731
732         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
733                 gds_mitigation = GDS_MITIGATION_HYPERVISOR;
734                 goto out;
735         }
736
737         if (cpu_mitigations_off())
738                 gds_mitigation = GDS_MITIGATION_OFF;
739         /* Will verify below that mitigation _can_ be disabled */
740
741         /* No microcode */
742         if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
743                 if (gds_mitigation == GDS_MITIGATION_FORCE) {
744                         /*
745                          * This only needs to be done on the boot CPU so do it
746                          * here rather than in update_gds_msr()
747                          */
748                         setup_clear_cpu_cap(X86_FEATURE_AVX);
749                         pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
750                 } else {
751                         gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
752                 }
753                 goto out;
754         }
755
756         /* Microcode has mitigation, use it */
757         if (gds_mitigation == GDS_MITIGATION_FORCE)
758                 gds_mitigation = GDS_MITIGATION_FULL;
759
760         rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
761         if (mcu_ctrl & GDS_MITG_LOCKED) {
762                 if (gds_mitigation == GDS_MITIGATION_OFF)
763                         pr_warn("Mitigation locked. Disable failed.\n");
764
765                 /*
766                  * The mitigation is selected from the boot CPU. All other CPUs
767                  * _should_ have the same state. If the boot CPU isn't locked
768                  * but others are then update_gds_msr() will WARN() of the state
769                  * mismatch. If the boot CPU is locked update_gds_msr() will
770                  * ensure the other CPUs have the mitigation enabled.
771                  */
772                 gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
773         }
774
775         update_gds_msr();
776 out:
777         pr_info("%s\n", gds_strings[gds_mitigation]);
778 }
779
780 static int __init gds_parse_cmdline(char *str)
781 {
782         if (!str)
783                 return -EINVAL;
784
785         if (!boot_cpu_has_bug(X86_BUG_GDS))
786                 return 0;
787
788         if (!strcmp(str, "off"))
789                 gds_mitigation = GDS_MITIGATION_OFF;
790         else if (!strcmp(str, "force"))
791                 gds_mitigation = GDS_MITIGATION_FORCE;
792
793         return 0;
794 }
795 early_param("gather_data_sampling", gds_parse_cmdline);
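/*
 * Command line sketch: "gather_data_sampling=off" requests no mitigation
 * (rejected later if the microcode has locked the mitigation on), while
 * "gather_data_sampling=force" disables AVX as a stopgap when updated
 * microcode is not available.
 */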
796
797 #undef pr_fmt
798 #define pr_fmt(fmt)     "Spectre V1 : " fmt
799
800 enum spectre_v1_mitigation {
801         SPECTRE_V1_MITIGATION_NONE,
802         SPECTRE_V1_MITIGATION_AUTO,
803 };
804
805 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
806         SPECTRE_V1_MITIGATION_AUTO;
807
808 static const char * const spectre_v1_strings[] = {
809         [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
810         [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
811 };
812
813 /*
814  * Does SMAP provide full mitigation against speculative kernel access to
815  * userspace?
816  */
817 static bool smap_works_speculatively(void)
818 {
819         if (!boot_cpu_has(X86_FEATURE_SMAP))
820                 return false;
821
822         /*
823          * On CPUs which are vulnerable to Meltdown, SMAP does not
824          * prevent speculative access to user data in the L1 cache.
825          * Consider SMAP to be non-functional as a mitigation on these
826          * CPUs.
827          */
828         if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
829                 return false;
830
831         return true;
832 }
833
834 static void __init spectre_v1_select_mitigation(void)
835 {
836         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
837                 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
838                 return;
839         }
840
841         if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
842                 /*
843                  * With Spectre v1, a user can speculatively control either
844                  * path of a conditional swapgs with a user-controlled GS
845                  * value.  The mitigation is to add lfences to both code paths.
846                  *
847                  * If FSGSBASE is enabled, the user can put a kernel address in
848                  * GS, in which case SMAP provides no protection.
849                  *
850                  * If FSGSBASE is disabled, the user can only put a user space
851                  * address in GS.  That makes an attack harder, but still
852                  * possible if there's no SMAP protection.
853                  */
854                 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
855                     !smap_works_speculatively()) {
856                         /*
857                          * Mitigation can be provided from SWAPGS itself or
858                          * PTI as the CR3 write in the Meltdown mitigation
859                          * is serializing.
860                          *
861                          * If neither is there, mitigate with an LFENCE to
862                          * stop speculation through swapgs.
863                          */
864                         if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
865                             !boot_cpu_has(X86_FEATURE_PTI))
866                                 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
867
868                         /*
869                          * Enable lfences in the kernel entry (non-swapgs)
870                          * paths, to prevent user entry from speculatively
871                          * skipping swapgs.
872                          */
873                         setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
874                 }
875         }
876
877         pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
878 }
879
880 static int __init nospectre_v1_cmdline(char *str)
881 {
882         spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
883         return 0;
884 }
885 early_param("nospectre_v1", nospectre_v1_cmdline);
886
887 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
888         SPECTRE_V2_NONE;
889
890 #undef pr_fmt
891 #define pr_fmt(fmt)     "RETBleed: " fmt
892
893 enum retbleed_mitigation {
894         RETBLEED_MITIGATION_NONE,
895         RETBLEED_MITIGATION_UNRET,
896         RETBLEED_MITIGATION_IBPB,
897         RETBLEED_MITIGATION_IBRS,
898         RETBLEED_MITIGATION_EIBRS,
899 };
900
901 enum retbleed_mitigation_cmd {
902         RETBLEED_CMD_OFF,
903         RETBLEED_CMD_AUTO,
904         RETBLEED_CMD_UNRET,
905         RETBLEED_CMD_IBPB,
906 };
907
908 static const char * const retbleed_strings[] = {
909         [RETBLEED_MITIGATION_NONE]      = "Vulnerable",
910         [RETBLEED_MITIGATION_UNRET]     = "Mitigation: untrained return thunk",
911         [RETBLEED_MITIGATION_IBPB]      = "Mitigation: IBPB",
912         [RETBLEED_MITIGATION_IBRS]      = "Mitigation: IBRS",
913         [RETBLEED_MITIGATION_EIBRS]     = "Mitigation: Enhanced IBRS",
914 };
915
916 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
917         RETBLEED_MITIGATION_NONE;
918 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
919         RETBLEED_CMD_AUTO;
920
921 static int __ro_after_init retbleed_nosmt = false;
922
923 static int __init retbleed_parse_cmdline(char *str)
924 {
925         if (!str)
926                 return -EINVAL;
927
928         while (str) {
929                 char *next = strchr(str, ',');
930                 if (next) {
931                         *next = 0;
932                         next++;
933                 }
934
935                 if (!strcmp(str, "off")) {
936                         retbleed_cmd = RETBLEED_CMD_OFF;
937                 } else if (!strcmp(str, "auto")) {
938                         retbleed_cmd = RETBLEED_CMD_AUTO;
939                 } else if (!strcmp(str, "unret")) {
940                         retbleed_cmd = RETBLEED_CMD_UNRET;
941                 } else if (!strcmp(str, "ibpb")) {
942                         retbleed_cmd = RETBLEED_CMD_IBPB;
943                 } else if (!strcmp(str, "nosmt")) {
944                         retbleed_nosmt = true;
945                 } else {
946                         pr_err("Ignoring unknown retbleed option (%s).", str);
947                 }
948
949                 str = next;
950         }
951
952         return 0;
953 }
954 early_param("retbleed", retbleed_parse_cmdline);
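/*
 * Command line sketch: options are comma separated and parsed in order,
 * e.g. "retbleed=auto,nosmt" picks the automatic mitigation and disables
 * SMT when STIBP is not available; "retbleed=ibpb" asks for the IBPB based
 * mitigation instead of the untrained return thunk.
 */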
955
956 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
957 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
958
959 static void __init retbleed_select_mitigation(void)
960 {
961         bool mitigate_smt = false;
962
963         if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
964                 return;
965
966         switch (retbleed_cmd) {
967         case RETBLEED_CMD_OFF:
968                 return;
969
970         case RETBLEED_CMD_UNRET:
971                 if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
972                         retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
973                 } else {
974                         pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
975                         goto do_cmd_auto;
976                 }
977                 break;
978
979         case RETBLEED_CMD_IBPB:
980                 if (!boot_cpu_has(X86_FEATURE_IBPB)) {
981                         pr_err("WARNING: CPU does not support IBPB.\n");
982                         goto do_cmd_auto;
983                 } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
984                         retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
985                 } else {
986                         pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
987                         goto do_cmd_auto;
988                 }
989                 break;
990
991 do_cmd_auto:
992         case RETBLEED_CMD_AUTO:
993         default:
994                 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
995                     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
996                         if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
997                                 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
998                         else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
999                                 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1000                 }
1001
1002                 /*
1003                  * The Intel mitigation (IBRS or eIBRS) was already selected in
1004                  * spectre_v2_select_mitigation().  'retbleed_mitigation' will
1005                  * be set accordingly below.
1006                  */
1007
1008                 break;
1009         }
1010
1011         switch (retbleed_mitigation) {
1012         case RETBLEED_MITIGATION_UNRET:
1013                 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1014                 setup_force_cpu_cap(X86_FEATURE_UNRET);
1015
1016                 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1017                     boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1018                         pr_err(RETBLEED_UNTRAIN_MSG);
1019
1020                 mitigate_smt = true;
1021                 break;
1022
1023         case RETBLEED_MITIGATION_IBPB:
1024                 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1025                 mitigate_smt = true;
1026                 break;
1027
1028         default:
1029                 break;
1030         }
1031
1032         if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1033             (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
1034                 cpu_smt_disable(false);
1035
1036         /*
1037          * Let IBRS trump all on Intel without affecting the effects of the
1038          * retbleed= cmdline option.
1039          */
1040         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1041                 switch (spectre_v2_enabled) {
1042                 case SPECTRE_V2_IBRS:
1043                         retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1044                         break;
1045                 case SPECTRE_V2_EIBRS:
1046                 case SPECTRE_V2_EIBRS_RETPOLINE:
1047                 case SPECTRE_V2_EIBRS_LFENCE:
1048                         retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1049                         break;
1050                 default:
1051                         pr_err(RETBLEED_INTEL_MSG);
1052                 }
1053         }
1054
1055         pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1056 }
1057
1058 #undef pr_fmt
1059 #define pr_fmt(fmt)     "Spectre V2 : " fmt
1060
1061 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1062         SPECTRE_V2_USER_NONE;
1063 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1064         SPECTRE_V2_USER_NONE;
1065
1066 #ifdef CONFIG_RETPOLINE
1067 static bool spectre_v2_bad_module;
1068
1069 bool retpoline_module_ok(bool has_retpoline)
1070 {
1071         if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1072                 return true;
1073
1074         pr_err("System may be vulnerable to spectre v2\n");
1075         spectre_v2_bad_module = true;
1076         return false;
1077 }
1078
1079 static inline const char *spectre_v2_module_string(void)
1080 {
1081         return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1082 }
1083 #else
1084 static inline const char *spectre_v2_module_string(void) { return ""; }
1085 #endif
1086
1087 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1088 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1089 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1090 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1091
1092 #ifdef CONFIG_BPF_SYSCALL
1093 void unpriv_ebpf_notify(int new_state)
1094 {
1095         if (new_state)
1096                 return;
1097
1098         /* Unprivileged eBPF is enabled */
1099
1100         switch (spectre_v2_enabled) {
1101         case SPECTRE_V2_EIBRS:
1102                 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1103                 break;
1104         case SPECTRE_V2_EIBRS_LFENCE:
1105                 if (sched_smt_active())
1106                         pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1107                 break;
1108         default:
1109                 break;
1110         }
1111 }
1112 #endif
1113
1114 static inline bool match_option(const char *arg, int arglen, const char *opt)
1115 {
1116         int len = strlen(opt);
1117
1118         return len == arglen && !strncmp(arg, opt, len);
1119 }
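/*
 * Example: match_option("prctl,ibpb", 10, "prctl") is false because the
 * lengths differ; only a full-length match such as
 * match_option("prctl,ibpb", 10, "prctl,ibpb") succeeds.
 */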
1120
1121 /* The kernel command line selection for spectre v2 */
1122 enum spectre_v2_mitigation_cmd {
1123         SPECTRE_V2_CMD_NONE,
1124         SPECTRE_V2_CMD_AUTO,
1125         SPECTRE_V2_CMD_FORCE,
1126         SPECTRE_V2_CMD_RETPOLINE,
1127         SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1128         SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1129         SPECTRE_V2_CMD_EIBRS,
1130         SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1131         SPECTRE_V2_CMD_EIBRS_LFENCE,
1132         SPECTRE_V2_CMD_IBRS,
1133 };
1134
1135 enum spectre_v2_user_cmd {
1136         SPECTRE_V2_USER_CMD_NONE,
1137         SPECTRE_V2_USER_CMD_AUTO,
1138         SPECTRE_V2_USER_CMD_FORCE,
1139         SPECTRE_V2_USER_CMD_PRCTL,
1140         SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1141         SPECTRE_V2_USER_CMD_SECCOMP,
1142         SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1143 };
1144
1145 static const char * const spectre_v2_user_strings[] = {
1146         [SPECTRE_V2_USER_NONE]                  = "User space: Vulnerable",
1147         [SPECTRE_V2_USER_STRICT]                = "User space: Mitigation: STIBP protection",
1148         [SPECTRE_V2_USER_STRICT_PREFERRED]      = "User space: Mitigation: STIBP always-on protection",
1149         [SPECTRE_V2_USER_PRCTL]                 = "User space: Mitigation: STIBP via prctl",
1150         [SPECTRE_V2_USER_SECCOMP]               = "User space: Mitigation: STIBP via seccomp and prctl",
1151 };
1152
1153 static const struct {
1154         const char                      *option;
1155         enum spectre_v2_user_cmd        cmd;
1156         bool                            secure;
1157 } v2_user_options[] __initconst = {
1158         { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
1159         { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
1160         { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
1161         { "prctl",              SPECTRE_V2_USER_CMD_PRCTL,              false },
1162         { "prctl,ibpb",         SPECTRE_V2_USER_CMD_PRCTL_IBPB,         false },
1163         { "seccomp",            SPECTRE_V2_USER_CMD_SECCOMP,            false },
1164         { "seccomp,ibpb",       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,       false },
1165 };
1166
1167 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1168 {
1169         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1170                 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1171 }
1172
1173 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
1174
1175 static enum spectre_v2_user_cmd __init
1176 spectre_v2_parse_user_cmdline(void)
1177 {
1178         char arg[20];
1179         int ret, i;
1180
1181         switch (spectre_v2_cmd) {
1182         case SPECTRE_V2_CMD_NONE:
1183                 return SPECTRE_V2_USER_CMD_NONE;
1184         case SPECTRE_V2_CMD_FORCE:
1185                 return SPECTRE_V2_USER_CMD_FORCE;
1186         default:
1187                 break;
1188         }
1189
1190         ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1191                                   arg, sizeof(arg));
1192         if (ret < 0)
1193                 return SPECTRE_V2_USER_CMD_AUTO;
1194
1195         for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1196                 if (match_option(arg, ret, v2_user_options[i].option)) {
1197                         spec_v2_user_print_cond(v2_user_options[i].option,
1198                                                 v2_user_options[i].secure);
1199                         return v2_user_options[i].cmd;
1200                 }
1201         }
1202
1203         pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
1204         return SPECTRE_V2_USER_CMD_AUTO;
1205 }
1206
1207 static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
1208 {
1209         return mode == SPECTRE_V2_EIBRS ||
1210                mode == SPECTRE_V2_EIBRS_RETPOLINE ||
1211                mode == SPECTRE_V2_EIBRS_LFENCE;
1212 }
1213
1214 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1215 {
1216         return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1217 }
1218
1219 static void __init
1220 spectre_v2_user_select_mitigation(void)
1221 {
1222         enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
1223         bool smt_possible = IS_ENABLED(CONFIG_SMP);
1224         enum spectre_v2_user_cmd cmd;
1225
1226         if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1227                 return;
1228
1229         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
1230             cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
1231                 smt_possible = false;
1232
1233         cmd = spectre_v2_parse_user_cmdline();
1234         switch (cmd) {
1235         case SPECTRE_V2_USER_CMD_NONE:
1236                 goto set_mode;
1237         case SPECTRE_V2_USER_CMD_FORCE:
1238                 mode = SPECTRE_V2_USER_STRICT;
1239                 break;
1240         case SPECTRE_V2_USER_CMD_AUTO:
1241         case SPECTRE_V2_USER_CMD_PRCTL:
1242         case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1243                 mode = SPECTRE_V2_USER_PRCTL;
1244                 break;
1245         case SPECTRE_V2_USER_CMD_SECCOMP:
1246         case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1247                 if (IS_ENABLED(CONFIG_SECCOMP))
1248                         mode = SPECTRE_V2_USER_SECCOMP;
1249                 else
1250                         mode = SPECTRE_V2_USER_PRCTL;
1251                 break;
1252         }
1253
1254         /* Initialize Indirect Branch Prediction Barrier */
1255         if (boot_cpu_has(X86_FEATURE_IBPB)) {
1256                 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1257
1258                 spectre_v2_user_ibpb = mode;
1259                 switch (cmd) {
1260                 case SPECTRE_V2_USER_CMD_FORCE:
1261                 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1262                 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1263                         static_branch_enable(&switch_mm_always_ibpb);
1264                         spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1265                         break;
1266                 case SPECTRE_V2_USER_CMD_PRCTL:
1267                 case SPECTRE_V2_USER_CMD_AUTO:
1268                 case SPECTRE_V2_USER_CMD_SECCOMP:
1269                         static_branch_enable(&switch_mm_cond_ibpb);
1270                         break;
1271                 default:
1272                         break;
1273                 }
1274
1275                 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1276                         static_key_enabled(&switch_mm_always_ibpb) ?
1277                         "always-on" : "conditional");
1278         }
1279
1280         /*
1281          * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
1282          * is not required.
1283          *
1284          * Enhanced IBRS also protects against cross-thread branch target
1285          * injection in user-mode as the IBRS bit remains always set which
1286          * implicitly enables cross-thread protections.  However, in legacy IBRS
1287          * mode, the IBRS bit is set only on kernel entry and cleared on return
1288          * to userspace. This disables the implicit cross-thread protection,
1289          * so allow for STIBP to be selected in that case.
1290          */
1291         if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1292             !smt_possible ||
1293             spectre_v2_in_eibrs_mode(spectre_v2_enabled))
1294                 return;
1295
1296         /*
1297          * At this point, an STIBP mode other than "off" has been set.
1298          * If STIBP support is not being forced, check if STIBP always-on
1299          * is preferred.
1300          */
1301         if (mode != SPECTRE_V2_USER_STRICT &&
1302             boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1303                 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1304
1305         if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1306             retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
1307                 if (mode != SPECTRE_V2_USER_STRICT &&
1308                     mode != SPECTRE_V2_USER_STRICT_PREFERRED)
1309                         pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
1310                 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1311         }
1312
1313         spectre_v2_user_stibp = mode;
1314
1315 set_mode:
1316         pr_info("%s\n", spectre_v2_user_strings[mode]);
1317 }
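/*
 * Userspace sketch (illustrative): in the prctl/seccomp modes a task can
 * request STIBP/IBPB protection for itself with
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * where "disabling" indirect branch speculation enables the mitigation for
 * that task (see Documentation/userspace-api/spec_ctrl.rst).
 */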
1318
1319 static const char * const spectre_v2_strings[] = {
1320         [SPECTRE_V2_NONE]                       = "Vulnerable",
1321         [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
1322         [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
1323         [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
1324         [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
1325         [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
1326         [SPECTRE_V2_IBRS]                       = "Mitigation: IBRS",
1327 };
1328
1329 static const struct {
1330         const char *option;
1331         enum spectre_v2_mitigation_cmd cmd;
1332         bool secure;
1333 } mitigation_options[] __initconst = {
1334         { "off",                SPECTRE_V2_CMD_NONE,              false },
1335         { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
1336         { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
1337         { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1338         { "retpoline,lfence",   SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1339         { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1340         { "eibrs",              SPECTRE_V2_CMD_EIBRS,             false },
1341         { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
1342         { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
1343         { "auto",               SPECTRE_V2_CMD_AUTO,              false },
1344         { "ibrs",               SPECTRE_V2_CMD_IBRS,              false },
1345 };
1346
1347 static void __init spec_v2_print_cond(const char *reason, bool secure)
1348 {
1349         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1350                 pr_info("%s selected on command line.\n", reason);
1351 }
1352
1353 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1354 {
1355         enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
1356         char arg[20];
1357         int ret, i;
1358
1359         if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1360             cpu_mitigations_off())
1361                 return SPECTRE_V2_CMD_NONE;
1362
1363         ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1364         if (ret < 0)
1365                 return SPECTRE_V2_CMD_AUTO;
1366
1367         for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1368                 if (!match_option(arg, ret, mitigation_options[i].option))
1369                         continue;
1370                 cmd = mitigation_options[i].cmd;
1371                 break;
1372         }
1373
1374         if (i >= ARRAY_SIZE(mitigation_options)) {
1375                 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1376                 return SPECTRE_V2_CMD_AUTO;
1377         }
1378
1379         if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1380              cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1381              cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1382              cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1383              cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1384             !IS_ENABLED(CONFIG_RETPOLINE)) {
1385                 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1386                        mitigation_options[i].option);
1387                 return SPECTRE_V2_CMD_AUTO;
1388         }
1389
1390         if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1391              cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1392              cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1393             !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1394                 pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
1395                        mitigation_options[i].option);
1396                 return SPECTRE_V2_CMD_AUTO;
1397         }
1398
1399         if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1400              cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1401             !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1402                 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1403                        mitigation_options[i].option);
1404                 return SPECTRE_V2_CMD_AUTO;
1405         }
1406
1407         if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
1408                 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1409                        mitigation_options[i].option);
1410                 return SPECTRE_V2_CMD_AUTO;
1411         }
1412
1413         if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1414                 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
1415                        mitigation_options[i].option);
1416                 return SPECTRE_V2_CMD_AUTO;
1417         }
1418
1419         if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
1420                 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
1421                        mitigation_options[i].option);
1422                 return SPECTRE_V2_CMD_AUTO;
1423         }
1424
1425         if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
1426                 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
1427                        mitigation_options[i].option);
1428                 return SPECTRE_V2_CMD_AUTO;
1429         }
1430
1431         spec_v2_print_cond(mitigation_options[i].option,
1432                            mitigation_options[i].secure);
1433         return cmd;
1434 }
1435
1436 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1437 {
1438         if (!IS_ENABLED(CONFIG_RETPOLINE)) {
1439                 pr_err("Kernel not compiled with retpoline; no mitigation available!");
1440                 return SPECTRE_V2_NONE;
1441         }
1442
1443         return SPECTRE_V2_RETPOLINE;
1444 }
1445
1446 /* Disable in-kernel use of non-RSB RET predictors */
1447 static void __init spec_ctrl_disable_kernel_rrsba(void)
1448 {
1449         u64 ia32_cap;
1450
1451         if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1452                 return;
1453
1454         ia32_cap = x86_read_arch_cap_msr();
1455
1456         if (ia32_cap & ARCH_CAP_RRSBA) {
1457                 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1458                 update_spec_ctrl(x86_spec_ctrl_base);
1459         }
1460 }
1461
1462 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
1463 {
1464         /*
1465          * Similar to context switches, there are two types of RSB attacks
1466          * after VM exit:
1467          *
1468          * 1) RSB underflow
1469          *
1470          * 2) Poisoned RSB entry
1471          *
1472          * When retpoline is enabled, both are mitigated by filling/clearing
1473          * the RSB.
1474          *
1475          * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
1476          * prediction isolation protections, RSB still needs to be cleared
1477          * because of #2.  Note that SMEP provides no protection here, unlike
1478          * the case of user-space-poisoned RSB entries.
1479          *
1480          * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
1481          * bug is present then a LITE version of RSB protection is required:
1482          * just a single CALL needs to retire before a RET is executed.
1483          */
1484         switch (mode) {
1485         case SPECTRE_V2_NONE:
1486                 return;
1487
1488         case SPECTRE_V2_EIBRS_LFENCE:
1489         case SPECTRE_V2_EIBRS:
1490                 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
1491                         setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
1492                         pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
1493                 }
1494                 return;
1495
1496         case SPECTRE_V2_EIBRS_RETPOLINE:
1497         case SPECTRE_V2_RETPOLINE:
1498         case SPECTRE_V2_LFENCE:
1499         case SPECTRE_V2_IBRS:
1500                 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1501                 pr_info("Spectre v2 / SpectreRSB: Filling RSB on VMEXIT\n");
1502                 return;
1503         }
1504
1505         pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
1506         dump_stack();
1507 }
1508
1509 static void __init spectre_v2_select_mitigation(void)
1510 {
1511         enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
1512         enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
1513
1514         /*
1515          * If the CPU is not affected and the command line mode is NONE or AUTO,
1516          * there is nothing to do.
1517          */
1518         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
1519             (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
1520                 return;
1521
1522         switch (cmd) {
1523         case SPECTRE_V2_CMD_NONE:
1524                 return;
1525
1526         case SPECTRE_V2_CMD_FORCE:
1527         case SPECTRE_V2_CMD_AUTO:
1528                 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1529                         mode = SPECTRE_V2_EIBRS;
1530                         break;
1531                 }
1532
1533                 if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
1534                     boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1535                     retbleed_cmd != RETBLEED_CMD_OFF &&
1536                     boot_cpu_has(X86_FEATURE_IBRS) &&
1537                     boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1538                         mode = SPECTRE_V2_IBRS;
1539                         break;
1540                 }
1541
1542                 mode = spectre_v2_select_retpoline();
1543                 break;
1544
1545         case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
1546                 pr_err(SPECTRE_V2_LFENCE_MSG);
1547                 mode = SPECTRE_V2_LFENCE;
1548                 break;
1549
1550         case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
1551                 mode = SPECTRE_V2_RETPOLINE;
1552                 break;
1553
1554         case SPECTRE_V2_CMD_RETPOLINE:
1555                 mode = spectre_v2_select_retpoline();
1556                 break;
1557
1558         case SPECTRE_V2_CMD_IBRS:
1559                 mode = SPECTRE_V2_IBRS;
1560                 break;
1561
1562         case SPECTRE_V2_CMD_EIBRS:
1563                 mode = SPECTRE_V2_EIBRS;
1564                 break;
1565
1566         case SPECTRE_V2_CMD_EIBRS_LFENCE:
1567                 mode = SPECTRE_V2_EIBRS_LFENCE;
1568                 break;
1569
1570         case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
1571                 mode = SPECTRE_V2_EIBRS_RETPOLINE;
1572                 break;
1573         }
1574
1575         if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
1576                 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1577
1578         if (spectre_v2_in_ibrs_mode(mode)) {
1579                 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
1580                 update_spec_ctrl(x86_spec_ctrl_base);
1581         }
1582
1583         switch (mode) {
1584         case SPECTRE_V2_NONE:
1585         case SPECTRE_V2_EIBRS:
1586                 break;
1587
1588         case SPECTRE_V2_IBRS:
1589                 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
1590                 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1591                         pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
1592                 break;
1593
1594         case SPECTRE_V2_LFENCE:
1595         case SPECTRE_V2_EIBRS_LFENCE:
1596                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
1597                 fallthrough;
1598
1599         case SPECTRE_V2_RETPOLINE:
1600         case SPECTRE_V2_EIBRS_RETPOLINE:
1601                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
1602                 break;
1603         }
1604
1605         /*
1606          * Disable alternate RSB predictions in the kernel when indirect CALLs
1607          * and JMPs get protection against BHI and Intramode-BTI, but RET
1608          * prediction from a non-RSB predictor is still a risk.
1609          */
1610         if (mode == SPECTRE_V2_EIBRS_LFENCE ||
1611             mode == SPECTRE_V2_EIBRS_RETPOLINE ||
1612             mode == SPECTRE_V2_RETPOLINE)
1613                 spec_ctrl_disable_kernel_rrsba();
1614
1615         spectre_v2_enabled = mode;
1616         pr_info("%s\n", spectre_v2_strings[mode]);
1617
1618         /*
1619          * If Spectre v2 protection has been enabled, fill the RSB during a
1620          * context switch.  In general there are two types of RSB attacks
1621          * across context switches, for which the CALLs/RETs may be unbalanced.
1622          *
1623          * 1) RSB underflow
1624          *
1625          *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
1626          *    speculated return targets may come from the branch predictor,
1627          *    which could have a user-poisoned BTB or BHB entry.
1628          *
1629          *    AMD has it even worse: *all* returns are speculated from the BTB,
1630          *    regardless of the state of the RSB.
1631          *
1632          *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
1633          *    scenario is mitigated by the IBRS branch prediction isolation
1634          *    properties, so the RSB buffer filling wouldn't be necessary to
1635          *    protect against this type of attack.
1636          *
1637          *    The "user -> user" attack scenario is mitigated by RSB filling.
1638          *
1639          * 2) Poisoned RSB entry
1640          *
1641          *    If the 'next' in-kernel return stack is shorter than 'prev',
1642          *    'next' could be tricked into speculating with a user-poisoned RSB
1643          *    entry.
1644          *
1645          *    The "user -> kernel" attack scenario is mitigated by SMEP and
1646          *    eIBRS.
1647          *
1648          *    The "user -> user" scenario, also known as SpectreBHB, requires
1649          *    RSB clearing.
1650          *
1651          * So to mitigate all cases, unconditionally fill RSB on context
1652          * switches.
1653          *
1654          * FIXME: Is this pointless for retbleed-affected AMD?
1655          */
1656         setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1657         pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
1658
1659         spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
1660
1661         /*
1662          * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
1663          * and Enhanced IBRS protect firmware too, so enable IBRS around
1664          * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
1665          * enabled.
1666          *
1667          * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
1668          * the user might select retpoline on the kernel command line and if
1669          * the CPU supports Enhanced IBRS, the kernel might unintentionally not
1670          * enable IBRS around firmware calls.
1671          */
1672         if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1673             boot_cpu_has(X86_FEATURE_IBPB) &&
1674             (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1675              boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
1676
1677                 if (retbleed_cmd != RETBLEED_CMD_IBPB) {
1678                         setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
1679                         pr_info("Enabling Speculation Barrier for firmware calls\n");
1680                 }
1681
1682         } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
1683                 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1684                 pr_info("Enabling Restricted Speculation for firmware calls\n");
1685         }
1686
1687         /* Set up IBPB and STIBP depending on the general spectre V2 command */
1688         spectre_v2_cmd = cmd;
1689 }
1690
1691 static void update_stibp_msr(void * __unused)
1692 {
1693         u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
1694         update_spec_ctrl(val);
1695 }
1696
1697 /* Update x86_spec_ctrl_base in case SMT state changed. */
1698 static void update_stibp_strict(void)
1699 {
1700         u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
1701
1702         if (sched_smt_active())
1703                 mask |= SPEC_CTRL_STIBP;
1704
1705         if (mask == x86_spec_ctrl_base)
1706                 return;
1707
1708         pr_info("Update user space SMT mitigation: STIBP %s\n",
1709                 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
1710         x86_spec_ctrl_base = mask;
1711         on_each_cpu(update_stibp_msr, NULL, 1);
1712 }
1713
1714 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
1715 static void update_indir_branch_cond(void)
1716 {
1717         if (sched_smt_active())
1718                 static_branch_enable(&switch_to_cond_stibp);
1719         else
1720                 static_branch_disable(&switch_to_cond_stibp);
1721 }
1722
1723 #undef pr_fmt
1724 #define pr_fmt(fmt) fmt
1725
1726 /* Update the static key controlling the MDS CPU buffer clear in idle */
1727 static void update_mds_branch_idle(void)
1728 {
1729         u64 ia32_cap = x86_read_arch_cap_msr();
1730
1731         /*
1732          * Enable the idle clearing if SMT is active on CPUs which are
1733          * affected only by MSBDS and not any other MDS variant.
1734          *
1735          * The other variants cannot be mitigated when SMT is enabled, so
1736          * clearing the buffers on idle just to prevent the Store Buffer
1737          * repartitioning leak would be a window dressing exercise.
1738          */
1739         if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1740                 return;
1741
1742         if (sched_smt_active()) {
1743                 static_branch_enable(&mds_idle_clear);
1744         } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
1745                    (ia32_cap & ARCH_CAP_FBSDP_NO)) {
1746                 static_branch_disable(&mds_idle_clear);
1747         }
1748 }
1749
1750 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1751 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1752 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
1753
1754 void cpu_bugs_smt_update(void)
1755 {
1756         mutex_lock(&spec_ctrl_mutex);
1757
1758         if (sched_smt_active() && unprivileged_ebpf_enabled() &&
1759             spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
1760                 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1761
1762         switch (spectre_v2_user_stibp) {
1763         case SPECTRE_V2_USER_NONE:
1764                 break;
1765         case SPECTRE_V2_USER_STRICT:
1766         case SPECTRE_V2_USER_STRICT_PREFERRED:
1767                 update_stibp_strict();
1768                 break;
1769         case SPECTRE_V2_USER_PRCTL:
1770         case SPECTRE_V2_USER_SECCOMP:
1771                 update_indir_branch_cond();
1772                 break;
1773         }
1774
1775         switch (mds_mitigation) {
1776         case MDS_MITIGATION_FULL:
1777         case MDS_MITIGATION_VMWERV:
1778                 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1779                         pr_warn_once(MDS_MSG_SMT);
1780                 update_mds_branch_idle();
1781                 break;
1782         case MDS_MITIGATION_OFF:
1783                 break;
1784         }
1785
1786         switch (taa_mitigation) {
1787         case TAA_MITIGATION_VERW:
1788         case TAA_MITIGATION_UCODE_NEEDED:
1789                 if (sched_smt_active())
1790                         pr_warn_once(TAA_MSG_SMT);
1791                 break;
1792         case TAA_MITIGATION_TSX_DISABLED:
1793         case TAA_MITIGATION_OFF:
1794                 break;
1795         }
1796
1797         switch (mmio_mitigation) {
1798         case MMIO_MITIGATION_VERW:
1799         case MMIO_MITIGATION_UCODE_NEEDED:
1800                 if (sched_smt_active())
1801                         pr_warn_once(MMIO_MSG_SMT);
1802                 break;
1803         case MMIO_MITIGATION_OFF:
1804                 break;
1805         }
1806
1807         mutex_unlock(&spec_ctrl_mutex);
1808 }
1809
1810 #undef pr_fmt
1811 #define pr_fmt(fmt)     "Speculative Store Bypass: " fmt
1812
1813 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1814
1815 /* The kernel command line selection */
1816 enum ssb_mitigation_cmd {
1817         SPEC_STORE_BYPASS_CMD_NONE,
1818         SPEC_STORE_BYPASS_CMD_AUTO,
1819         SPEC_STORE_BYPASS_CMD_ON,
1820         SPEC_STORE_BYPASS_CMD_PRCTL,
1821         SPEC_STORE_BYPASS_CMD_SECCOMP,
1822 };
1823
1824 static const char * const ssb_strings[] = {
1825         [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
1826         [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
1827         [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
1828         [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1829 };
1830
1831 static const struct {
1832         const char *option;
1833         enum ssb_mitigation_cmd cmd;
1834 } ssb_mitigation_options[]  __initconst = {
1835         { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
1836         { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
1837         { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
1838         { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
1839         { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1840 };
1841
1842 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1843 {
1844         enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1845         char arg[20];
1846         int ret, i;
1847
1848         if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1849             cpu_mitigations_off()) {
1850                 return SPEC_STORE_BYPASS_CMD_NONE;
1851         } else {
1852                 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1853                                           arg, sizeof(arg));
1854                 if (ret < 0)
1855                         return SPEC_STORE_BYPASS_CMD_AUTO;
1856
1857                 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1858                         if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1859                                 continue;
1860
1861                         cmd = ssb_mitigation_options[i].cmd;
1862                         break;
1863                 }
1864
1865                 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1866                         pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1867                         return SPEC_STORE_BYPASS_CMD_AUTO;
1868                 }
1869         }
1870
1871         return cmd;
1872 }
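
/*
 * Illustrative boot command line usage for the parser above; the accepted
 * values are exactly the entries of ssb_mitigation_options[]:
 *
 *     spec_store_bypass_disable=seccomp  -> SPEC_STORE_BYPASS_CMD_SECCOMP
 *     spec_store_bypass_disable=prctl    -> SPEC_STORE_BYPASS_CMD_PRCTL
 *     nospec_store_bypass_disable        -> SPEC_STORE_BYPASS_CMD_NONE
 */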
1873
1874 static enum ssb_mitigation __init __ssb_select_mitigation(void)
1875 {
1876         enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1877         enum ssb_mitigation_cmd cmd;
1878
1879         if (!boot_cpu_has(X86_FEATURE_SSBD))
1880                 return mode;
1881
1882         cmd = ssb_parse_cmdline();
1883         if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1884             (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1885              cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1886                 return mode;
1887
1888         switch (cmd) {
1889         case SPEC_STORE_BYPASS_CMD_SECCOMP:
1890                 /*
1891                  * Choose prctl+seccomp as the default mode if seccomp is
1892                  * enabled.
1893                  */
1894                 if (IS_ENABLED(CONFIG_SECCOMP))
1895                         mode = SPEC_STORE_BYPASS_SECCOMP;
1896                 else
1897                         mode = SPEC_STORE_BYPASS_PRCTL;
1898                 break;
1899         case SPEC_STORE_BYPASS_CMD_ON:
1900                 mode = SPEC_STORE_BYPASS_DISABLE;
1901                 break;
1902         case SPEC_STORE_BYPASS_CMD_AUTO:
1903         case SPEC_STORE_BYPASS_CMD_PRCTL:
1904                 mode = SPEC_STORE_BYPASS_PRCTL;
1905                 break;
1906         case SPEC_STORE_BYPASS_CMD_NONE:
1907                 break;
1908         }
1909
1910         /*
1911          * We have three CPU feature flags that are in play here:
1912          *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1913          *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1914          *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1915          */
1916         if (mode == SPEC_STORE_BYPASS_DISABLE) {
1917                 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1918                 /*
1919                  * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
1920                  * use a completely different MSR and bit dependent on family.
1921                  */
1922                 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1923                     !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1924                         x86_amd_ssb_disable();
1925                 } else {
1926                         x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1927                         update_spec_ctrl(x86_spec_ctrl_base);
1928                 }
1929         }
1930
1931         return mode;
1932 }
1933
1934 static void ssb_select_mitigation(void)
1935 {
1936         ssb_mode = __ssb_select_mitigation();
1937
1938         if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1939                 pr_info("%s\n", ssb_strings[ssb_mode]);
1940 }
1941
1942 #undef pr_fmt
1943 #define pr_fmt(fmt)     "Speculation prctl: " fmt
1944
1945 static void task_update_spec_tif(struct task_struct *tsk)
1946 {
1947         /* Force the update of the real TIF bits */
1948         set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1949
1950         /*
1951          * Immediately update the speculation control MSRs for the current
1952          * task, but for a non-current task delay setting the CPU
1953          * mitigation until it is scheduled next.
1954          *
1955          * This can only happen for SECCOMP mitigation. For PRCTL it's
1956          * always the current task.
1957          */
1958         if (tsk == current)
1959                 speculation_ctrl_update_current();
1960 }
1961
1962 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
1963 {
1964
1965         if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1966                 return -EPERM;
1967
1968         switch (ctrl) {
1969         case PR_SPEC_ENABLE:
1970                 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
1971                 return 0;
1972         case PR_SPEC_DISABLE:
1973                 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
1974                 return 0;
1975         default:
1976                 return -ERANGE;
1977         }
1978 }
1979
1980 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1981 {
1982         if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1983             ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1984                 return -ENXIO;
1985
1986         switch (ctrl) {
1987         case PR_SPEC_ENABLE:
1988                 /* If speculation is force disabled, enable is not allowed */
1989                 if (task_spec_ssb_force_disable(task))
1990                         return -EPERM;
1991                 task_clear_spec_ssb_disable(task);
1992                 task_clear_spec_ssb_noexec(task);
1993                 task_update_spec_tif(task);
1994                 break;
1995         case PR_SPEC_DISABLE:
1996                 task_set_spec_ssb_disable(task);
1997                 task_clear_spec_ssb_noexec(task);
1998                 task_update_spec_tif(task);
1999                 break;
2000         case PR_SPEC_FORCE_DISABLE:
2001                 task_set_spec_ssb_disable(task);
2002                 task_set_spec_ssb_force_disable(task);
2003                 task_clear_spec_ssb_noexec(task);
2004                 task_update_spec_tif(task);
2005                 break;
2006         case PR_SPEC_DISABLE_NOEXEC:
2007                 if (task_spec_ssb_force_disable(task))
2008                         return -EPERM;
2009                 task_set_spec_ssb_disable(task);
2010                 task_set_spec_ssb_noexec(task);
2011                 task_update_spec_tif(task);
2012                 break;
2013         default:
2014                 return -ERANGE;
2015         }
2016         return 0;
2017 }
2018
2019 static bool is_spec_ib_user_controlled(void)
2020 {
2021         return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2022                 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2023                 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2024                 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2025 }
2026
2027 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2028 {
2029         switch (ctrl) {
2030         case PR_SPEC_ENABLE:
2031                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2032                     spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2033                         return 0;
2034
2035                 /*
2036                  * With strict mode for both IBPB and STIBP, the instruction
2037                  * code paths avoid checking this task flag and instead,
2038                  * unconditionally run the instruction. However, STIBP and IBPB
2039                  * are independent and either can be set to conditional mode
2040                  * regardless of the mode of the other.
2041                  *
2042                  * If either is set to conditional, allow the task flag to be
2043                  * updated, unless it was force-disabled by a previous prctl
2044                  * call. Currently, this is possible on an AMD CPU which has the
2045                  * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2046                  * kernel is booted with 'spectre_v2_user=seccomp', then
2047                  * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2048                  * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2049                  */
2050                 if (!is_spec_ib_user_controlled() ||
2051                     task_spec_ib_force_disable(task))
2052                         return -EPERM;
2053
2054                 task_clear_spec_ib_disable(task);
2055                 task_update_spec_tif(task);
2056                 break;
2057         case PR_SPEC_DISABLE:
2058         case PR_SPEC_FORCE_DISABLE:
2059                 /*
2060                  * When the mitigation is disabled system-wide, indirect branch
2061                  * speculation is always allowed and cannot be disabled per task.
2062                  */
2063                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2064                     spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2065                         return -EPERM;
2066
2067                 if (!is_spec_ib_user_controlled())
2068                         return 0;
2069
2070                 task_set_spec_ib_disable(task);
2071                 if (ctrl == PR_SPEC_FORCE_DISABLE)
2072                         task_set_spec_ib_force_disable(task);
2073                 task_update_spec_tif(task);
2074                 if (task == current)
2075                         indirect_branch_prediction_barrier();
2076                 break;
2077         default:
2078                 return -ERANGE;
2079         }
2080         return 0;
2081 }
2082
2083 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2084                              unsigned long ctrl)
2085 {
2086         switch (which) {
2087         case PR_SPEC_STORE_BYPASS:
2088                 return ssb_prctl_set(task, ctrl);
2089         case PR_SPEC_INDIRECT_BRANCH:
2090                 return ib_prctl_set(task, ctrl);
2091         case PR_SPEC_L1D_FLUSH:
2092                 return l1d_flush_prctl_set(task, ctrl);
2093         default:
2094                 return -ENODEV;
2095         }
2096 }
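
/*
 * A minimal userspace sketch of how this interface is driven, assuming a
 * glibc environment where <sys/prctl.h> pulls in the PR_* constants from
 * <linux/prctl.h>; it is shown only to illustrate how the handlers above
 * are reached and must be built as a normal userspace program, not as part
 * of this file:
 *
 *     #include <sys/prctl.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *             // Opt this task out of Speculative Store Bypass; the call
 *             // lands in ssb_prctl_set() via arch_prctl_spec_ctrl_set().
 *             if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *                       PR_SPEC_DISABLE, 0, 0))
 *                     perror("PR_SET_SPECULATION_CTRL");
 *             return 0;
 *     }
 *
 * PR_SPEC_FORCE_DISABLE and PR_SPEC_DISABLE_NOEXEC are passed the same way
 * and map onto the corresponding cases in ssb_prctl_set().
 */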
2097
2098 #ifdef CONFIG_SECCOMP
2099 void arch_seccomp_spec_mitigate(struct task_struct *task)
2100 {
2101         if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2102                 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2103         if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2104             spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2105                 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2106 }
2107 #endif
2108
2109 static int l1d_flush_prctl_get(struct task_struct *task)
2110 {
2111         if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2112                 return PR_SPEC_FORCE_DISABLE;
2113
2114         if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2115                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2116         else
2117                 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2118 }
2119
2120 static int ssb_prctl_get(struct task_struct *task)
2121 {
2122         switch (ssb_mode) {
2123         case SPEC_STORE_BYPASS_DISABLE:
2124                 return PR_SPEC_DISABLE;
2125         case SPEC_STORE_BYPASS_SECCOMP:
2126         case SPEC_STORE_BYPASS_PRCTL:
2127                 if (task_spec_ssb_force_disable(task))
2128                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2129                 if (task_spec_ssb_noexec(task))
2130                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2131                 if (task_spec_ssb_disable(task))
2132                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2133                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2134         default:
2135                 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2136                         return PR_SPEC_ENABLE;
2137                 return PR_SPEC_NOT_AFFECTED;
2138         }
2139 }
2140
2141 static int ib_prctl_get(struct task_struct *task)
2142 {
2143         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2144                 return PR_SPEC_NOT_AFFECTED;
2145
2146         if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2147             spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2148                 return PR_SPEC_ENABLE;
2149         else if (is_spec_ib_user_controlled()) {
2150                 if (task_spec_ib_force_disable(task))
2151                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2152                 if (task_spec_ib_disable(task))
2153                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2154                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2155         } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2156             spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2157             spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2158                 return PR_SPEC_DISABLE;
2159         else
2160                 return PR_SPEC_NOT_AFFECTED;
2161 }
2162
2163 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2164 {
2165         switch (which) {
2166         case PR_SPEC_STORE_BYPASS:
2167                 return ssb_prctl_get(task);
2168         case PR_SPEC_INDIRECT_BRANCH:
2169                 return ib_prctl_get(task);
2170         case PR_SPEC_L1D_FLUSH:
2171                 return l1d_flush_prctl_get(task);
2172         default:
2173                 return -ENODEV;
2174         }
2175 }
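
/*
 * The read side, again as a userspace sketch (not kernel code): the value
 * returned for PR_GET_SPECULATION_CTRL is the bitmask assembled by the
 * *_prctl_get() helpers above from the PR_SPEC_* flags.
 *
 *     int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 *     if (ret < 0)
 *             perror("PR_GET_SPECULATION_CTRL");
 *     else if (ret & PR_SPEC_PRCTL)
 *             printf("per-task control available, state %#x\n", ret);
 *     else
 *             printf("not per-task controllable (%#x)\n", ret);
 */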
2176
2177 void x86_spec_ctrl_setup_ap(void)
2178 {
2179         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2180                 update_spec_ctrl(x86_spec_ctrl_base);
2181
2182         if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2183                 x86_amd_ssb_disable();
2184 }
2185
2186 bool itlb_multihit_kvm_mitigation;
2187 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2188
2189 #undef pr_fmt
2190 #define pr_fmt(fmt)     "L1TF: " fmt
2191
2192 /* Default mitigation for L1TF-affected CPUs */
2193 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
2194 #if IS_ENABLED(CONFIG_KVM_INTEL)
2195 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2196 #endif
2197 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2198 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2199
2200 /*
2201  * These CPUs all support a 44-bit physical address space internally in the
2202  * cache, but CPUID can report a smaller number of physical address bits.
2203  *
2204  * The L1TF mitigation uses the topmost address bit for the inversion of
2205  * non-present PTEs. When the installed memory reaches into the topmost
2206  * address bit due to memory holes, which has been observed on machines
2207  * that report 36 physical address bits and have 32G of RAM installed,
2208  * the mitigation range check in l1tf_select_mitigation() triggers.
2209  * This is a false positive because the mitigation is still possible, given
2210  * that the cache uses 44 bits internally. Use the cache bits instead
2211  * of the reported physical bits and adjust them on the affected
2212  * machines to 44 bits if the reported value is less than 44.
2213  */
2214 static void override_cache_bits(struct cpuinfo_x86 *c)
2215 {
2216         if (c->x86 != 6)
2217                 return;
2218
2219         switch (c->x86_model) {
2220         case INTEL_FAM6_NEHALEM:
2221         case INTEL_FAM6_WESTMERE:
2222         case INTEL_FAM6_SANDYBRIDGE:
2223         case INTEL_FAM6_IVYBRIDGE:
2224         case INTEL_FAM6_HASWELL:
2225         case INTEL_FAM6_HASWELL_L:
2226         case INTEL_FAM6_HASWELL_G:
2227         case INTEL_FAM6_BROADWELL:
2228         case INTEL_FAM6_BROADWELL_G:
2229         case INTEL_FAM6_SKYLAKE_L:
2230         case INTEL_FAM6_SKYLAKE:
2231         case INTEL_FAM6_KABYLAKE_L:
2232         case INTEL_FAM6_KABYLAKE:
2233                 if (c->x86_cache_bits < 44)
2234                         c->x86_cache_bits = 44;
2235                 break;
2236         }
2237 }
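
/*
 * Worked example for the comment above, assuming l1tf_pfn_limit() keeps its
 * usual definition of BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT): with 36
 * reported physical address bits the half_pa cut-off computed in
 * l1tf_select_mitigation() is 2^35 bytes (32G), so 32G of RAM plus memory
 * holes already trips the e820 check. After the override to 44 bits the
 * cut-off becomes 2^43 bytes (8T) and the false positive disappears.
 */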
2238
2239 static void __init l1tf_select_mitigation(void)
2240 {
2241         u64 half_pa;
2242
2243         if (!boot_cpu_has_bug(X86_BUG_L1TF))
2244                 return;
2245
2246         if (cpu_mitigations_off())
2247                 l1tf_mitigation = L1TF_MITIGATION_OFF;
2248         else if (cpu_mitigations_auto_nosmt())
2249                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2250
2251         override_cache_bits(&boot_cpu_data);
2252
2253         switch (l1tf_mitigation) {
2254         case L1TF_MITIGATION_OFF:
2255         case L1TF_MITIGATION_FLUSH_NOWARN:
2256         case L1TF_MITIGATION_FLUSH:
2257                 break;
2258         case L1TF_MITIGATION_FLUSH_NOSMT:
2259         case L1TF_MITIGATION_FULL:
2260                 cpu_smt_disable(false);
2261                 break;
2262         case L1TF_MITIGATION_FULL_FORCE:
2263                 cpu_smt_disable(true);
2264                 break;
2265         }
2266
2267 #if CONFIG_PGTABLE_LEVELS == 2
2268         pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2269         return;
2270 #endif
2271
2272         half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2273         if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2274                         e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2275                 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2276                 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2277                                 half_pa);
2278                 pr_info("However, doing so will make a part of your RAM unusable.\n");
2279                 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2280                 return;
2281         }
2282
2283         setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2284 }
2285
2286 static int __init l1tf_cmdline(char *str)
2287 {
2288         if (!boot_cpu_has_bug(X86_BUG_L1TF))
2289                 return 0;
2290
2291         if (!str)
2292                 return -EINVAL;
2293
2294         if (!strcmp(str, "off"))
2295                 l1tf_mitigation = L1TF_MITIGATION_OFF;
2296         else if (!strcmp(str, "flush,nowarn"))
2297                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2298         else if (!strcmp(str, "flush"))
2299                 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2300         else if (!strcmp(str, "flush,nosmt"))
2301                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2302         else if (!strcmp(str, "full"))
2303                 l1tf_mitigation = L1TF_MITIGATION_FULL;
2304         else if (!strcmp(str, "full,force"))
2305                 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2306
2307         return 0;
2308 }
2309 early_param("l1tf", l1tf_cmdline);
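
/*
 * Illustrative l1tf= values, mirroring the strcmp() chain above and the
 * SMT handling in l1tf_select_mitigation():
 *
 *     l1tf=flush,nosmt   -> L1TF_MITIGATION_FLUSH_NOSMT (SMT disabled)
 *     l1tf=full,force    -> L1TF_MITIGATION_FULL_FORCE  (SMT force-disabled)
 *     l1tf=off           -> L1TF_MITIGATION_OFF
 */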
2310
2311 #undef pr_fmt
2312 #define pr_fmt(fmt)     "Speculative Return Stack Overflow: " fmt
2313
2314 enum srso_mitigation {
2315         SRSO_MITIGATION_NONE,
2316         SRSO_MITIGATION_MICROCODE,
2317         SRSO_MITIGATION_SAFE_RET,
2318         SRSO_MITIGATION_IBPB,
2319         SRSO_MITIGATION_IBPB_ON_VMEXIT,
2320 };
2321
2322 enum srso_mitigation_cmd {
2323         SRSO_CMD_OFF,
2324         SRSO_CMD_MICROCODE,
2325         SRSO_CMD_SAFE_RET,
2326         SRSO_CMD_IBPB,
2327         SRSO_CMD_IBPB_ON_VMEXIT,
2328 };
2329
2330 static const char * const srso_strings[] = {
2331         [SRSO_MITIGATION_NONE]           = "Vulnerable",
2332         [SRSO_MITIGATION_MICROCODE]      = "Mitigation: microcode",
2333         [SRSO_MITIGATION_SAFE_RET]       = "Mitigation: safe RET",
2334         [SRSO_MITIGATION_IBPB]           = "Mitigation: IBPB",
2335         [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
2336 };
2337
2338 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
2339 static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
2340
2341 static int __init srso_parse_cmdline(char *str)
2342 {
2343         if (!str)
2344                 return -EINVAL;
2345
2346         if (!strcmp(str, "off"))
2347                 srso_cmd = SRSO_CMD_OFF;
2348         else if (!strcmp(str, "microcode"))
2349                 srso_cmd = SRSO_CMD_MICROCODE;
2350         else if (!strcmp(str, "safe-ret"))
2351                 srso_cmd = SRSO_CMD_SAFE_RET;
2352         else if (!strcmp(str, "ibpb"))
2353                 srso_cmd = SRSO_CMD_IBPB;
2354         else if (!strcmp(str, "ibpb-vmexit"))
2355                 srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
2356         else
2357                 pr_err("Ignoring unknown SRSO option (%s).", str);
2358
2359         return 0;
2360 }
2361 early_param("spec_rstack_overflow", srso_parse_cmdline);
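
/*
 * Illustrative spec_rstack_overflow= values, matching the parser above
 * (safe-ret is the built-in default of srso_cmd):
 *
 *     spec_rstack_overflow=safe-ret      -> SRSO_CMD_SAFE_RET
 *     spec_rstack_overflow=ibpb-vmexit   -> SRSO_CMD_IBPB_ON_VMEXIT
 *     spec_rstack_overflow=off           -> SRSO_CMD_OFF
 */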
2362
2363 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
2364
2365 static void __init srso_select_mitigation(void)
2366 {
2367         bool has_microcode;
2368
2369         if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
2370                 goto pred_cmd;
2371
2372         /*
2373          * The feature-bit check comes first so that a kernel running as a
2374          * guest can verify whether IBPB is a viable mitigation.
2375          */
2376         has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
2377         if (!has_microcode) {
2378                 pr_warn("IBPB-extending microcode not applied!\n");
2379                 pr_warn(SRSO_NOTICE);
2380         } else {
2381                 /*
2382                  * Enable these flags (synthetic here, even if they also
2383                  * exist in a real CPUID leaf) so that guests can see them.
2384                  */
2385                 setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
2386                 setup_force_cpu_cap(X86_FEATURE_SBPB);
2387
2388                 /*
2389                  * Zen1/2 with SMT off aren't vulnerable after the right
2390                  * IBPB microcode has been applied.
2391                  */
2392                 if ((boot_cpu_data.x86 < 0x19) &&
2393                     (cpu_smt_control == CPU_SMT_DISABLED))
2394                         setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
2395         }
2396
2397         if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
2398                 if (has_microcode) {
2399                         pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
2400                         srso_mitigation = SRSO_MITIGATION_IBPB;
2401                         goto pred_cmd;
2402                 }
2403         }
2404
2405         switch (srso_cmd) {
2406         case SRSO_CMD_OFF:
2407                 return;
2408
2409         case SRSO_CMD_MICROCODE:
2410                 if (has_microcode) {
2411                         srso_mitigation = SRSO_MITIGATION_MICROCODE;
2412                         pr_warn(SRSO_NOTICE);
2413                 }
2414                 break;
2415
2416         case SRSO_CMD_SAFE_RET:
2417                 if (IS_ENABLED(CONFIG_CPU_SRSO)) {
2418                         /*
2419                          * Enable the return thunk for generated code
2420                          * like ftrace, static_call, etc.
2421                          */
2422                         setup_force_cpu_cap(X86_FEATURE_RETHUNK);
2423
2424                         if (boot_cpu_data.x86 == 0x19)
2425                                 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
2426                         else
2427                                 setup_force_cpu_cap(X86_FEATURE_SRSO);
2428                         srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2429                 } else {
2430                         pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
2431                         goto pred_cmd;
2432                 }
2433                 break;
2434
2435         case SRSO_CMD_IBPB:
2436                 if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
2437                         if (has_microcode) {
2438                                 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
2439                                 srso_mitigation = SRSO_MITIGATION_IBPB;
2440                         }
2441                 } else {
2442                         pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
2443                         goto pred_cmd;
2444                 }
2445                 break;
2446
2447         case SRSO_CMD_IBPB_ON_VMEXIT:
2448                 if (IS_ENABLED(CONFIG_CPU_SRSO)) {
2449                         if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
2450                                 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
2451                                 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2452                         }
2453                 } else {
2454                         pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
2455                         goto pred_cmd;
2456                 }
2457                 break;
2458
2459         default:
2460                 break;
2461         }
2462
2463         pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
2464
2465 pred_cmd:
2466         if (boot_cpu_has(X86_FEATURE_SRSO_NO) ||
2467             srso_cmd == SRSO_CMD_OFF)
2468                 x86_pred_cmd = PRED_CMD_SBPB;
2469 }
2470
2471 #undef pr_fmt
2472 #define pr_fmt(fmt) fmt
2473
2474 #ifdef CONFIG_SYSFS
2475
2476 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
2477
2478 #if IS_ENABLED(CONFIG_KVM_INTEL)
2479 static const char * const l1tf_vmx_states[] = {
2480         [VMENTER_L1D_FLUSH_AUTO]                = "auto",
2481         [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
2482         [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
2483         [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
2484         [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
2485         [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
2486 };
2487
2488 static ssize_t l1tf_show_state(char *buf)
2489 {
2490         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
2491                 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
2492
2493         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
2494             (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
2495              sched_smt_active())) {
2496                 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
2497                                l1tf_vmx_states[l1tf_vmx_mitigation]);
2498         }
2499
2500         return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
2501                        l1tf_vmx_states[l1tf_vmx_mitigation],
2502                        sched_smt_active() ? "vulnerable" : "disabled");
2503 }
2504
2505 static ssize_t itlb_multihit_show_state(char *buf)
2506 {
2507         if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2508             !boot_cpu_has(X86_FEATURE_VMX))
2509                 return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
2510         else if (!(cr4_read_shadow() & X86_CR4_VMXE))
2511                 return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
2512         else if (itlb_multihit_kvm_mitigation)
2513                 return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
2514         else
2515                 return sprintf(buf, "KVM: Vulnerable\n");
2516 }
2517 #else
2518 static ssize_t l1tf_show_state(char *buf)
2519 {
2520         return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
2521 }
2522
2523 static ssize_t itlb_multihit_show_state(char *buf)
2524 {
2525         return sprintf(buf, "Processor vulnerable\n");
2526 }
2527 #endif
2528
2529 static ssize_t mds_show_state(char *buf)
2530 {
2531         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2532                 return sprintf(buf, "%s; SMT Host state unknown\n",
2533                                mds_strings[mds_mitigation]);
2534         }
2535
2536         if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
2537                 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2538                                (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
2539                                 sched_smt_active() ? "mitigated" : "disabled"));
2540         }
2541
2542         return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2543                        sched_smt_active() ? "vulnerable" : "disabled");
2544 }
2545
2546 static ssize_t tsx_async_abort_show_state(char *buf)
2547 {
2548         if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
2549             (taa_mitigation == TAA_MITIGATION_OFF))
2550                 return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
2551
2552         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2553                 return sprintf(buf, "%s; SMT Host state unknown\n",
2554                                taa_strings[taa_mitigation]);
2555         }
2556
2557         return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
2558                        sched_smt_active() ? "vulnerable" : "disabled");
2559 }
2560
2561 static ssize_t mmio_stale_data_show_state(char *buf)
2562 {
2563         if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2564                 return sysfs_emit(buf, "Unknown: No mitigations\n");
2565
2566         if (mmio_mitigation == MMIO_MITIGATION_OFF)
2567                 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
2568
2569         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2570                 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2571                                   mmio_strings[mmio_mitigation]);
2572         }
2573
2574         return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
2575                           sched_smt_active() ? "vulnerable" : "disabled");
2576 }
2577
2578 static char *stibp_state(void)
2579 {
2580         if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
2581                 return "";
2582
2583         switch (spectre_v2_user_stibp) {
2584         case SPECTRE_V2_USER_NONE:
2585                 return ", STIBP: disabled";
2586         case SPECTRE_V2_USER_STRICT:
2587                 return ", STIBP: forced";
2588         case SPECTRE_V2_USER_STRICT_PREFERRED:
2589                 return ", STIBP: always-on";
2590         case SPECTRE_V2_USER_PRCTL:
2591         case SPECTRE_V2_USER_SECCOMP:
2592                 if (static_key_enabled(&switch_to_cond_stibp))
2593                         return ", STIBP: conditional";
2594         }
2595         return "";
2596 }
2597
2598 static char *ibpb_state(void)
2599 {
2600         if (boot_cpu_has(X86_FEATURE_IBPB)) {
2601                 if (static_key_enabled(&switch_mm_always_ibpb))
2602                         return ", IBPB: always-on";
2603                 if (static_key_enabled(&switch_mm_cond_ibpb))
2604                         return ", IBPB: conditional";
2605                 return ", IBPB: disabled";
2606         }
2607         return "";
2608 }
2609
2610 static char *pbrsb_eibrs_state(void)
2611 {
2612         if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2613                 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
2614                     boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
2615                         return ", PBRSB-eIBRS: SW sequence";
2616                 else
2617                         return ", PBRSB-eIBRS: Vulnerable";
2618         } else {
2619                 return ", PBRSB-eIBRS: Not affected";
2620         }
2621 }
2622
2623 static ssize_t spectre_v2_show_state(char *buf)
2624 {
2625         if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
2626                 return sprintf(buf, "Vulnerable: LFENCE\n");
2627
2628         if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2629                 return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
2630
2631         if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2632             spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2633                 return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
2634
2635         return sprintf(buf, "%s%s%s%s%s%s%s\n",
2636                        spectre_v2_strings[spectre_v2_enabled],
2637                        ibpb_state(),
2638                        boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
2639                        stibp_state(),
2640                        boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
2641                        pbrsb_eibrs_state(),
2642                        spectre_v2_module_string());
2643 }
2644
2645 static ssize_t srbds_show_state(char *buf)
2646 {
2647         return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
2648 }
2649
2650 static ssize_t retbleed_show_state(char *buf)
2651 {
2652         if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
2653             retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
2654                 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
2655                     boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
2656                         return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
2657
2658                 return sprintf(buf, "%s; SMT %s\n",
2659                                retbleed_strings[retbleed_mitigation],
2660                                !sched_smt_active() ? "disabled" :
2661                                spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2662                                spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
2663                                "enabled with STIBP protection" : "vulnerable");
2664         }
2665
2666         return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
2667 }
2668
2669 static ssize_t gds_show_state(char *buf)
2670 {
2671         return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
2672 }
2673
2674 static ssize_t srso_show_state(char *buf)
2675 {
2676         return sysfs_emit(buf, "%s%s\n",
2677                           srso_strings[srso_mitigation],
2678                           (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
2679 }
2680
2681 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
2682                                char *buf, unsigned int bug)
2683 {
2684         if (!boot_cpu_has_bug(bug))
2685                 return sprintf(buf, "Not affected\n");
2686
2687         switch (bug) {
2688         case X86_BUG_CPU_MELTDOWN:
2689                 if (boot_cpu_has(X86_FEATURE_PTI))
2690                         return sprintf(buf, "Mitigation: PTI\n");
2691
2692                 if (hypervisor_is_type(X86_HYPER_XEN_PV))
2693                         return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
2694
2695                 break;
2696
2697         case X86_BUG_SPECTRE_V1:
2698                 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
2699
2700         case X86_BUG_SPECTRE_V2:
2701                 return spectre_v2_show_state(buf);
2702
2703         case X86_BUG_SPEC_STORE_BYPASS:
2704                 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
2705
2706         case X86_BUG_L1TF:
2707                 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
2708                         return l1tf_show_state(buf);
2709                 break;
2710
2711         case X86_BUG_MDS:
2712                 return mds_show_state(buf);
2713
2714         case X86_BUG_TAA:
2715                 return tsx_async_abort_show_state(buf);
2716
2717         case X86_BUG_ITLB_MULTIHIT:
2718                 return itlb_multihit_show_state(buf);
2719
2720         case X86_BUG_SRBDS:
2721                 return srbds_show_state(buf);
2722
2723         case X86_BUG_MMIO_STALE_DATA:
2724         case X86_BUG_MMIO_UNKNOWN:
2725                 return mmio_stale_data_show_state(buf);
2726
2727         case X86_BUG_RETBLEED:
2728                 return retbleed_show_state(buf);
2729
2730         case X86_BUG_GDS:
2731                 return gds_show_state(buf);
2732
2733         case X86_BUG_SRSO:
2734                 return srso_show_state(buf);
2735
2736         default:
2737                 break;
2738         }
2739
2740         return sprintf(buf, "Vulnerable\n");
2741 }
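
/*
 * cpu_show_common() and the wrappers below back the files under
 * /sys/devices/system/cpu/vulnerabilities/. A minimal userspace sketch of
 * consuming one of them (built separately; the path assumes sysfs is
 * mounted in the standard location):
 *
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *             char line[256];
 *             FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");
 *
 *             if (f && fgets(line, sizeof(line), f))
 *                     fputs(line, stdout);    // the string built in spectre_v2_show_state()
 *             if (f)
 *                     fclose(f);
 *             return 0;
 *     }
 */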
2742
2743 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2744 {
2745         return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
2746 }
2747
2748 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2749 {
2750         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
2751 }
2752
2753 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2754 {
2755         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
2756 }
2757
2758 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
2759 {
2760         return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
2761 }
2762
2763 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
2764 {
2765         return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
2766 }
2767
2768 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
2769 {
2770         return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
2771 }
2772
2773 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
2774 {
2775         return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
2776 }
2777
2778 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
2779 {
2780         return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
2781 }
2782
2783 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
2784 {
2785         return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
2786 }
2787
2788 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
2789 {
2790         if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2791                 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
2792         else
2793                 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
2794 }
2795
2796 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
2797 {
2798         return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
2799 }
2800
2801 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
2802 {
2803         return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
2804 }
2805
2806 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
2807 {
2808         return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
2809 }
2810 #endif