x86/bugs: Report Intel retbleed vulnerability
[platform/kernel/linux-starfive.git] arch/x86/kernel/cpu/common.c
// SPDX-License-Identifier: GPL-2.0-only
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>

#include <asm/cmdline.h>
#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>
#include <asm/sigframe.h>
#include <asm/traps.h>
#include <asm/sev.h>

#include "cpu.h"

u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

u16 get_llc_id(unsigned int cpu)
{
        return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(get_llc_id);

/* L2 cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID;

static struct ppin_info {
        int     feature;
        int     msr_ppin_ctl;
        int     msr_ppin;
} ppin_info[] = {
        [X86_VENDOR_INTEL] = {
                .feature = X86_FEATURE_INTEL_PPIN,
                .msr_ppin_ctl = MSR_PPIN_CTL,
                .msr_ppin = MSR_PPIN
        },
        [X86_VENDOR_AMD] = {
                .feature = X86_FEATURE_AMD_PPIN,
                .msr_ppin_ctl = MSR_AMD_PPIN_CTL,
                .msr_ppin = MSR_AMD_PPIN
        },
};

static const struct x86_cpu_id ppin_cpuids[] = {
        X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
        X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),

        /* Legacy models without CPUID enumeration */
        X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
        X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
        X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
        X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),

        {}
};

static void ppin_init(struct cpuinfo_x86 *c)
{
        const struct x86_cpu_id *id;
        unsigned long long val;
        struct ppin_info *info;

        id = x86_match_cpu(ppin_cpuids);
        if (!id)
                return;

        /*
         * Testing the presence of the MSR is not enough. Need to check
         * that the PPIN_CTL allows reading of the PPIN.
         */
        info = (struct ppin_info *)id->driver_data;

        if (rdmsrl_safe(info->msr_ppin_ctl, &val))
                goto clear_ppin;

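        /*
         * In PPIN_CTL, bit 0 is LockOut and bit 1 is Enable_PPIN, so a
         * value of 1 means the enable bit is clear and locked: the
         * PPIN cannot be read for the rest of this boot.
         */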
        if ((val & 3UL) == 1UL) {
                /* PPIN locked in disabled mode */
                goto clear_ppin;
        }

        /* If PPIN is disabled, try to enable */
        if (!(val & 2UL)) {
                wrmsrl_safe(info->msr_ppin_ctl,  val | 2UL);
                rdmsrl_safe(info->msr_ppin_ctl, &val);
        }

        /* Is the enable bit set? */
        if (val & 2UL) {
                c->ppin = __rdmsr(info->msr_ppin);
                set_cpu_cap(c, info->feature);
                return;
        }

clear_ppin:
        clear_cpu_cap(c, info->feature);
}

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        cpu_detect_cache_sizes(c);
#else
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
#endif
}

static const struct cpu_dev default_cpu = {
        .c_init         = default_init,
        .c_vendor       = "Unknown",
        .c_x86_vendor   = X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
        /*
         * We need valid kernel segments for data and code in long mode too;
         * IRET will check the segment types.  kkeil 2000/10/28
         * Also, SYSRET mandates a special GDT layout.
         *
         * TLS descriptors are currently at a different place compared to i386.
         * Hopefully nobody expects them at a fixed place (Wine?)
         */
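        /*
         * GDT_ENTRY_INIT(flags, base, limit): the low byte of 'flags'
         * is the access byte (e.g. 0x9b = present, DPL 0, code,
         * readable/accessed) and the top nibble holds the AVL/L/D/G
         * bits, so 0xc0.. means 4K granularity + 32-bit and 0xa0..
         * means 4K granularity + 64-bit (L bit).
         */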
        [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * The code and data segments have fixed 64k limits; the
         * transfer segment sizes are set at run time.
         */
        /* 32-bit code */
        [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time.  All have 64k limits.
         */
        /* 32-bit code */
        [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* data */
        [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),

        [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
        /* nopcid doesn't accept parameters */
        if (s)
                return -EINVAL;

        /* do not emit a message if the feature is not present */
        if (!boot_cpu_has(X86_FEATURE_PCID))
                return 0;

        setup_clear_cpu_cap(X86_FEATURE_PCID);
        pr_info("nopcid: PCID feature disabled\n");
        return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
        /* noinvpcid doesn't accept parameters */
        if (s)
                return -EINVAL;

        /* do not emit a message if the feature is not present */
        if (!boot_cpu_has(X86_FEATURE_INVPCID))
                return 0;

        setup_clear_cpu_cap(X86_FEATURE_INVPCID);
        pr_info("noinvpcid: INVPCID feature disabled\n");
        return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        /*
         * Cyrix and IDT cpus allow disabling of CPUID
         * so the code below may return different results
         * when it is executed before and after enabling
         * the CPUID. Add "volatile" to not allow gcc to
         * optimize the subsequent calls to this function.
         */
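        /*
         * Save EFLAGS, toggle the requested flag, write it back and
         * re-read EFLAGS: f2 holds the original value, f1 the
         * read-back value after the toggle.
         */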
        asm volatile ("pushfl           \n\t"
                      "pushfl           \n\t"
                      "popl %0          \n\t"
                      "movl %0, %1      \n\t"
                      "xorl %2, %0      \n\t"
                      "pushl %0         \n\t"
                      "popfl            \n\t"
                      "pushfl           \n\t"
                      "popl %0          \n\t"
                      "popfl            \n\t"

                      : "=&r" (f1), "=&r" (f2)
                      : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

        if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
                return;

        /* Disable processor serial number: */

        rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
        lo |= 0x200000;
        wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

        pr_notice("CPU serial number disabled.\n");
        clear_cpu_cap(c, X86_FEATURE_PN);

        /* Disabling the serial number may affect the cpuid level */
        c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
        return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_SMEP))
                cr4_set_bits(X86_CR4_SMEP);
}

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
        unsigned long eflags = native_save_fl();

        /* This should have been cleared long ago */
        BUG_ON(eflags & X86_EFLAGS_AC);

        if (cpu_has(c, X86_FEATURE_SMAP))
                cr4_set_bits(X86_CR4_SMAP);
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
        /* Check the boot processor, plus build option for UMIP. */
        if (!cpu_feature_enabled(X86_FEATURE_UMIP))
                goto out;

        /* Check the current processor's cpuid bits. */
        if (!cpu_has(c, X86_FEATURE_UMIP))
                goto out;

        cr4_set_bits(X86_CR4_UMIP);

        pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

        return;

out:
        /*
         * Make sure UMIP is disabled in case it was enabled in a
         * previous boot (e.g., via kexec).
         */
        cr4_clear_bits(X86_CR4_UMIP);
}

/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask =
        X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
        X86_CR4_FSGSBASE | X86_CR4_CET;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;

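/*
 * Write CR0. If pinning is active and CR0.WP was cleared (by an exploit
 * or a bug), set it again right away and warn once afterwards.
 */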
void native_write_cr0(unsigned long val)
{
        unsigned long bits_missing = 0;

set_register:
        asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");

        if (static_branch_likely(&cr_pinning)) {
                if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
                        bits_missing = X86_CR0_WP;
                        val |= bits_missing;
                        goto set_register;
                }
                /* Warn after we've set the missing bits. */
                WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
        }
}
EXPORT_SYMBOL(native_write_cr0);

void __no_profile native_write_cr4(unsigned long val)
{
        unsigned long bits_changed = 0;

set_register:
        asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");

        if (static_branch_likely(&cr_pinning)) {
                if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
                        bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
                        val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
                        goto set_register;
                }
                /* Warn after we've corrected the changed bits. */
                WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
                          bits_changed);
        }
}
#if IS_MODULE(CONFIG_LKDTM)
EXPORT_SYMBOL_GPL(native_write_cr4);
#endif

void cr4_update_irqsoff(unsigned long set, unsigned long clear)
{
        unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

        lockdep_assert_irqs_disabled();

        newval = (cr4 & ~clear) | set;
        if (newval != cr4) {
                this_cpu_write(cpu_tlbstate.cr4, newval);
                __write_cr4(newval);
        }
}
EXPORT_SYMBOL(cr4_update_irqsoff);

/* Read the CR4 shadow. */
unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}
EXPORT_SYMBOL_GPL(cr4_read_shadow);

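/*
 * Used during CPU bringup to put CR4 into a known-good state and to
 * initialize this CPU's CR4 shadow.
 */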
void cr4_init(void)
{
        unsigned long cr4 = __read_cr4();

        if (boot_cpu_has(X86_FEATURE_PCID))
                cr4 |= X86_CR4_PCIDE;
        if (static_branch_likely(&cr_pinning))
                cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;

        __write_cr4(cr4);

        /* Initialize cr4 shadow for this CPU. */
        this_cpu_write(cpu_tlbstate.cr4, cr4);
}

/*
 * Once CPU feature detection is finished (and boot params have been
 * parsed), record any of the sensitive CR bits that are set, and
 * enable CR pinning.
 */
static void __init setup_cr_pinning(void)
{
        cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
        static_key_enable(&cr_pinning.key);
}

static __init int x86_nofsgsbase_setup(char *arg)
{
        /* Require an exact match without trailing characters. */
        if (strlen(arg))
                return 0;

        /* Do not emit a message if the feature is not present. */
        if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
                return 1;

        setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
        pr_info("FSGSBASE disabled via kernel command line\n");
        return 1;
}
__setup("nofsgsbase", x86_nofsgsbase_setup);

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
        if (c == &boot_cpu_data) {
                if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
                        return;
                /*
                 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
                 * bit to be set.  Enforce it.
                 */
                setup_force_cpu_cap(X86_FEATURE_OSPKE);

        } else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                return;
        }

        cr4_set_bits(X86_CR4_PKE);
        /* Load the default PKRU value */
        pkru_write_default();
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
        /*
         * Do not clear the X86_FEATURE_PKU bit.  All of the
         * runtime checks are against OSPKE so clearing the
         * bit does nothing.
         *
         * This way, we will see "pku" in cpuinfo, but not
         * "ospke", which is exactly what we want.  It shows
         * that the CPU has PKU, but the OS has not enabled it.
         * This happens to be exactly how a system would look
         * if we disabled the config option.
         */
        pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
        pku_disabled = true;
        return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

#ifdef CONFIG_X86_KERNEL_IBT

__noendbr u64 ibt_save(void)
{
        u64 msr = 0;

        if (cpu_feature_enabled(X86_FEATURE_IBT)) {
                rdmsrl(MSR_IA32_S_CET, msr);
                wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
        }

        return msr;
}

__noendbr void ibt_restore(u64 save)
{
        u64 msr;

        if (cpu_feature_enabled(X86_FEATURE_IBT)) {
                rdmsrl(MSR_IA32_S_CET, msr);
                msr &= ~CET_ENDBR_EN;
                msr |= (save & CET_ENDBR_EN);
                wrmsrl(MSR_IA32_S_CET, msr);
        }
}

#endif

static __always_inline void setup_cet(struct cpuinfo_x86 *c)
{
        u64 msr = CET_ENDBR_EN;

        if (!HAS_KERNEL_IBT ||
            !cpu_feature_enabled(X86_FEATURE_IBT))
                return;

        wrmsrl(MSR_IA32_S_CET, msr);
        cr4_set_bits(X86_CR4_CET);

        if (!ibt_selftest()) {
                pr_err("IBT selftest: Failed!\n");
                setup_clear_cpu_cap(X86_FEATURE_IBT);
                return;
        }
}

__noendbr void cet_disable(void)
{
        if (cpu_feature_enabled(X86_FEATURE_IBT))
                wrmsrl(MSR_IA32_S_CET, 0);
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
        u32 feature;
        u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
        { X86_FEATURE_MWAIT,            0x00000005 },
        { X86_FEATURE_DCA,              0x00000009 },
        { X86_FEATURE_XSAVE,            0x0000000d },
        { 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
        const struct cpuid_dependent_feature *df;

        for (df = cpuid_dependent_features; df->feature; df++) {

                if (!cpu_has(c, df->feature))
                        continue;
                /*
                 * Note: cpuid_level is set to -1 if unavailable, but
                 * extended_cpuid_level is set to 0 if unavailable
                 * and the legitimate extended levels are all negative
                 * when signed; hence the weird messing around with
                 * signs here...
                 */
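                /*
                 * E.g. 0x0000000d is positive as s32 and is compared
                 * signed against cpuid_level, while a level like
                 * 0x80000008 is negative as s32 and is compared
                 * unsigned against extended_cpuid_level.
                 */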
                if (!((s32)df->level < 0 ?
                     (u32)df->level > (u32)c->extended_cpuid_level :
                     (s32)df->level > (s32)c->cpuid_level))
                        continue;

                clear_cpu_cap(c, df->feature);
                if (!warn)
                        continue;

                pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
                        x86_cap_flag(df->feature), df->level);
        }
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, it isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        const struct legacy_cpu_model_info *info;

        if (c->x86_model >= 16)
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->legacy_models;

        while (info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
#endif
        return NULL;            /* Not found */
}

/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        loadsegment(fs, __KERNEL_PERCPU);
#else
        __loadsegment_simple(gs, 0);
        wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
}

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
        /* Load the original GDT */
        load_direct_gdt(cpu);
        /* Reload the per-cpu base */
        load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q, *s;

        if (c->extended_cpuid_level < 0x80000004)
                return;

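        /*
         * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of
         * the 48-byte brand string, written straight into x86_model_id.
         */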
        v = (unsigned int *)c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /* Trim whitespace */
        p = q = s = &c->x86_model_id[0];

        while (*p == ' ')
                p++;

        while (*p) {
                /* Note the last non-whitespace index */
                if (!isspace(*p))
                        s = q;

                *q++ = *p++;
        }

        *(s + 1) = '\0';
}

void detect_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx;

        c->x86_max_cores = 1;
        if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
                return;

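        /*
         * Leaf 4, subleaf 0 describes the first cache level: EAX[4:0]
         * is the cache type (0 means no more caches/invalid leaf) and
         * EAX[31:26] is the maximum number of core IDs per package,
         * minus one.
         */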
        cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
        if (eax & 0x1f)
                c->x86_max_cores = (eax >> 26) + 1;
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ebx, ecx, edx, l2size;

        n = c->extended_cpuid_level;

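        /*
         * Extended leaf 0x80000005 reports L1 sizes in KB (ECX[31:24]
         * L1D, EDX[31:24] L1I); leaf 0x80000006 reports the L2 size in
         * KB in ECX[31:16].
         */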
        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
#endif
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
        l2size = ecx >> 16;

#ifdef CONFIG_X86_64
        c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
        /* do processor-specific cache resizing */
        if (this_cpu->legacy_cache_size)
                l2size = this_cpu->legacy_cache_size(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */
#endif

        c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
        if (this_cpu->c_detect_tlb)
                this_cpu->c_detect_tlb(c);

        pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
                tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
                tlb_lli_4m[ENTRIES]);

        pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
                tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
                tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;

        if (!cpu_has(c, X86_FEATURE_HT))
                return -1;

        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return -1;

        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
                return -1;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        smp_num_siblings = (ebx & 0xff0000) >> 16;
        if (smp_num_siblings == 1)
                pr_info_once("CPU0: Hyper-Threading is disabled\n");
#endif
        return 0;
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        int index_msb, core_bits;

        if (detect_ht_early(c) < 0)
                return;

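        /*
         * Decompose the initial APIC ID: shifting off the bits that
         * number the threads in a package yields the package ID. Then
         * scale smp_num_siblings down from threads-per-package to
         * threads-per-core and mask out the core ID bits above the
         * thread bits.
         */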
        index_msb = get_count_order(smp_num_siblings);
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

        smp_num_siblings = smp_num_siblings / c->x86_max_cores;

        index_msb = get_count_order(smp_num_siblings);

        core_bits = get_count_order(c->x86_max_cores);

        c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                       ((1 << core_bits) - 1);
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (!cpu_devs[i])
                        break;

                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                    (cpu_devs[i]->c_ident[1] &&
                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

                        this_cpu = cpu_devs[i];
                        c->x86_vendor = this_cpu->c_x86_vendor;
                        return;
                }
        }

        pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
                    "CPU: Your system may be unstable.\n", v);

        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
        /* Get vendor name */
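        /*
         * CPUID(0) returns the 12-byte vendor string in EBX, EDX, ECX
         * order; storing EBX at offset 0, EDX at 4 and ECX at 8
         * reassembles strings like "GenuineIntel".
         */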
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        c->x86 = 4;
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;

                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86          = x86_family(tfms);
                c->x86_model    = x86_model(tfms);
                c->x86_stepping = x86_stepping(tfms);

                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
                        c->x86_cache_alignment = c->x86_clflush_size;
                }
        }
}

static void apply_forced_caps(struct cpuinfo_x86 *c)
{
        int i;

        for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
        /*
         * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
         * and they also have a different bit for STIBP support. Also,
         * a hypervisor might have set the individual AMD bits even on
         * Intel CPUs, for finer-grained selection of what's available.
         */
        if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
                set_cpu_cap(c, X86_FEATURE_IBRS);
                set_cpu_cap(c, X86_FEATURE_IBPB);
                set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
        }

        if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
                set_cpu_cap(c, X86_FEATURE_STIBP);

        if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
            cpu_has(c, X86_FEATURE_VIRT_SSBD))
                set_cpu_cap(c, X86_FEATURE_SSBD);

        if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
                set_cpu_cap(c, X86_FEATURE_IBRS);
                set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
        }

        if (cpu_has(c, X86_FEATURE_AMD_IBPB))
                set_cpu_cap(c, X86_FEATURE_IBPB);

        if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
                set_cpu_cap(c, X86_FEATURE_STIBP);
                set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
        }

        if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
                set_cpu_cap(c, X86_FEATURE_SSBD);
                set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
                clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
        }
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

                c->x86_capability[CPUID_1_ECX] = ecx;
                c->x86_capability[CPUID_1_EDX] = edx;
        }

        /* Thermal and Power Management Leaf: level 0x00000006 (eax) */
        if (c->cpuid_level >= 0x00000006)
                c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

        /* Additional Intel-defined flags: level 0x00000007 */
        if (c->cpuid_level >= 0x00000007) {
                cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
                c->x86_capability[CPUID_7_0_EBX] = ebx;
                c->x86_capability[CPUID_7_ECX] = ecx;
                c->x86_capability[CPUID_7_EDX] = edx;

                /* Check valid sub-leaf index before accessing it */
                if (eax >= 1) {
                        cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
                        c->x86_capability[CPUID_7_1_EAX] = eax;
                }
        }

        /* Extended state features: level 0x0000000d */
        if (c->cpuid_level >= 0x0000000d) {
                cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

                c->x86_capability[CPUID_D_1_EAX] = eax;
        }

        /* AMD-defined flags: level 0x80000001 */
        eax = cpuid_eax(0x80000000);
        c->extended_cpuid_level = eax;

        if ((eax & 0xffff0000) == 0x80000000) {
                if (eax >= 0x80000001) {
                        cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

                        c->x86_capability[CPUID_8000_0001_ECX] = ecx;
                        c->x86_capability[CPUID_8000_0001_EDX] = edx;
                }
        }

        if (c->extended_cpuid_level >= 0x80000007) {
                cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

                c->x86_capability[CPUID_8000_0007_EBX] = ebx;
                c->x86_power = edx;
        }

        if (c->extended_cpuid_level >= 0x80000008) {
                cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
                c->x86_capability[CPUID_8000_0008_EBX] = ebx;
        }

        if (c->extended_cpuid_level >= 0x8000000a)
                c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

        if (c->extended_cpuid_level >= 0x8000001f)
                c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);

        init_scattered_cpuid_features(c);
        init_speculation_control(c);

        /*
         * Clear/Set all flags overridden by options, after probe.
         * This needs to happen each time we re-probe, which may happen
         * several times during CPU initialization.
         */
        apply_forced_caps(c);
}

void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (c->extended_cpuid_level >= 0x80000008) {
                cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
#ifdef CONFIG_X86_32
        else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
                c->x86_phys_bits = 36;
#endif
        c->x86_cache_bits = c->x86_phys_bits;
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        int i;

        /*
         * First of all, decide if this is a 486 or higher:
         * it's a 486 if we can modify the AC flag.
         */
        if (flag_is_changeable_p(X86_EFLAGS_AC))
                c->x86 = 4;
        else
                c->x86 = 3;

        for (i = 0; i < X86_VENDOR_NUM; i++)
                if (cpu_devs[i] && cpu_devs[i]->c_identify) {
                        c->x86_vendor_id[0] = 0;
                        cpu_devs[i]->c_identify(c);
                        if (c->x86_vendor_id[0]) {
                                get_cpu_vendor(c);
                                break;
                        }
                }
#endif
}

#define NO_SPECULATION          BIT(0)
#define NO_MELTDOWN             BIT(1)
#define NO_SSB                  BIT(2)
#define NO_L1TF                 BIT(3)
#define NO_MDS                  BIT(4)
#define MSBDS_ONLY              BIT(5)
#define NO_SWAPGS               BIT(6)
#define NO_ITLB_MULTIHIT        BIT(7)
#define NO_SPECTRE_V2           BIT(8)

#define VULNWL(vendor, family, model, whitelist)        \
        X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)

#define VULNWL_INTEL(model, whitelist)          \
        VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)

#define VULNWL_AMD(family, whitelist)           \
        VULNWL(AMD, family, X86_MODEL_ANY, whitelist)

#define VULNWL_HYGON(family, whitelist)         \
        VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)

static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL(ANY,     4, X86_MODEL_ANY,       NO_SPECULATION),
        VULNWL(CENTAUR, 5, X86_MODEL_ANY,       NO_SPECULATION),
        VULNWL(INTEL,   5, X86_MODEL_ANY,       NO_SPECULATION),
        VULNWL(NSC,     5, X86_MODEL_ANY,       NO_SPECULATION),
        VULNWL(VORTEX,  5, X86_MODEL_ANY,       NO_SPECULATION),
        VULNWL(VORTEX,  6, X86_MODEL_ANY,       NO_SPECULATION),

        /* Intel Family 6 */
        VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION | NO_ITLB_MULTIHIT),

        VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_SILVERMONT_D,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),

        VULNWL_INTEL(CORE_YONAH,                NO_SSB),

        VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_AIRMONT_NP,           NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

        VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_GOLDMONT_D,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

        /*
         * Technically, swapgs isn't serializing on AMD (despite it previously
         * being documented as such in the APM).  But according to AMD, %gs is
         * updated non-speculatively, and the issuing of %gs-relative memory
         * operands will be blocked until the %gs update completes, which is
         * good enough for our purposes.
         */

        VULNWL_INTEL(ATOM_TREMONT_D,            NO_ITLB_MULTIHIT),

        /* AMD Family 0xf - 0x12 */
        VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

        /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
        VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

        /* Zhaoxin Family 7 */
        VULNWL(CENTAUR, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS),
        VULNWL(ZHAOXIN, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS),
        {}
};

#define VULNBL(vendor, family, model, blacklist)        \
        X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)

#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)                   \
        X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,             \
                                            INTEL_FAM6_##model, steppings, \
                                            X86_FEATURE_ANY, issues)

#define VULNBL_AMD(family, blacklist)           \
        VULNBL(AMD, family, X86_MODEL_ANY, blacklist)

#define VULNBL_HYGON(family, blacklist)         \
        VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)

#define SRBDS           BIT(0)
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO            BIT(1)
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS       BIT(2)
/* CPU is affected by RETbleed, speculating where you would not expect it */
#define RETBLEED        BIT(3)

static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(HASWELL,         X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(HASWELL_L,       X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(HASWELL_G,       X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(HASWELL_X,       BIT(2) | BIT(4),                MMIO),
        VULNBL_INTEL_STEPPINGS(BROADWELL_D,     X86_STEPPINGS(0x3, 0x5),        MMIO),
        VULNBL_INTEL_STEPPINGS(BROADWELL_G,     X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(BROADWELL_X,     X86_STEPPING_ANY,               MMIO),
        VULNBL_INTEL_STEPPINGS(BROADWELL,       X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(SKYLAKE_X,       BIT(3) | BIT(4) | BIT(6) |
                                                BIT(7) | BIT(0xB),              MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x9, 0xC),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x0, 0x8),        SRBDS),
        VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x9, 0xD),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x0, 0x8),        SRBDS),
        VULNBL_INTEL_STEPPINGS(ICELAKE_L,       X86_STEPPINGS(0x5, 0x5),        MMIO | MMIO_SBDS | RETBLEED),
        VULNBL_INTEL_STEPPINGS(ICELAKE_D,       X86_STEPPINGS(0x1, 0x1),        MMIO),
        VULNBL_INTEL_STEPPINGS(ICELAKE_X,       X86_STEPPINGS(0x4, 0x6),        MMIO),
        VULNBL_INTEL_STEPPINGS(COMETLAKE,       BIT(2) | BIT(3) | BIT(5),       MMIO | MMIO_SBDS | RETBLEED),
        VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS | RETBLEED),
        VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x0, 0x0),        MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(LAKEFIELD,       X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS | RETBLEED),
        VULNBL_INTEL_STEPPINGS(ROCKETLAKE,      X86_STEPPINGS(0x1, 0x1),        MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,    X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,  X86_STEPPING_ANY,               MMIO),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,  X86_STEPPINGS(0x0, 0x0),        MMIO | MMIO_SBDS),

        VULNBL_AMD(0x15, RETBLEED),
        VULNBL_AMD(0x16, RETBLEED),
        VULNBL_AMD(0x17, RETBLEED),
        VULNBL_HYGON(0x18, RETBLEED),
        {}
};

static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{
        const struct x86_cpu_id *m = x86_match_cpu(table);

        return m && !!(m->driver_data & which);
}

u64 x86_read_arch_cap_msr(void)
{
        u64 ia32_cap = 0;

        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

        return ia32_cap;
}

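/*
 * A CPU is only considered immune to MMIO Stale Data when
 * IA32_ARCH_CAPABILITIES enumerates all three of the FBSDP_NO, PSDP_NO
 * and SBDR_SSDP_NO bits.
 */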
static bool arch_cap_mmio_immune(u64 ia32_cap)
{
        return (ia32_cap & ARCH_CAP_FBSDP_NO &&
                ia32_cap & ARCH_CAP_PSDP_NO &&
                ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
}

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
        u64 ia32_cap = x86_read_arch_cap_msr();

        /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
        if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
            !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
                setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);

        if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
                return;

        setup_force_cpu_bug(X86_BUG_SPECTRE_V1);

        if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
                setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

        if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
            !(ia32_cap & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

        if (ia32_cap & ARCH_CAP_IBRS_ALL)
                setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

        if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
            !(ia32_cap & ARCH_CAP_MDS_NO)) {
                setup_force_cpu_bug(X86_BUG_MDS);
                if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
                        setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
        }

        if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
                setup_force_cpu_bug(X86_BUG_SWAPGS);

        /*
         * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA
         * bug when:
         *      - TSX is supported, or
         *      - TSX_CTRL is present.
         *
         * The TSX_CTRL check is needed for cases when TSX could have
         * been disabled before the kernel booted, e.g. by kexec.
         * TSX_CTRL alone is not sufficient when the microcode update is
         * not present, or when running as a guest that doesn't get
         * TSX_CTRL.
         */
        if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
            (cpu_has(c, X86_FEATURE_RTM) ||
             (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
                setup_force_cpu_bug(X86_BUG_TAA);

        /*
         * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
         * in the vulnerability blacklist.
         *
         * Some of the implications and mitigations of Shared Buffers
         * Data Sampling (SBDS) are similar to SRBDS. Give SBDS the same
         * treatment as SRBDS.
         */
        if ((cpu_has(c, X86_FEATURE_RDRAND) ||
             cpu_has(c, X86_FEATURE_RDSEED)) &&
            cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
                setup_force_cpu_bug(X86_BUG_SRBDS);

        /*
         * Processor MMIO Stale Data bug enumeration
         *
         * The affected CPU list is generally enough to enumerate the
         * vulnerability, but in the virtualized case also check the
         * ARCH_CAP MSR bits; the VMM may not want the guest to
         * enumerate the bug.
         */
        if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
            !arch_cap_mmio_immune(ia32_cap))
                setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);

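        /*
         * RETBleed: beyond the blacklisted parts above, Intel CPUs
         * that enumerate RSBA (RSB underflow may fall back to
         * alternate predictors) in IA32_ARCH_CAPABILITIES are also
         * reported as affected.
         */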
        if ((cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)))
                setup_force_cpu_bug(X86_BUG_RETBLEED);

        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;

        /* Rogue Data Cache Load? No! */
        if (ia32_cap & ARCH_CAP_RDCL_NO)
                return;

        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

        if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
                return;

        setup_force_cpu_bug(X86_BUG_L1TF);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(void)
{
#ifdef CONFIG_X86_32
        setup_clear_cpu_cap(X86_FEATURE_NOPL);
#else
        setup_force_cpu_cap(X86_FEATURE_NOPL);
#endif
}

/*
 * We parse cpu parameters early because fpu__init_system() is executed
 * before parse_early_param().
 */
static void __init cpu_parse_early_param(void)
{
        char arg[128];
        char *argptr = arg, *opt;
        int arglen, taint = 0;

#ifdef CONFIG_X86_32
        if (cmdline_find_option_bool(boot_command_line, "no387"))
#ifdef CONFIG_MATH_EMULATION
                setup_clear_cpu_cap(X86_FEATURE_FPU);
#else
                pr_err("Option 'no387' requires CONFIG_MATH_EMULATION to be enabled.\n");
#endif

        if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
                setup_clear_cpu_cap(X86_FEATURE_FXSR);
#endif

        if (cmdline_find_option_bool(boot_command_line, "noxsave"))
                setup_clear_cpu_cap(X86_FEATURE_XSAVE);

        if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
                setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);

        if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
                setup_clear_cpu_cap(X86_FEATURE_XSAVES);

        arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
        if (arglen <= 0)
                return;

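        /*
         * "clearcpuid=" takes a comma-separated list of feature names
         * and/or raw bit numbers; e.g. the (hypothetical) command line
         * "clearcpuid=smap,123" would clear the SMAP flag and feature
         * bit 123.
         */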
        pr_info("Clearing CPUID bits:");

        while (argptr) {
                bool found __maybe_unused = false;
                unsigned int bit;

                opt = strsep(&argptr, ",");

                /*
                 * Handle naked numbers first for feature flags which don't
                 * have names.
                 */
                if (!kstrtouint(opt, 10, &bit)) {
                        if (bit < NCAPINTS * 32) {

#ifdef CONFIG_X86_FEATURE_NAMES
                                /* empty-string, i.e., ""-defined feature flags */
                                if (!x86_cap_flags[bit])
                                        pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
                                else
#endif
                                        pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));

                                setup_clear_cpu_cap(bit);
                                taint++;
                        }
                        /*
                         * The assumption is that there are no feature
                         * names consisting only of digits, so go on to
                         * the next argument.
                         */
1468                         continue;
1469                 }
1470
1471 #ifdef CONFIG_X86_FEATURE_NAMES
1472                 for (bit = 0; bit < 32 * NCAPINTS; bit++) {
1473                         if (!x86_cap_flag(bit))
1474                                 continue;
1475
1476                         if (strcmp(x86_cap_flag(bit), opt))
1477                                 continue;
1478
1479                         pr_cont(" %s", opt);
1480                         setup_clear_cpu_cap(bit);
1481                         taint++;
1482                         found = true;
1483                         break;
1484                 }
1485
1486                 if (!found)
1487                         pr_cont(" (unknown: %s)", opt);
1488 #endif
1489         }
1490         pr_cont("\n");
1491
1492         if (taint)
1493                 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1494 }
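
/*
 * A minimal user-space sketch of the "clearcpuid=" parsing above, for
 * illustration only: strsep() splits the comma-separated list, and naked
 * numbers are tried before name lookup, mirroring the kernel loop.  The
 * feature table is a hypothetical stand-in for x86_cap_flags[]:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static const char *fake_cap_flags[] = { "fpu", "vme", "de", "pse" };
 *	#define NFLAGS (sizeof(fake_cap_flags) / sizeof(fake_cap_flags[0]))
 *
 *	int main(void)
 *	{
 *		char arg[] = "de,3,bogus";
 *		char *argptr = arg, *opt;
 *
 *		while ((opt = strsep(&argptr, ",")) != NULL) {
 *			char *end;
 *			unsigned long bit = strtoul(opt, &end, 10);
 *
 *			if (*opt && !*end) {			// naked number first
 *				if (bit < NFLAGS)
 *					printf("clear bit %lu (%s)\n",
 *					       bit, fake_cap_flags[bit]);
 *				continue;
 *			}
 *			for (bit = 0; bit < NFLAGS; bit++)
 *				if (!strcmp(fake_cap_flags[bit], opt))
 *					break;
 *			if (bit < NFLAGS)
 *				printf("clear bit %lu (%s)\n", bit, opt);
 *			else
 *				printf("(unknown: %s)\n", opt);
 *		}
 *		return 0;
 *	}
 */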
1495
1496 /*
1497  * Do minimum CPU detection early.
1498  * Fields really needed: vendor, cpuid_level, family, model, stepping,
1499  * cache alignment.
1500  * The others are not touched to avoid unwanted side effects.
1501  *
1502  * WARNING: this function is only called on the boot CPU.  Don't add code
1503  * here that is supposed to run on all CPUs.
1504  */
1505 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1506 {
1507 #ifdef CONFIG_X86_64
1508         c->x86_clflush_size = 64;
1509         c->x86_phys_bits = 36;
1510         c->x86_virt_bits = 48;
1511 #else
1512         c->x86_clflush_size = 32;
1513         c->x86_phys_bits = 32;
1514         c->x86_virt_bits = 32;
1515 #endif
1516         c->x86_cache_alignment = c->x86_clflush_size;
1517
1518         memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1519         c->extended_cpuid_level = 0;
1520
1521         if (!have_cpuid_p())
1522                 identify_cpu_without_cpuid(c);
1523
1524         /* Cyrix could have CPUID enabled via c_identify() */
1525         if (have_cpuid_p()) {
1526                 cpu_detect(c);
1527                 get_cpu_vendor(c);
1528                 get_cpu_cap(c);
1529                 get_cpu_address_sizes(c);
1530                 setup_force_cpu_cap(X86_FEATURE_CPUID);
1531                 cpu_parse_early_param();
1532
1533                 if (this_cpu->c_early_init)
1534                         this_cpu->c_early_init(c);
1535
1536                 c->cpu_index = 0;
1537                 filter_cpuid_features(c, false);
1538
1539                 if (this_cpu->c_bsp_init)
1540                         this_cpu->c_bsp_init(c);
1541         } else {
1542                 setup_clear_cpu_cap(X86_FEATURE_CPUID);
1543         }
1544
1545         setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1546
1547         cpu_set_bug_bits(c);
1548
1549         sld_setup(c);
1550
1551         fpu__init_system(c);
1552
1553         init_sigframe_size();
1554
1555 #ifdef CONFIG_X86_32
1556         /*
1557          * Regardless of whether PCID is enumerated, the SDM says
1558          * that it can't be enabled in 32-bit mode.
1559          */
1560         setup_clear_cpu_cap(X86_FEATURE_PCID);
1561 #endif
1562
1563         /*
1564          * Later in the boot process pgtable_l5_enabled() relies on
1565          * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1566          * enabled by this point we need to clear the feature bit to avoid
1567          * false-positives at the later stage.
1568          *
1569          * pgtable_l5_enabled() can be false here for several reasons:
1570          *  - 5-level paging is disabled at compile time;
1571          *  - it's a 32-bit kernel;
1572          *  - the machine doesn't support 5-level paging;
1573          *  - the user specified 'no5lvl' on the kernel command line.
1574          */
1575         if (!pgtable_l5_enabled())
1576                 setup_clear_cpu_cap(X86_FEATURE_LA57);
1577
1578         detect_nopl();
1579 }
1580
1581 void __init early_cpu_init(void)
1582 {
1583         const struct cpu_dev *const *cdev;
1584         int count = 0;
1585
1586 #ifdef CONFIG_PROCESSOR_SELECT
1587         pr_info("KERNEL supported cpus:\n");
1588 #endif
1589
1590         for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1591                 const struct cpu_dev *cpudev = *cdev;
1592
1593                 if (count >= X86_VENDOR_NUM)
1594                         break;
1595                 cpu_devs[count] = cpudev;
1596                 count++;
1597
1598 #ifdef CONFIG_PROCESSOR_SELECT
1599                 {
1600                         unsigned int j;
1601
1602                         for (j = 0; j < 2; j++) {
1603                                 if (!cpudev->c_ident[j])
1604                                         continue;
1605                                 pr_info("  %s %s\n", cpudev->c_vendor,
1606                                         cpudev->c_ident[j]);
1607                         }
1608                 }
1609 #endif
1610         }
1611         early_identify_cpu(&boot_cpu_data);
1612 }
1613
1614 static bool detect_null_seg_behavior(void)
1615 {
1616         /*
1617          * Empirically, writing zero to a segment selector on AMD does
1618          * not clear the base, whereas writing zero to a segment
1619          * selector on Intel does clear the base.  Intel's behavior
1620          * allows slightly faster context switches in the common case
1621          * where GS is unused by the prev and next threads.
1622          *
1623          * Since neither vendor documents this anywhere that I can see,
1624          * detect it directly instead of hard-coding the choice by
1625          * vendor.
1626          *
1627          * I've designated AMD's behavior as the "bug" because it's
1628          * counterintuitive and less friendly.
1629          */
1630
1631         unsigned long old_base, tmp;
1632         rdmsrl(MSR_FS_BASE, old_base);
1633         wrmsrl(MSR_FS_BASE, 1);
1634         loadsegment(fs, 0);
1635         rdmsrl(MSR_FS_BASE, tmp);
1636         wrmsrl(MSR_FS_BASE, old_base);
1637         return tmp == 0;
1638 }
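
/*
 * The same probe can be run from user space, e.g. to check what a
 * hypervisor exposes.  A minimal sketch, x86-64 Linux only and for
 * illustration, not kernel code; it uses GS instead of FS so that
 * glibc's TLS (which lives in FS) is left alone:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <asm/prctl.h>		// ARCH_SET_GS / ARCH_GET_GS
 *	#include <sys/syscall.h>
 *
 *	int main(void)
 *	{
 *		unsigned long base = 0;
 *
 *		syscall(SYS_arch_prctl, ARCH_SET_GS, 1UL);  // nonzero base
 *		asm volatile("movw %w0, %%gs" : : "r" (0)); // null selector
 *		syscall(SYS_arch_prctl, ARCH_GET_GS, &base);
 *		printf("null segment load %s the base\n",
 *		       base ? "preserves" : "clears");
 *		return 0;
 *	}
 */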
1639
1640 void check_null_seg_clears_base(struct cpuinfo_x86 *c)
1641 {
1642         /* BUG_NULL_SEG is only relevant with 64-bit userspace */
1643         if (!IS_ENABLED(CONFIG_X86_64))
1644                 return;
1645
1646         /* Zen3 CPUs advertise Null Selector Clears Base in CPUID. */
1647         if (c->extended_cpuid_level >= 0x80000021 &&
1648             cpuid_eax(0x80000021) & BIT(6))
1649                 return;
1650
1651         /*
1652          * The CPUID bit above wasn't set. If this kernel is running as
1653          * a hypervisor guest, then the hypervisor has decided not to
1654          * advertise that CPUID bit for whatever reason.  For example,
1655          * one member of the migration pool might be vulnerable.  That
1656          * means the bug is present: set the BUG flag and return.
1657          */
1658         if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
1659                 set_cpu_bug(c, X86_BUG_NULL_SEG);
1660                 return;
1661         }
1662
1663         /*
1664          * Zen2 CPUs also have this behaviour, but no CPUID bit.
1665          * Family 0x18 is the corresponding family for Hygon.
1666          */
1667         if ((c->x86 == 0x17 || c->x86 == 0x18) &&
1668             detect_null_seg_behavior())
1669                 return;
1670
1671         /* All the remaining ones are affected */
1672         set_cpu_bug(c, X86_BUG_NULL_SEG);
1673 }
1674
1675 static void generic_identify(struct cpuinfo_x86 *c)
1676 {
1677         c->extended_cpuid_level = 0;
1678
1679         if (!have_cpuid_p())
1680                 identify_cpu_without_cpuid(c);
1681
1682         /* Cyrix could have CPUID enabled via c_identify() */
1683         if (!have_cpuid_p())
1684                 return;
1685
1686         cpu_detect(c);
1687
1688         get_cpu_vendor(c);
1689
1690         get_cpu_cap(c);
1691
1692         get_cpu_address_sizes(c);
1693
1694         if (c->cpuid_level >= 0x00000001) {
1695                 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1696 #ifdef CONFIG_X86_32
1697 # ifdef CONFIG_SMP
1698                 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1699 # else
1700                 c->apicid = c->initial_apicid;
1701 # endif
1702 #endif
1703                 c->phys_proc_id = c->initial_apicid;
1704         }
1705
1706         get_model_name(c); /* Default name */
1707
1708         /*
1709          * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
1710          * systems that run Linux at CPL > 0 may or may not have the
1711          * issue, but, even if they have the issue, there's absolutely
1712          * nothing we can do about it because we can't use the real IRET
1713          * instruction.
1714          *
1715          * NB: For the time being, only 32-bit kernels support
1716          * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
1717          * whether to apply espfix using paravirt hooks.  If any
1718          * non-paravirt system ever shows up that does *not* have the
1719          * ESPFIX issue, we can change this.
1720          */
1721 #ifdef CONFIG_X86_32
1722         set_cpu_bug(c, X86_BUG_ESPFIX);
1723 #endif
1724 }
1725
1726 /*
1727  * Validate that ACPI/mptables have the same information about the
1728  * effective APIC id and update the package map.
1729  */
1730 static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1731 {
1732 #ifdef CONFIG_SMP
1733         unsigned int apicid, cpu = smp_processor_id();
1734
1735         apicid = apic->cpu_present_to_apicid(cpu);
1736
1737         if (apicid != c->apicid) {
1738                 pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
1739                        cpu, apicid, c->apicid);
1740         }
1741         BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
1742         BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
1743 #else
1744         c->logical_proc_id = 0;
1745 #endif
1746 }
1747
1748 /*
1749  * This does the hard work of actually picking apart the CPU stuff...
1750  */
1751 static void identify_cpu(struct cpuinfo_x86 *c)
1752 {
1753         int i;
1754
1755         c->loops_per_jiffy = loops_per_jiffy;
1756         c->x86_cache_size = 0;
1757         c->x86_vendor = X86_VENDOR_UNKNOWN;
1758         c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
1759         c->x86_vendor_id[0] = '\0'; /* Unset */
1760         c->x86_model_id[0] = '\0';  /* Unset */
1761         c->x86_max_cores = 1;
1762         c->x86_coreid_bits = 0;
1763         c->cu_id = 0xff;
1764 #ifdef CONFIG_X86_64
1765         c->x86_clflush_size = 64;
1766         c->x86_phys_bits = 36;
1767         c->x86_virt_bits = 48;
1768 #else
1769         c->cpuid_level = -1;    /* CPUID not detected */
1770         c->x86_clflush_size = 32;
1771         c->x86_phys_bits = 32;
1772         c->x86_virt_bits = 32;
1773 #endif
1774         c->x86_cache_alignment = c->x86_clflush_size;
1775         memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1776 #ifdef CONFIG_X86_VMX_FEATURE_NAMES
1777         memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
1778 #endif
1779
1780         generic_identify(c);
1781
1782         if (this_cpu->c_identify)
1783                 this_cpu->c_identify(c);
1784
1785         /* Clear/Set all flags overridden by options, after probe */
1786         apply_forced_caps(c);
1787
1788 #ifdef CONFIG_X86_64
1789         c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1790 #endif
1791
1792         /*
1793          * Vendor-specific initialization.  In this section we
1794          * canonicalize the feature flags: if a CPU supports features
1795          * that CPUID doesn't report, if CPUID claims flags the CPU
1796          * doesn't actually have, or if there are other bugs, we
1797          * handle them here.
1798          *
1799          * At the end of this section, c->x86_capability better
1800          * indicate the features this CPU genuinely supports!
1801          */
1802         if (this_cpu->c_init)
1803                 this_cpu->c_init(c);
1804
1805         /* Disable the PN if appropriate */
1806         squash_the_stupid_serial_number(c);
1807
1808         /* Set up SMEP/SMAP/UMIP */
1809         setup_smep(c);
1810         setup_smap(c);
1811         setup_umip(c);
1812
1813         /* Enable FSGSBASE instructions if available. */
1814         if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
1815                 cr4_set_bits(X86_CR4_FSGSBASE);
1816                 elf_hwcap2 |= HWCAP2_FSGSBASE;
1817         }
1818
1819         /*
1820          * The vendor-specific functions might have changed features.
1821          * Now we do "generic changes."
1822          */
1823
1824         /* Filter out anything that depends on CPUID levels we don't have */
1825         filter_cpuid_features(c, true);
1826
1827         /* If the model name is still unset, do table lookup. */
1828         if (!c->x86_model_id[0]) {
1829                 const char *p;
1830                 p = table_lookup_model(c);
1831                 if (p)
1832                         strcpy(c->x86_model_id, p);
1833                 else
1834                         /* Last resort... */
1835                         sprintf(c->x86_model_id, "%02x/%02x",
1836                                 c->x86, c->x86_model);
1837         }
1838
1839 #ifdef CONFIG_X86_64
1840         detect_ht(c);
1841 #endif
1842
1843         x86_init_rdrand(c);
1844         setup_pku(c);
1845         setup_cet(c);
1846
1847         /*
1848          * Clear/Set all flags overridden by options.  This must be done
1849          * before the SMP capability AND across all CPUs below.
1850          */
1851         apply_forced_caps(c);
1852
1853         /*
1854          * On SMP, boot_cpu_data holds the common feature set between
1855          * all CPUs; so make sure that we indicate which features are
1856          * common between the CPUs.  The first time this routine gets
1857          * executed, c == &boot_cpu_data.
1858          */
1859         if (c != &boot_cpu_data) {
1860                 /* AND the already accumulated flags with these */
1861                 for (i = 0; i < NCAPINTS; i++)
1862                         boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1863
1864                 /* OR, i.e. replicate the bug flags */
1865                 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1866                         c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1867         }
1868
1869         ppin_init(c);
1870
1871         /* Init Machine Check Exception if available. */
1872         mcheck_cpu_init(c);
1873
1874         select_idle_routine(c);
1875
1876 #ifdef CONFIG_NUMA
1877         numa_add_cpu(smp_processor_id());
1878 #endif
1879 }
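
/*
 * The direction of the merge above matters: feature words are ANDed
 * into boot_cpu_data so that only flags common to every CPU survive,
 * while bug words are ORed the other way so that a bug seen on the boot
 * CPU is also reported on every AP.  A hypothetical two-word sketch,
 * for illustration only:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int boot_caps = 0xf0ff, ap_caps = 0xff0f; // features
 *		unsigned int boot_bugs = 0x0003, ap_bugs  = 0x0000; // bug bits
 *
 *		boot_caps &= ap_caps;	// common feature set: 0xf00f
 *		ap_bugs   |= boot_bugs;	// replicated bug flags: 0x0003
 *
 *		printf("caps %#x, ap bugs %#x\n", boot_caps, ap_bugs);
 *		return 0;
 *	}
 */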
1880
1881 /*
1882  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1883  * on 32-bit kernels:
1884  */
1885 #ifdef CONFIG_X86_32
1886 void enable_sep_cpu(void)
1887 {
1888         struct tss_struct *tss;
1889         int cpu;
1890
1891         if (!boot_cpu_has(X86_FEATURE_SEP))
1892                 return;
1893
1894         cpu = get_cpu();
1895         tss = &per_cpu(cpu_tss_rw, cpu);
1896
1897         /*
1898          * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1899          * see the big comment in struct x86_hw_tss's definition.
1900          */
1901
1902         tss->x86_tss.ss1 = __KERNEL_CS;
1903         wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
1904         wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
1905         wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1906
1907         put_cpu();
1908 }
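
/*
 * For reference: SYSENTER loads CS from MSR_IA32_SYSENTER_CS (SS is
 * derived as CS + 8), ESP from MSR_IA32_SYSENTER_ESP and EIP from
 * MSR_IA32_SYSENTER_EIP, so the three writes above are all that is
 * needed to route 32-bit fast system calls into entry_SYSENTER_32 on
 * the per-CPU entry stack.
 */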
1909 #endif
1910
1911 void __init identify_boot_cpu(void)
1912 {
1913         identify_cpu(&boot_cpu_data);
1914         if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1915                 pr_info("CET detected: Indirect Branch Tracking enabled\n");
1916 #ifdef CONFIG_X86_32
1917         sysenter_setup();
1918         enable_sep_cpu();
1919 #endif
1920         cpu_detect_tlb(&boot_cpu_data);
1921         setup_cr_pinning();
1922
1923         tsx_init();
1924 }
1925
1926 void identify_secondary_cpu(struct cpuinfo_x86 *c)
1927 {
1928         BUG_ON(c == &boot_cpu_data);
1929         identify_cpu(c);
1930 #ifdef CONFIG_X86_32
1931         enable_sep_cpu();
1932 #endif
1933         mtrr_ap_init();
1934         validate_apic_and_package_id(c);
1935         x86_spec_ctrl_setup_ap();
1936         update_srbds_msr();
1937
1938         tsx_ap_init();
1939 }
1940
1941 void print_cpu_info(struct cpuinfo_x86 *c)
1942 {
1943         const char *vendor = NULL;
1944
1945         if (c->x86_vendor < X86_VENDOR_NUM) {
1946                 vendor = this_cpu->c_vendor;
1947         } else {
1948                 if (c->cpuid_level >= 0)
1949                         vendor = c->x86_vendor_id;
1950         }
1951
1952         if (vendor && !strstr(c->x86_model_id, vendor))
1953                 pr_cont("%s ", vendor);
1954
1955         if (c->x86_model_id[0])
1956                 pr_cont("%s", c->x86_model_id);
1957         else
1958                 pr_cont("%d86", c->x86);
1959
1960         pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1961
1962         if (c->x86_stepping || c->cpuid_level >= 0)
1963                 pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
1964         else
1965                 pr_cont(")\n");
1966 }
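
/*
 * Example of an assembled line (illustrative values; any "smpboot:"
 * prefix comes from the caller, not from print_cpu_info()):
 *
 *	Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz (family: 0x6, model: 0x4f, stepping: 0x1)
 */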
1967
1968 /*
1969  * clearcpuid= was already parsed in cpu_parse_early_param().  This dummy
1970  * function prevents it from becoming an environment variable for init.
1971  */
1972 static __init int setup_clearcpuid(char *arg)
1973 {
1974         return 1;
1975 }
1976 __setup("clearcpuid=", setup_clearcpuid);
1977
1978 #ifdef CONFIG_X86_64
1979 DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
1980                      fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
1981 EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
1982
1983 /*
1984  * The following percpu variables are hot.  Align current_task to
1985  * cacheline size such that they fall in the same cacheline.
1986  */
1987 DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
1988         &init_task;
1989 EXPORT_PER_CPU_SYMBOL(current_task);
1990
1991 DEFINE_PER_CPU(void *, hardirq_stack_ptr);
1992 DEFINE_PER_CPU(bool, hardirq_stack_inuse);
1993
1994 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1995 EXPORT_PER_CPU_SYMBOL(__preempt_count);
1996
1997 DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
1998
1999 static void wrmsrl_cstar(unsigned long val)
2000 {
2001         /*
2002          * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
2003          * is so far ignored by the CPU, but raises a #VE trap in a TDX
2004          * guest. Avoid the pointless write on all Intel CPUs.
2005          */
2006         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2007                 wrmsrl(MSR_CSTAR, val);
2008 }
2009
2010 /* May not be marked __init: used by software suspend */
2011 void syscall_init(void)
2012 {
2013         wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
2014         wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
2015
2016 #ifdef CONFIG_IA32_EMULATION
2017         wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
2018         /*
2019          * This only works on Intel CPUs.
2020          * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
2021          * This does not cause SYSENTER to jump to the wrong location, because
2022          * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
2023          */
2024         wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
2025         wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
2026                     (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
2027         wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
2028 #else
2029         wrmsrl_cstar((unsigned long)ignore_sysret);
2030         wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
2031         wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
2032         wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
2033 #endif
2034
2035         /*
2036          * Flags to clear on syscall; clear as much as possible
2037          * to minimize user-space/kernel interference.
2038          */
2039         wrmsrl(MSR_SYSCALL_MASK,
2040                X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
2041                X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
2042                X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
2043                X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
2044                X86_EFLAGS_AC|X86_EFLAGS_ID);
2045 }
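
/*
 * The user-space side of the setup above, sketched for illustration
 * (x86-64 only): SYSCALL jumps to MSR_LSTAR (entry_SYSCALL_64) with
 * RFLAGS masked by MSR_SYSCALL_MASK; the CPU stashes the return RIP in
 * RCX and the old RFLAGS in R11, which is why both are clobbered here:
 *
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		static const char msg[] = "hello via SYSCALL\n";
 *		long ret;
 *
 *		asm volatile("syscall"
 *			     : "=a" (ret)
 *			     : "a" (1),			// __NR_write
 *			       "D" (STDOUT_FILENO),	// fd
 *			       "S" (msg),		// buf
 *			       "d" (sizeof(msg) - 1)	// count
 *			     : "rcx", "r11", "memory");
 *		return ret < 0;
 *	}
 */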
2046
2047 #else   /* CONFIG_X86_64 */
2048
2049 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
2050 EXPORT_PER_CPU_SYMBOL(current_task);
2051 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
2052 EXPORT_PER_CPU_SYMBOL(__preempt_count);
2053
2054 /*
2055  * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
2056  * the top of the kernel stack.  Use an extra percpu variable to track the
2057  * top of the kernel stack directly.
2058  */
2059 DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
2060         (unsigned long)&init_thread_union + THREAD_SIZE;
2061 EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
2062
2063 #ifdef CONFIG_STACKPROTECTOR
2064 DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
2065 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
2066 #endif
2067
2068 #endif  /* CONFIG_X86_64 */
2069
2070 /*
2071  * Clear all 6 debug registers:
2072  */
2073 static void clear_all_debug_regs(void)
2074 {
2075         int i;
2076
2077         for (i = 0; i < 8; i++) {
2078                 /* Skip DR4/DR5: they alias DR6/DR7, or #UD if CR4.DE is set */
2079                 if ((i == 4) || (i == 5))
2080                         continue;
2081
2082                 set_debugreg(0, i);
2083         }
2084 }
2085
2086 #ifdef CONFIG_KGDB
2087 /*
2088  * Restore debug regs if kgdbwait is in use and a kernel debugger
2089  * connection has been established.
2090  */
2091 static void dbg_restore_debug_regs(void)
2092 {
2093         if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
2094                 arch_kgdb_ops.correct_hw_break();
2095 }
2096 #else /* ! CONFIG_KGDB */
2097 #define dbg_restore_debug_regs()
2098 #endif /* ! CONFIG_KGDB */
2099
2100 static void wait_for_master_cpu(int cpu)
2101 {
2102 #ifdef CONFIG_SMP
2103         /*
2104          * Wait for an ACK from the master CPU before continuing
2105          * with AP initialization.
2106          */
2107         WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
2108         while (!cpumask_test_cpu(cpu, cpu_callout_mask))
2109                 cpu_relax();
2110 #endif
2111 }
2112
2113 #ifdef CONFIG_X86_64
2114 static inline void setup_getcpu(int cpu)
2115 {
2116         unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
2117         struct desc_struct d = { };
2118
2119         if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
2120                 wrmsr(MSR_TSC_AUX, cpudata, 0);
2121
2122         /* Store CPU and node number in limit. */
2123         d.limit0 = cpudata;
2124         d.limit1 = cpudata >> 16;
2125
2126         d.type = 5;             /* RO data, expand down, accessed */
2127         d.dpl = 3;              /* Visible to user code */
2128         d.s = 1;                /* Not a system segment */
2129         d.p = 1;                /* Present */
2130         d.d = 1;                /* 32-bit */
2131
2132         write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
2133 }
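
/*
 * User space (and the vDSO getcpu() fallback) can read this descriptor's
 * limit with LSL when neither RDTSCP nor RDPID is available.  A sketch
 * for illustration; the 0x7b selector (GDT_ENTRY_CPUNODE << 3 | RPL 3)
 * and the 12-bit cpu/node split are hard-coded here and would normally
 * come from the kernel headers:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long p;
 *
 *		asm("lsl %1, %0" : "=r" (p) : "r" (0x7bUL));
 *		printf("cpu %lu node %lu\n", p & 0xfff, p >> 12);
 *		return 0;
 *	}
 */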
2134
2135 static inline void ucode_cpu_init(int cpu)
2136 {
2137         if (cpu)
2138                 load_ucode_ap();
2139 }
2140
2141 static inline void tss_setup_ist(struct tss_struct *tss)
2142 {
2143         /* Set up the per-CPU TSS IST stacks */
2144         tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
2145         tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
2146         tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
2147         tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
2148         /* Only mapped when SEV-ES is active */
2149         tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
2150 }
2151
2152 #else /* CONFIG_X86_64 */
2153
2154 static inline void setup_getcpu(int cpu) { }
2155
2156 static inline void ucode_cpu_init(int cpu)
2157 {
2158         show_ucode_info_early();
2159 }
2160
2161 static inline void tss_setup_ist(struct tss_struct *tss) { }
2162
2163 #endif /* !CONFIG_X86_64 */
2164
2165 static inline void tss_setup_io_bitmap(struct tss_struct *tss)
2166 {
2167         tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
2168
2169 #ifdef CONFIG_X86_IOPL_IOPERM
2170         tss->io_bitmap.prev_max = 0;
2171         tss->io_bitmap.prev_sequence = 0;
2172         memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
2173         /*
2174          * Invalidate the extra array entry past the end of the
2175          * all-permission bitmap, as required by the hardware.
2176          */
2177         tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
2178 #endif
2179 }
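
/*
 * This bitmap backs the ioperm()/iopl() interface.  A user-space sketch,
 * for illustration (requires CAP_SYS_RAWIO and CONFIG_X86_IOPL_IOPERM;
 * the port numbers are the legacy parallel port and purely an example):
 *
 *	#include <stdio.h>
 *	#include <sys/io.h>
 *
 *	int main(void)
 *	{
 *		if (ioperm(0x378, 8, 1)) {	// grant access to 8 ports
 *			perror("ioperm");
 *			return 1;
 *		}
 *		outb(0x00, 0x378);		// now allowed from ring 3
 *		printf("status: %#x\n", inb(0x379));
 *		return ioperm(0x378, 8, 0);	// drop access again
 *	}
 */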
2180
2181 /*
2182  * Setup everything needed to handle exceptions from the IDT, including the IST
2183  * exceptions which use paranoid_entry().
2184  */
2185 void cpu_init_exception_handling(void)
2186 {
2187         struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
2188         int cpu = raw_smp_processor_id();
2189
2190         /* paranoid_entry() gets the CPU number from the GDT */
2191         setup_getcpu(cpu);
2192
2193         /* IST vectors need TSS to be set up. */
2194         tss_setup_ist(tss);
2195         tss_setup_io_bitmap(tss);
2196         set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
2197
2198         load_TR_desc();
2199
2200         /* GHCB needs to be setup to handle #VC. */
2201         setup_ghcb();
2202
2203         /* Finally load the IDT */
2204         load_current_idt();
2205 }
2206
2207 /*
2208  * cpu_init() initializes state that is per-CPU. Some data is already
2209  * initialized (naturally) in the bootstrap process, such as the GDT.  We
2210  * reload it nevertheless: this function acts as a 'CPU state barrier',
2211  * and nothing should get across.
2212  */
2213 void cpu_init(void)
2214 {
2215         struct task_struct *cur = current;
2216         int cpu = raw_smp_processor_id();
2217
2218         wait_for_master_cpu(cpu);
2219
2220         ucode_cpu_init(cpu);
2221
2222 #ifdef CONFIG_NUMA
2223         if (this_cpu_read(numa_node) == 0 &&
2224             early_cpu_to_node(cpu) != NUMA_NO_NODE)
2225                 set_numa_node(early_cpu_to_node(cpu));
2226 #endif
2227         pr_debug("Initializing CPU#%d\n", cpu);
2228
2229         if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
2230             boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
2231                 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
2232
2233         /*
2234          * Initialize the per-CPU GDT with the boot GDT,
2235          * and set up the GDT descriptor:
2236          */
2237         switch_to_new_gdt(cpu);
2238
2239         if (IS_ENABLED(CONFIG_X86_64)) {
2240                 loadsegment(fs, 0);
2241                 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
2242                 syscall_init();
2243
2244                 wrmsrl(MSR_FS_BASE, 0);
2245                 wrmsrl(MSR_KERNEL_GS_BASE, 0);
2246                 barrier();
2247
2248                 x2apic_setup();
2249         }
2250
2251         mmgrab(&init_mm);
2252         cur->active_mm = &init_mm;
2253         BUG_ON(cur->mm);
2254         initialize_tlbstate_and_flush();
2255         enter_lazy_tlb(&init_mm, cur);
2256
2257         /*
2258          * sp0 points to the entry trampoline stack regardless of what task
2259          * is running.
2260          */
2261         load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
2262
2263         load_mm_ldt(&init_mm);
2264
2265         clear_all_debug_regs();
2266         dbg_restore_debug_regs();
2267
2268         doublefault_init_cpu_tss();
2269
2270         fpu__init_cpu();
2271
2272         if (is_uv_system())
2273                 uv_cpu_init();
2274
2275         load_fixmap_gdt(cpu);
2276 }
2277
2278 #ifdef CONFIG_SMP
2279 void cpu_init_secondary(void)
2280 {
2281         /*
2282          * Relies on the BP having set up the IDT tables, which are loaded
2283          * on this CPU in cpu_init_exception_handling().
2284          */
2285         cpu_init_exception_handling();
2286         cpu_init();
2287 }
2288 #endif
2289
2290 #ifdef CONFIG_MICROCODE_LATE_LOADING
2291 /*
2292  * The microcode loader calls this upon a late microcode load to recheck
2293  * features, but only when the microcode has actually been updated. The caller
2294  * holds microcode_mutex and the CPU hotplug lock.
2295  */
2296 void microcode_check(void)
2297 {
2298         struct cpuinfo_x86 info;
2299
2300         perf_check_microcode();
2301
2302         /* Reload CPUID max function as it might've changed. */
2303         info.cpuid_level = cpuid_eax(0);
2304
2305         /*
2306          * Copy all capability leaves to pick up the synthetic ones so that
2307          * memcmp() below doesn't fail on them. The ones coming from CPUID will
2308          * get overwritten in get_cpu_cap().
2309          */
2310         memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
2311
2312         get_cpu_cap(&info);
2313
2314         if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
2315                 return;
2316
2317         pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
2318         pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
2319 }
2320 #endif
2321
2322 /*
2323  * Invoked from core CPU hotplug code after hotplug operations
2324  */
2325 void arch_smt_update(void)
2326 {
2327         /* Handle the speculative execution misfeatures */
2328         cpu_bugs_smt_update();
2329         /* Check whether IPI broadcasting can be enabled */
2330         apic_smt_update();
2331 }